aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorDave Airlie <airlied@redhat.com>2014-04-05 02:14:21 -0400
committerDave Airlie <airlied@redhat.com>2014-04-05 02:14:21 -0400
commit9f97ba806a9cb8e828baca71eca8b684939053d8 (patch)
treeec036e36d1d2e64b6e67c4d5d24a5ecb6475aac0
parent82c68b6ccd54117a87cb2d9b91c2ee6e1280cf9d (diff)
parent10b6ee4a87811a110cb01eaca01eb04da6801baf (diff)
Merge tag 'drm-intel-fixes-2014-04-04' of git://anongit.freedesktop.org/drm-intel into drm-next
Merge window -fixes pull request as usual. Well, I did sneak in Jani's drm_i915_private_t typedef removal, need to have fun with a big sed job too ;-) Otherwise: - hdmi interlaced fixes (Jesse&Ville) - pipe error/underrun/crc tracking fixes, regression in late 3.14-rc (but not cc: stable since only really relevant for igt runs) - large cursor wm fixes (Chris) - fix gpu turbo boost/throttle again, was getting stuck due to vlv rps patches (Chris+Imre) - fix runtime pm fallout (Paulo) - bios framebuffer inherit fix (Chris) - a few smaller things * tag 'drm-intel-fixes-2014-04-04' of git://anongit.freedesktop.org/drm-intel: (196 commits) Skip intel_crt_init for Dell XPS 8700 drm/i915: vlv: fix RPS interrupt mask setting Revert "drm/i915/vlv: fixup DDR freq detection per Punit spec" drm/i915: move power domain init earlier during system resume drm/i915: Fix the computation of required fb size for pipe drm/i915: don't get/put runtime PM at the debugfs forcewake file drm/i915: fix WARNs when reading DDI state while suspended drm/i915: don't read cursor registers on powered down pipes drm/i915: get runtime PM at i915_display_info drm/i915: don't read pp_ctrl_reg if we're suspended drm/i915: get runtime PM at i915_reg_read_ioctl drm/i915: don't schedule force_wake_timer at gen6_read drm/i915: vlv: reserve the GT power context only once during driver init drm/i915: prefer struct drm_i915_private to drm_i915_private_t drm/i915/overlay: prefer struct drm_i915_private to drm_i915_private_t drm/i915/ringbuffer: prefer struct drm_i915_private to drm_i915_private_t drm/i915/display: prefer struct drm_i915_private to drm_i915_private_t drm/i915/irq: prefer struct drm_i915_private to drm_i915_private_t drm/i915/gem: prefer struct drm_i915_private to drm_i915_private_t drm/i915/dma: prefer struct drm_i915_private to drm_i915_private_t ...
-rw-r--r--Documentation/devicetree/bindings/net/micrel-ks8851.txt1
-rw-r--r--Documentation/networking/netlink_mmap.txt4
-rw-r--r--MAINTAINERS14
-rw-r--r--Makefile2
-rw-r--r--arch/arm/boot/dts/sama5d36.dtsi2
-rw-r--r--arch/mips/Kconfig24
-rw-r--r--arch/mips/alchemy/board-gpr.c4
-rw-r--r--arch/mips/alchemy/board-mtx1.c4
-rw-r--r--arch/mips/bcm47xx/board.c1
-rw-r--r--arch/mips/bcm47xx/nvram.c2
-rw-r--r--arch/mips/cavium-octeon/octeon-irq.c22
-rw-r--r--arch/mips/include/asm/asmmacro.h15
-rw-r--r--arch/mips/include/asm/fpu.h2
-rw-r--r--arch/mips/include/asm/ftrace.h20
-rw-r--r--arch/mips/include/asm/syscall.h10
-rw-r--r--arch/mips/include/uapi/asm/inst.h4
-rw-r--r--arch/mips/kernel/ftrace.c5
-rw-r--r--arch/mips/kernel/r4k_fpu.S16
-rw-r--r--arch/mips/kernel/rtlx-cmp.c3
-rw-r--r--arch/mips/kernel/rtlx-mt.c3
-rw-r--r--arch/mips/math-emu/cp1emu.c6
-rw-r--r--arch/mips/mti-malta/malta-amon.c2
-rw-r--r--arch/mips/mti-malta/malta-int.c4
-rw-r--r--arch/mips/pci/msi-octeon.c1
-rw-r--r--arch/parisc/include/asm/page.h11
-rw-r--r--arch/parisc/include/asm/spinlock.h4
-rw-r--r--arch/parisc/include/uapi/asm/unistd.h4
-rw-r--r--arch/parisc/kernel/cache.c64
-rw-r--r--arch/parisc/kernel/syscall_table.S1
-rw-r--r--arch/powerpc/kvm/book3s_hv_rmhandlers.S71
-rw-r--r--arch/sparc/kernel/process_64.c4
-rw-r--r--arch/sparc/kernel/syscalls.S4
-rw-r--r--arch/sparc/mm/tsb.c2
-rw-r--r--arch/x86/include/asm/pgtable.h14
-rw-r--r--arch/x86/include/asm/topology.h3
-rw-r--r--arch/x86/kernel/aperture_64.c20
-rw-r--r--arch/x86/xen/mmu.c4
-rw-r--r--block/blk-core.c25
-rw-r--r--block/blk-flush.c11
-rw-r--r--drivers/block/mtip32xx/mtip32xx.c2
-rw-r--r--drivers/block/rbd.c1
-rw-r--r--drivers/clocksource/vf_pit_timer.c2
-rw-r--r--drivers/gpu/drm/i915/i915_debugfs.c64
-rw-r--r--drivers/gpu/drm/i915/i915_dma.c48
-rw-r--r--drivers/gpu/drm/i915/i915_drv.c92
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h23
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c46
-rw-r--r--drivers/gpu/drm/i915/i915_gem_debug.c2
-rw-r--r--drivers/gpu/drm/i915/i915_gem_evict.c4
-rw-r--r--drivers/gpu/drm/i915/i915_gem_execbuffer.c4
-rw-r--r--drivers/gpu/drm/i915/i915_gem_gtt.c6
-rw-r--r--drivers/gpu/drm/i915/i915_gem_stolen.c7
-rw-r--r--drivers/gpu/drm/i915/i915_gem_tiling.c6
-rw-r--r--drivers/gpu/drm/i915/i915_gpu_error.c6
-rw-r--r--drivers/gpu/drm/i915/i915_irq.c279
-rw-r--r--drivers/gpu/drm/i915/i915_reg.h4
-rw-r--r--drivers/gpu/drm/i915/intel_crt.c8
-rw-r--r--drivers/gpu/drm/i915/intel_ddi.c5
-rw-r--r--drivers/gpu/drm/i915/intel_display.c77
-rw-r--r--drivers/gpu/drm/i915/intel_dp.c5
-rw-r--r--drivers/gpu/drm/i915/intel_drv.h2
-rw-r--r--drivers/gpu/drm/i915/intel_fbdev.c29
-rw-r--r--drivers/gpu/drm/i915/intel_overlay.c24
-rw-r--r--drivers/gpu/drm/i915/intel_pm.c156
-rw-r--r--drivers/gpu/drm/i915/intel_ringbuffer.c63
-rw-r--r--drivers/gpu/drm/i915/intel_ringbuffer.h6
-rw-r--r--drivers/gpu/drm/i915/intel_tv.c9
-rw-r--r--drivers/gpu/drm/i915/intel_uncore.c20
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drm.c14
-rw-r--r--drivers/gpu/drm/udl/udl_gem.c11
-rw-r--r--drivers/hid/hid-lg4ff.c2
-rw-r--r--drivers/hid/hid-sony.c27
-rw-r--r--drivers/hid/hidraw.c4
-rw-r--r--drivers/i2c/busses/i2c-cpm.c2
-rw-r--r--drivers/input/evdev.c4
-rw-r--r--drivers/input/keyboard/adp5588-keys.c12
-rw-r--r--drivers/input/misc/da9052_onkey.c29
-rw-r--r--drivers/input/mouse/cypress_ps2.c1
-rw-r--r--drivers/input/mouse/synaptics.c55
-rw-r--r--drivers/input/mousedev.c73
-rw-r--r--drivers/isdn/capi/Kconfig18
-rw-r--r--drivers/net/ethernet/atheros/alx/main.c14
-rw-r--r--drivers/net/ethernet/atheros/atl1e/atl1e_main.c4
-rw-r--r--drivers/net/ethernet/broadcom/cnic.c111
-rw-r--r--drivers/net/ethernet/broadcom/cnic.h2
-rw-r--r--drivers/net/ethernet/broadcom/cnic_defs.h2
-rw-r--r--drivers/net/ethernet/broadcom/cnic_if.h16
-rw-r--r--drivers/net/ethernet/broadcom/tg3.c5
-rw-r--r--drivers/net/ethernet/marvell/mvneta.c60
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/main.c6
-rw-r--r--drivers/net/ethernet/micrel/ks8851.c30
-rw-r--r--drivers/net/ethernet/qlogic/qlge/qlge_main.c4
-rw-r--r--drivers/net/ethernet/ti/cpsw.c4
-rw-r--r--drivers/net/ethernet/ti/davinci_cpdma.c4
-rw-r--r--drivers/net/ethernet/ti/davinci_emac.c53
-rw-r--r--drivers/net/ethernet/via/via-rhine.c8
-rw-r--r--drivers/net/ifb.c3
-rw-r--r--drivers/net/phy/phy_device.c3
-rw-r--r--drivers/net/usb/cdc_ncm.c48
-rw-r--r--drivers/net/usb/usbnet.c33
-rw-r--r--drivers/net/veth.c5
-rw-r--r--drivers/net/virtio_net.c6
-rw-r--r--drivers/net/vxlan.c130
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.c3
-rw-r--r--drivers/net/wireless/ath/ath9k/xmit.c4
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c4
-rw-r--r--drivers/net/wireless/rt2x00/rt2800lib.c6
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_io.c16
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_tgt.c38
-rw-r--r--drivers/scsi/bnx2i/bnx2i_hwi.c52
-rw-r--r--drivers/scsi/bnx2i/bnx2i_iscsi.c23
-rw-r--r--drivers/tty/serial/sunhv.c22
-rw-r--r--drivers/tty/serial/sunsab.c14
-rw-r--r--drivers/tty/serial/sunsu.c14
-rw-r--r--drivers/tty/serial/sunzilog.c14
-rw-r--r--drivers/vhost/net.c20
-rw-r--r--drivers/xen/balloon.c24
-rw-r--r--fs/anon_inodes.c34
-rw-r--r--fs/dcache.c4
-rw-r--r--fs/ext4/inode.c15
-rw-r--r--fs/file.c19
-rw-r--r--fs/mount.h4
-rw-r--r--fs/namei.c29
-rw-r--r--fs/namespace.c177
-rw-r--r--fs/nfsd/vfs.c1
-rw-r--r--fs/ocfs2/stackglue.c4
-rw-r--r--fs/pnode.c26
-rw-r--r--fs/pnode.h4
-rw-r--r--fs/read_write.c4
-rw-r--r--include/linux/bitops.h15
-rw-r--r--include/linux/ftrace_event.h4
-rw-r--r--include/linux/netdev_features.h7
-rw-r--r--include/linux/netdevice.h2
-rw-r--r--include/linux/rmap.h3
-rw-r--r--include/linux/security.h10
-rw-r--r--include/linux/skbuff.h4
-rw-r--r--include/linux/usb/cdc_ncm.h1
-rw-r--r--include/linux/usb/usbnet.h2
-rw-r--r--include/net/if_inet6.h4
-rw-r--r--include/net/tcp.h11
-rw-r--r--include/trace/ftrace.h7
-rw-r--r--kernel/audit.c12
-rw-r--r--kernel/cgroup.c11
-rw-r--r--kernel/futex.c53
-rw-r--r--kernel/time/timekeeping.c3
-rw-r--r--kernel/trace/trace.c27
-rw-r--r--kernel/trace/trace_events.c6
-rw-r--r--kernel/trace/trace_export.c7
-rw-r--r--lib/fonts/Kconfig6
-rw-r--r--lib/random32.c13
-rw-r--r--mm/fremap.c28
-rw-r--r--mm/migrate.c32
-rw-r--r--mm/rmap.c5
-rw-r--r--net/8021q/vlan.c4
-rw-r--r--net/8021q/vlan_dev.c6
-rw-r--r--net/bridge/br_device.c6
-rw-r--r--net/bridge/br_input.c11
-rw-r--r--net/bridge/br_vlan.c44
-rw-r--r--net/core/dev.c13
-rw-r--r--net/core/netpoll.c2
-rw-r--r--net/core/rtnetlink.c10
-rw-r--r--net/core/skbuff.c30
-rw-r--r--net/ipv4/gre_demux.c8
-rw-r--r--net/ipv4/ip_tunnel.c3
-rw-r--r--net/ipv4/ip_tunnel_core.c1
-rw-r--r--net/ipv4/ipmr.c13
-rw-r--r--net/ipv4/tcp_ipv4.c2
-rw-r--r--net/ipv6/addrconf.c193
-rw-r--r--net/ipv6/ip6_output.c14
-rw-r--r--net/ipv6/ip6mr.c13
-rw-r--r--net/key/af_key.c19
-rw-r--r--net/netfilter/nfnetlink_queue_core.c9
-rw-r--r--net/openvswitch/datapath.c15
-rw-r--r--net/openvswitch/flow.c29
-rw-r--r--net/tipc/subscr.c29
-rw-r--r--net/unix/af_unix.c17
-rw-r--r--net/xfrm/xfrm_user.c6
-rw-r--r--security/capability.c3
-rw-r--r--security/security.c6
-rw-r--r--security/selinux/hooks.c13
-rw-r--r--security/selinux/include/security.h2
-rw-r--r--security/selinux/include/xfrm.h3
-rw-r--r--security/selinux/selinuxfs.c28
-rw-r--r--security/selinux/ss/services.c6
-rw-r--r--security/selinux/xfrm.c14
-rw-r--r--sound/core/compress_offload.c2
-rw-r--r--sound/pci/oxygen/xonar_dg.c30
-rw-r--r--tools/perf/bench/numa.c1
-rw-r--r--tools/perf/builtin-bench.c2
189 files changed, 2169 insertions, 1502 deletions
diff --git a/Documentation/devicetree/bindings/net/micrel-ks8851.txt b/Documentation/devicetree/bindings/net/micrel-ks8851.txt
index 11ace3c3d805..4fc392763611 100644
--- a/Documentation/devicetree/bindings/net/micrel-ks8851.txt
+++ b/Documentation/devicetree/bindings/net/micrel-ks8851.txt
@@ -7,3 +7,4 @@ Required properties:
7 7
8Optional properties: 8Optional properties:
9- local-mac-address : Ethernet mac address to use 9- local-mac-address : Ethernet mac address to use
10- vdd-supply: supply for Ethernet mac
diff --git a/Documentation/networking/netlink_mmap.txt b/Documentation/networking/netlink_mmap.txt
index b26122973525..c6af4bac5aa8 100644
--- a/Documentation/networking/netlink_mmap.txt
+++ b/Documentation/networking/netlink_mmap.txt
@@ -226,9 +226,9 @@ Ring setup:
226 void *rx_ring, *tx_ring; 226 void *rx_ring, *tx_ring;
227 227
228 /* Configure ring parameters */ 228 /* Configure ring parameters */
229 if (setsockopt(fd, NETLINK_RX_RING, &req, sizeof(req)) < 0) 229 if (setsockopt(fd, SOL_NETLINK, NETLINK_RX_RING, &req, sizeof(req)) < 0)
230 exit(1); 230 exit(1);
231 if (setsockopt(fd, NETLINK_TX_RING, &req, sizeof(req)) < 0) 231 if (setsockopt(fd, SOL_NETLINK, NETLINK_TX_RING, &req, sizeof(req)) < 0)
232 exit(1) 232 exit(1)
233 233
234 /* Calculate size of each individual ring */ 234 /* Calculate size of each individual ring */
diff --git a/MAINTAINERS b/MAINTAINERS
index 4f56025da802..719e6279b80a 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -911,11 +911,11 @@ F: arch/arm/include/asm/hardware/dec21285.h
911F: arch/arm/mach-footbridge/ 911F: arch/arm/mach-footbridge/
912 912
913ARM/FREESCALE IMX / MXC ARM ARCHITECTURE 913ARM/FREESCALE IMX / MXC ARM ARCHITECTURE
914M: Shawn Guo <shawn.guo@linaro.org> 914M: Shawn Guo <shawn.guo@freescale.com>
915M: Sascha Hauer <kernel@pengutronix.de> 915M: Sascha Hauer <kernel@pengutronix.de>
916L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) 916L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
917S: Maintained 917S: Maintained
918T: git git://git.linaro.org/people/shawnguo/linux-2.6.git 918T: git git://git.kernel.org/pub/scm/linux/kernel/git/shawnguo/linux.git
919F: arch/arm/mach-imx/ 919F: arch/arm/mach-imx/
920F: arch/arm/boot/dts/imx* 920F: arch/arm/boot/dts/imx*
921F: arch/arm/configs/imx*_defconfig 921F: arch/arm/configs/imx*_defconfig
@@ -1832,8 +1832,8 @@ F: net/bluetooth/
1832F: include/net/bluetooth/ 1832F: include/net/bluetooth/
1833 1833
1834BONDING DRIVER 1834BONDING DRIVER
1835M: Jay Vosburgh <fubar@us.ibm.com> 1835M: Jay Vosburgh <j.vosburgh@gmail.com>
1836M: Veaceslav Falico <vfalico@redhat.com> 1836M: Veaceslav Falico <vfalico@gmail.com>
1837M: Andy Gospodarek <andy@greyhouse.net> 1837M: Andy Gospodarek <andy@greyhouse.net>
1838L: netdev@vger.kernel.org 1838L: netdev@vger.kernel.org
1839W: http://sourceforge.net/projects/bonding/ 1839W: http://sourceforge.net/projects/bonding/
@@ -2801,9 +2801,9 @@ S: Supported
2801F: drivers/acpi/dock.c 2801F: drivers/acpi/dock.c
2802 2802
2803DOCUMENTATION 2803DOCUMENTATION
2804M: Rob Landley <rob@landley.net> 2804M: Randy Dunlap <rdunlap@infradead.org>
2805L: linux-doc@vger.kernel.org 2805L: linux-doc@vger.kernel.org
2806T: TBD 2806T: quilt http://www.infradead.org/~rdunlap/Doc/patches/
2807S: Maintained 2807S: Maintained
2808F: Documentation/ 2808F: Documentation/
2809 2809
@@ -4549,6 +4549,7 @@ M: Greg Rose <gregory.v.rose@intel.com>
4549M: Alex Duyck <alexander.h.duyck@intel.com> 4549M: Alex Duyck <alexander.h.duyck@intel.com>
4550M: John Ronciak <john.ronciak@intel.com> 4550M: John Ronciak <john.ronciak@intel.com>
4551M: Mitch Williams <mitch.a.williams@intel.com> 4551M: Mitch Williams <mitch.a.williams@intel.com>
4552M: Linux NICS <linux.nics@intel.com>
4552L: e1000-devel@lists.sourceforge.net 4553L: e1000-devel@lists.sourceforge.net
4553W: http://www.intel.com/support/feedback.htm 4554W: http://www.intel.com/support/feedback.htm
4554W: http://e1000.sourceforge.net/ 4555W: http://e1000.sourceforge.net/
@@ -6009,6 +6010,7 @@ F: include/uapi/linux/net.h
6009F: include/uapi/linux/netdevice.h 6010F: include/uapi/linux/netdevice.h
6010F: tools/net/ 6011F: tools/net/
6011F: tools/testing/selftests/net/ 6012F: tools/testing/selftests/net/
6013F: lib/random32.c
6012 6014
6013NETWORKING [IPv4/IPv6] 6015NETWORKING [IPv4/IPv6]
6014M: "David S. Miller" <davem@davemloft.net> 6016M: "David S. Miller" <davem@davemloft.net>
diff --git a/Makefile b/Makefile
index ef779ec26f62..e5ac8a62e6e5 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
1VERSION = 3 1VERSION = 3
2PATCHLEVEL = 14 2PATCHLEVEL = 14
3SUBLEVEL = 0 3SUBLEVEL = 0
4EXTRAVERSION = -rc7 4EXTRAVERSION =
5NAME = Shuffling Zombie Juror 5NAME = Shuffling Zombie Juror
6 6
7# *DOCUMENTATION* 7# *DOCUMENTATION*
diff --git a/arch/arm/boot/dts/sama5d36.dtsi b/arch/arm/boot/dts/sama5d36.dtsi
index 6c31c26e6cc0..db58cad6acd3 100644
--- a/arch/arm/boot/dts/sama5d36.dtsi
+++ b/arch/arm/boot/dts/sama5d36.dtsi
@@ -8,8 +8,8 @@
8 */ 8 */
9#include "sama5d3.dtsi" 9#include "sama5d3.dtsi"
10#include "sama5d3_can.dtsi" 10#include "sama5d3_can.dtsi"
11#include "sama5d3_emac.dtsi"
12#include "sama5d3_gmac.dtsi" 11#include "sama5d3_gmac.dtsi"
12#include "sama5d3_emac.dtsi"
13#include "sama5d3_lcd.dtsi" 13#include "sama5d3_lcd.dtsi"
14#include "sama5d3_mci2.dtsi" 14#include "sama5d3_mci2.dtsi"
15#include "sama5d3_tcb1.dtsi" 15#include "sama5d3_tcb1.dtsi"
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index dcae3a7035db..95fa1f1d5c8b 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -1776,12 +1776,12 @@ endchoice
1776 1776
1777config FORCE_MAX_ZONEORDER 1777config FORCE_MAX_ZONEORDER
1778 int "Maximum zone order" 1778 int "Maximum zone order"
1779 range 14 64 if HUGETLB_PAGE && PAGE_SIZE_64KB 1779 range 14 64 if MIPS_HUGE_TLB_SUPPORT && PAGE_SIZE_64KB
1780 default "14" if HUGETLB_PAGE && PAGE_SIZE_64KB 1780 default "14" if MIPS_HUGE_TLB_SUPPORT && PAGE_SIZE_64KB
1781 range 13 64 if HUGETLB_PAGE && PAGE_SIZE_32KB 1781 range 13 64 if MIPS_HUGE_TLB_SUPPORT && PAGE_SIZE_32KB
1782 default "13" if HUGETLB_PAGE && PAGE_SIZE_32KB 1782 default "13" if MIPS_HUGE_TLB_SUPPORT && PAGE_SIZE_32KB
1783 range 12 64 if HUGETLB_PAGE && PAGE_SIZE_16KB 1783 range 12 64 if MIPS_HUGE_TLB_SUPPORT && PAGE_SIZE_16KB
1784 default "12" if HUGETLB_PAGE && PAGE_SIZE_16KB 1784 default "12" if MIPS_HUGE_TLB_SUPPORT && PAGE_SIZE_16KB
1785 range 11 64 1785 range 11 64
1786 default "11" 1786 default "11"
1787 help 1787 help
@@ -2353,9 +2353,8 @@ config SECCOMP
2353 If unsure, say Y. Only embedded should say N here. 2353 If unsure, say Y. Only embedded should say N here.
2354 2354
2355config MIPS_O32_FP64_SUPPORT 2355config MIPS_O32_FP64_SUPPORT
2356 bool "Support for O32 binaries using 64-bit FP" 2356 bool "Support for O32 binaries using 64-bit FP (EXPERIMENTAL)"
2357 depends on 32BIT || MIPS32_O32 2357 depends on 32BIT || MIPS32_O32
2358 default y
2359 help 2358 help
2360 When this is enabled, the kernel will support use of 64-bit floating 2359 When this is enabled, the kernel will support use of 64-bit floating
2361 point registers with binaries using the O32 ABI along with the 2360 point registers with binaries using the O32 ABI along with the
@@ -2367,7 +2366,14 @@ config MIPS_O32_FP64_SUPPORT
2367 of your kernel & potentially improve FP emulation performance by 2366 of your kernel & potentially improve FP emulation performance by
2368 saying N here. 2367 saying N here.
2369 2368
2370 If unsure, say Y. 2369 Although binutils currently supports use of this flag the details
2370 concerning its effect upon the O32 ABI in userland are still being
2371 worked on. In order to avoid userland becoming dependant upon current
2372 behaviour before the details have been finalised, this option should
2373 be considered experimental and only enabled by those working upon
2374 said details.
2375
2376 If unsure, say N.
2371 2377
2372config USE_OF 2378config USE_OF
2373 bool 2379 bool
diff --git a/arch/mips/alchemy/board-gpr.c b/arch/mips/alchemy/board-gpr.c
index 9edc35ff8cf1..acf9a2a37f5a 100644
--- a/arch/mips/alchemy/board-gpr.c
+++ b/arch/mips/alchemy/board-gpr.c
@@ -53,10 +53,8 @@ void __init prom_init(void)
53 prom_init_cmdline(); 53 prom_init_cmdline();
54 54
55 memsize_str = prom_getenv("memsize"); 55 memsize_str = prom_getenv("memsize");
56 if (!memsize_str) 56 if (!memsize_str || kstrtoul(memsize_str, 0, &memsize))
57 memsize = 0x04000000; 57 memsize = 0x04000000;
58 else
59 strict_strtoul(memsize_str, 0, &memsize);
60 add_memory_region(0, memsize, BOOT_MEM_RAM); 58 add_memory_region(0, memsize, BOOT_MEM_RAM);
61} 59}
62 60
diff --git a/arch/mips/alchemy/board-mtx1.c b/arch/mips/alchemy/board-mtx1.c
index 9969dbab19e3..25a59a23547e 100644
--- a/arch/mips/alchemy/board-mtx1.c
+++ b/arch/mips/alchemy/board-mtx1.c
@@ -52,10 +52,8 @@ void __init prom_init(void)
52 prom_init_cmdline(); 52 prom_init_cmdline();
53 53
54 memsize_str = prom_getenv("memsize"); 54 memsize_str = prom_getenv("memsize");
55 if (!memsize_str) 55 if (!memsize_str || kstrtoul(memsize_str, 0, &memsize))
56 memsize = 0x04000000; 56 memsize = 0x04000000;
57 else
58 strict_strtoul(memsize_str, 0, &memsize);
59 add_memory_region(0, memsize, BOOT_MEM_RAM); 57 add_memory_region(0, memsize, BOOT_MEM_RAM);
60} 58}
61 59
diff --git a/arch/mips/bcm47xx/board.c b/arch/mips/bcm47xx/board.c
index 6d612e2b949b..cdd8246f92b3 100644
--- a/arch/mips/bcm47xx/board.c
+++ b/arch/mips/bcm47xx/board.c
@@ -1,3 +1,4 @@
1#include <linux/errno.h>
1#include <linux/export.h> 2#include <linux/export.h>
2#include <linux/string.h> 3#include <linux/string.h>
3#include <bcm47xx_board.h> 4#include <bcm47xx_board.h>
diff --git a/arch/mips/bcm47xx/nvram.c b/arch/mips/bcm47xx/nvram.c
index 6decb27cf48b..2bed73a684ae 100644
--- a/arch/mips/bcm47xx/nvram.c
+++ b/arch/mips/bcm47xx/nvram.c
@@ -196,7 +196,7 @@ int bcm47xx_nvram_gpio_pin(const char *name)
196 char nvram_var[10]; 196 char nvram_var[10];
197 char buf[30]; 197 char buf[30];
198 198
199 for (i = 0; i < 16; i++) { 199 for (i = 0; i < 32; i++) {
200 err = snprintf(nvram_var, sizeof(nvram_var), "gpio%i", i); 200 err = snprintf(nvram_var, sizeof(nvram_var), "gpio%i", i);
201 if (err <= 0) 201 if (err <= 0)
202 continue; 202 continue;
diff --git a/arch/mips/cavium-octeon/octeon-irq.c b/arch/mips/cavium-octeon/octeon-irq.c
index 25fbfae06c1f..c2bb4f896ce7 100644
--- a/arch/mips/cavium-octeon/octeon-irq.c
+++ b/arch/mips/cavium-octeon/octeon-irq.c
@@ -975,10 +975,6 @@ static int octeon_irq_ciu_xlat(struct irq_domain *d,
975 if (ciu > 1 || bit > 63) 975 if (ciu > 1 || bit > 63)
976 return -EINVAL; 976 return -EINVAL;
977 977
978 /* These are the GPIO lines */
979 if (ciu == 0 && bit >= 16 && bit < 32)
980 return -EINVAL;
981
982 *out_hwirq = (ciu << 6) | bit; 978 *out_hwirq = (ciu << 6) | bit;
983 *out_type = 0; 979 *out_type = 0;
984 980
@@ -1007,6 +1003,10 @@ static int octeon_irq_ciu_map(struct irq_domain *d,
1007 if (!octeon_irq_virq_in_range(virq)) 1003 if (!octeon_irq_virq_in_range(virq))
1008 return -EINVAL; 1004 return -EINVAL;
1009 1005
1006 /* Don't map irq if it is reserved for GPIO. */
1007 if (line == 0 && bit >= 16 && bit <32)
1008 return 0;
1009
1010 if (line > 1 || octeon_irq_ciu_to_irq[line][bit] != 0) 1010 if (line > 1 || octeon_irq_ciu_to_irq[line][bit] != 0)
1011 return -EINVAL; 1011 return -EINVAL;
1012 1012
@@ -1525,10 +1525,6 @@ static int octeon_irq_ciu2_xlat(struct irq_domain *d,
1525 ciu = intspec[0]; 1525 ciu = intspec[0];
1526 bit = intspec[1]; 1526 bit = intspec[1];
1527 1527
1528 /* Line 7 are the GPIO lines */
1529 if (ciu > 6 || bit > 63)
1530 return -EINVAL;
1531
1532 *out_hwirq = (ciu << 6) | bit; 1528 *out_hwirq = (ciu << 6) | bit;
1533 *out_type = 0; 1529 *out_type = 0;
1534 1530
@@ -1570,8 +1566,14 @@ static int octeon_irq_ciu2_map(struct irq_domain *d,
1570 if (!octeon_irq_virq_in_range(virq)) 1566 if (!octeon_irq_virq_in_range(virq))
1571 return -EINVAL; 1567 return -EINVAL;
1572 1568
1573 /* Line 7 are the GPIO lines */ 1569 /*
1574 if (line > 6 || octeon_irq_ciu_to_irq[line][bit] != 0) 1570 * Don't map irq if it is reserved for GPIO.
1571 * (Line 7 are the GPIO lines.)
1572 */
1573 if (line == 7)
1574 return 0;
1575
1576 if (line > 7 || octeon_irq_ciu_to_irq[line][bit] != 0)
1575 return -EINVAL; 1577 return -EINVAL;
1576 1578
1577 if (octeon_irq_ciu2_is_edge(line, bit)) 1579 if (octeon_irq_ciu2_is_edge(line, bit))
diff --git a/arch/mips/include/asm/asmmacro.h b/arch/mips/include/asm/asmmacro.h
index 3220c93ea981..4225e99bd7bf 100644
--- a/arch/mips/include/asm/asmmacro.h
+++ b/arch/mips/include/asm/asmmacro.h
@@ -9,6 +9,7 @@
9#define _ASM_ASMMACRO_H 9#define _ASM_ASMMACRO_H
10 10
11#include <asm/hazards.h> 11#include <asm/hazards.h>
12#include <asm/asm-offsets.h>
12 13
13#ifdef CONFIG_32BIT 14#ifdef CONFIG_32BIT
14#include <asm/asmmacro-32.h> 15#include <asm/asmmacro-32.h>
@@ -54,11 +55,21 @@
54 .endm 55 .endm
55 56
56 .macro local_irq_disable reg=t0 57 .macro local_irq_disable reg=t0
58#ifdef CONFIG_PREEMPT
59 lw \reg, TI_PRE_COUNT($28)
60 addi \reg, \reg, 1
61 sw \reg, TI_PRE_COUNT($28)
62#endif
57 mfc0 \reg, CP0_STATUS 63 mfc0 \reg, CP0_STATUS
58 ori \reg, \reg, 1 64 ori \reg, \reg, 1
59 xori \reg, \reg, 1 65 xori \reg, \reg, 1
60 mtc0 \reg, CP0_STATUS 66 mtc0 \reg, CP0_STATUS
61 irq_disable_hazard 67 irq_disable_hazard
68#ifdef CONFIG_PREEMPT
69 lw \reg, TI_PRE_COUNT($28)
70 addi \reg, \reg, -1
71 sw \reg, TI_PRE_COUNT($28)
72#endif
62 .endm 73 .endm
63#endif /* CONFIG_MIPS_MT_SMTC */ 74#endif /* CONFIG_MIPS_MT_SMTC */
64 75
@@ -106,7 +117,7 @@
106 .endm 117 .endm
107 118
108 .macro fpu_save_double thread status tmp 119 .macro fpu_save_double thread status tmp
109#if defined(CONFIG_MIPS64) || defined(CONFIG_CPU_MIPS32_R2) 120#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2)
110 sll \tmp, \status, 5 121 sll \tmp, \status, 5
111 bgez \tmp, 10f 122 bgez \tmp, 10f
112 fpu_save_16odd \thread 123 fpu_save_16odd \thread
@@ -159,7 +170,7 @@
159 .endm 170 .endm
160 171
161 .macro fpu_restore_double thread status tmp 172 .macro fpu_restore_double thread status tmp
162#if defined(CONFIG_MIPS64) || defined(CONFIG_CPU_MIPS32_R2) 173#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2)
163 sll \tmp, \status, 5 174 sll \tmp, \status, 5
164 bgez \tmp, 10f # 16 register mode? 175 bgez \tmp, 10f # 16 register mode?
165 176
diff --git a/arch/mips/include/asm/fpu.h b/arch/mips/include/asm/fpu.h
index 6b9749540edf..58e50cbdb1a6 100644
--- a/arch/mips/include/asm/fpu.h
+++ b/arch/mips/include/asm/fpu.h
@@ -57,7 +57,7 @@ static inline int __enable_fpu(enum fpu_mode mode)
57 return 0; 57 return 0;
58 58
59 case FPU_64BIT: 59 case FPU_64BIT:
60#if !(defined(CONFIG_CPU_MIPS32_R2) || defined(CONFIG_MIPS64)) 60#if !(defined(CONFIG_CPU_MIPS32_R2) || defined(CONFIG_64BIT))
61 /* we only have a 32-bit FPU */ 61 /* we only have a 32-bit FPU */
62 return SIGFPE; 62 return SIGFPE;
63#endif 63#endif
diff --git a/arch/mips/include/asm/ftrace.h b/arch/mips/include/asm/ftrace.h
index ce35c9af0c28..992aaba603b5 100644
--- a/arch/mips/include/asm/ftrace.h
+++ b/arch/mips/include/asm/ftrace.h
@@ -22,12 +22,12 @@ extern void _mcount(void);
22#define safe_load(load, src, dst, error) \ 22#define safe_load(load, src, dst, error) \
23do { \ 23do { \
24 asm volatile ( \ 24 asm volatile ( \
25 "1: " load " %[" STR(dst) "], 0(%[" STR(src) "])\n"\ 25 "1: " load " %[tmp_dst], 0(%[tmp_src])\n" \
26 " li %[" STR(error) "], 0\n" \ 26 " li %[tmp_err], 0\n" \
27 "2:\n" \ 27 "2:\n" \
28 \ 28 \
29 ".section .fixup, \"ax\"\n" \ 29 ".section .fixup, \"ax\"\n" \
30 "3: li %[" STR(error) "], 1\n" \ 30 "3: li %[tmp_err], 1\n" \
31 " j 2b\n" \ 31 " j 2b\n" \
32 ".previous\n" \ 32 ".previous\n" \
33 \ 33 \
@@ -35,8 +35,8 @@ do { \
35 STR(PTR) "\t1b, 3b\n\t" \ 35 STR(PTR) "\t1b, 3b\n\t" \
36 ".previous\n" \ 36 ".previous\n" \
37 \ 37 \
38 : [dst] "=&r" (dst), [error] "=r" (error)\ 38 : [tmp_dst] "=&r" (dst), [tmp_err] "=r" (error)\
39 : [src] "r" (src) \ 39 : [tmp_src] "r" (src) \
40 : "memory" \ 40 : "memory" \
41 ); \ 41 ); \
42} while (0) 42} while (0)
@@ -44,12 +44,12 @@ do { \
44#define safe_store(store, src, dst, error) \ 44#define safe_store(store, src, dst, error) \
45do { \ 45do { \
46 asm volatile ( \ 46 asm volatile ( \
47 "1: " store " %[" STR(src) "], 0(%[" STR(dst) "])\n"\ 47 "1: " store " %[tmp_src], 0(%[tmp_dst])\n"\
48 " li %[" STR(error) "], 0\n" \ 48 " li %[tmp_err], 0\n" \
49 "2:\n" \ 49 "2:\n" \
50 \ 50 \
51 ".section .fixup, \"ax\"\n" \ 51 ".section .fixup, \"ax\"\n" \
52 "3: li %[" STR(error) "], 1\n" \ 52 "3: li %[tmp_err], 1\n" \
53 " j 2b\n" \ 53 " j 2b\n" \
54 ".previous\n" \ 54 ".previous\n" \
55 \ 55 \
@@ -57,8 +57,8 @@ do { \
57 STR(PTR) "\t1b, 3b\n\t" \ 57 STR(PTR) "\t1b, 3b\n\t" \
58 ".previous\n" \ 58 ".previous\n" \
59 \ 59 \
60 : [error] "=r" (error) \ 60 : [tmp_err] "=r" (error) \
61 : [dst] "r" (dst), [src] "r" (src)\ 61 : [tmp_dst] "r" (dst), [tmp_src] "r" (src)\
62 : "memory" \ 62 : "memory" \
63 ); \ 63 ); \
64} while (0) 64} while (0)
diff --git a/arch/mips/include/asm/syscall.h b/arch/mips/include/asm/syscall.h
index 33e8dbfc1b63..f35b131977e6 100644
--- a/arch/mips/include/asm/syscall.h
+++ b/arch/mips/include/asm/syscall.h
@@ -13,6 +13,7 @@
13#ifndef __ASM_MIPS_SYSCALL_H 13#ifndef __ASM_MIPS_SYSCALL_H
14#define __ASM_MIPS_SYSCALL_H 14#define __ASM_MIPS_SYSCALL_H
15 15
16#include <linux/compiler.h>
16#include <linux/audit.h> 17#include <linux/audit.h>
17#include <linux/elf-em.h> 18#include <linux/elf-em.h>
18#include <linux/kernel.h> 19#include <linux/kernel.h>
@@ -39,14 +40,14 @@ static inline unsigned long mips_get_syscall_arg(unsigned long *arg,
39 40
40#ifdef CONFIG_32BIT 41#ifdef CONFIG_32BIT
41 case 4: case 5: case 6: case 7: 42 case 4: case 5: case 6: case 7:
42 return get_user(*arg, (int *)usp + 4 * n); 43 return get_user(*arg, (int *)usp + n);
43#endif 44#endif
44 45
45#ifdef CONFIG_64BIT 46#ifdef CONFIG_64BIT
46 case 4: case 5: case 6: case 7: 47 case 4: case 5: case 6: case 7:
47#ifdef CONFIG_MIPS32_O32 48#ifdef CONFIG_MIPS32_O32
48 if (test_thread_flag(TIF_32BIT_REGS)) 49 if (test_thread_flag(TIF_32BIT_REGS))
49 return get_user(*arg, (int *)usp + 4 * n); 50 return get_user(*arg, (int *)usp + n);
50 else 51 else
51#endif 52#endif
52 *arg = regs->regs[4 + n]; 53 *arg = regs->regs[4 + n];
@@ -57,6 +58,8 @@ static inline unsigned long mips_get_syscall_arg(unsigned long *arg,
57 default: 58 default:
58 BUG(); 59 BUG();
59 } 60 }
61
62 unreachable();
60} 63}
61 64
62static inline long syscall_get_return_value(struct task_struct *task, 65static inline long syscall_get_return_value(struct task_struct *task,
@@ -83,11 +86,10 @@ static inline void syscall_get_arguments(struct task_struct *task,
83 unsigned int i, unsigned int n, 86 unsigned int i, unsigned int n,
84 unsigned long *args) 87 unsigned long *args)
85{ 88{
86 unsigned long arg;
87 int ret; 89 int ret;
88 90
89 while (n--) 91 while (n--)
90 ret |= mips_get_syscall_arg(&arg, task, regs, i++); 92 ret |= mips_get_syscall_arg(args++, task, regs, i++);
91 93
92 /* 94 /*
93 * No way to communicate an error because this is a void function. 95 * No way to communicate an error because this is a void function.
diff --git a/arch/mips/include/uapi/asm/inst.h b/arch/mips/include/uapi/asm/inst.h
index b39ba25b41cc..f25181b19941 100644
--- a/arch/mips/include/uapi/asm/inst.h
+++ b/arch/mips/include/uapi/asm/inst.h
@@ -163,8 +163,8 @@ enum cop1_sdw_func {
163 */ 163 */
164enum cop1x_func { 164enum cop1x_func {
165 lwxc1_op = 0x00, ldxc1_op = 0x01, 165 lwxc1_op = 0x00, ldxc1_op = 0x01,
166 pfetch_op = 0x07, swxc1_op = 0x08, 166 swxc1_op = 0x08, sdxc1_op = 0x09,
167 sdxc1_op = 0x09, madd_s_op = 0x20, 167 pfetch_op = 0x0f, madd_s_op = 0x20,
168 madd_d_op = 0x21, madd_e_op = 0x22, 168 madd_d_op = 0x21, madd_e_op = 0x22,
169 msub_s_op = 0x28, msub_d_op = 0x29, 169 msub_s_op = 0x28, msub_d_op = 0x29,
170 msub_e_op = 0x2a, nmadd_s_op = 0x30, 170 msub_e_op = 0x2a, nmadd_s_op = 0x30,
diff --git a/arch/mips/kernel/ftrace.c b/arch/mips/kernel/ftrace.c
index 185ba258361b..374ed74cd516 100644
--- a/arch/mips/kernel/ftrace.c
+++ b/arch/mips/kernel/ftrace.c
@@ -111,11 +111,10 @@ static int ftrace_modify_code_2(unsigned long ip, unsigned int new_code1,
111 safe_store_code(new_code1, ip, faulted); 111 safe_store_code(new_code1, ip, faulted);
112 if (unlikely(faulted)) 112 if (unlikely(faulted))
113 return -EFAULT; 113 return -EFAULT;
114 ip += 4; 114 safe_store_code(new_code2, ip + 4, faulted);
115 safe_store_code(new_code2, ip, faulted);
116 if (unlikely(faulted)) 115 if (unlikely(faulted))
117 return -EFAULT; 116 return -EFAULT;
118 flush_icache_range(ip, ip + 8); /* original ip + 12 */ 117 flush_icache_range(ip, ip + 8);
119 return 0; 118 return 0;
120} 119}
121#endif 120#endif
diff --git a/arch/mips/kernel/r4k_fpu.S b/arch/mips/kernel/r4k_fpu.S
index 253b2fb52026..73b0ddf910d4 100644
--- a/arch/mips/kernel/r4k_fpu.S
+++ b/arch/mips/kernel/r4k_fpu.S
@@ -35,9 +35,9 @@
35LEAF(_save_fp_context) 35LEAF(_save_fp_context)
36 cfc1 t1, fcr31 36 cfc1 t1, fcr31
37 37
38#if defined(CONFIG_64BIT) || defined(CONFIG_MIPS32_R2) 38#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2)
39 .set push 39 .set push
40#ifdef CONFIG_MIPS32_R2 40#ifdef CONFIG_CPU_MIPS32_R2
41 .set mips64r2 41 .set mips64r2
42 mfc0 t0, CP0_STATUS 42 mfc0 t0, CP0_STATUS
43 sll t0, t0, 5 43 sll t0, t0, 5
@@ -146,11 +146,11 @@ LEAF(_save_fp_context32)
146 * - cp1 status/control register 146 * - cp1 status/control register
147 */ 147 */
148LEAF(_restore_fp_context) 148LEAF(_restore_fp_context)
149 EX lw t0, SC_FPC_CSR(a0) 149 EX lw t1, SC_FPC_CSR(a0)
150 150
151#if defined(CONFIG_64BIT) || defined(CONFIG_MIPS32_R2) 151#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2)
152 .set push 152 .set push
153#ifdef CONFIG_MIPS32_R2 153#ifdef CONFIG_CPU_MIPS32_R2
154 .set mips64r2 154 .set mips64r2
155 mfc0 t0, CP0_STATUS 155 mfc0 t0, CP0_STATUS
156 sll t0, t0, 5 156 sll t0, t0, 5
@@ -191,7 +191,7 @@ LEAF(_restore_fp_context)
191 EX ldc1 $f26, SC_FPREGS+208(a0) 191 EX ldc1 $f26, SC_FPREGS+208(a0)
192 EX ldc1 $f28, SC_FPREGS+224(a0) 192 EX ldc1 $f28, SC_FPREGS+224(a0)
193 EX ldc1 $f30, SC_FPREGS+240(a0) 193 EX ldc1 $f30, SC_FPREGS+240(a0)
194 ctc1 t0, fcr31 194 ctc1 t1, fcr31
195 jr ra 195 jr ra
196 li v0, 0 # success 196 li v0, 0 # success
197 END(_restore_fp_context) 197 END(_restore_fp_context)
@@ -199,7 +199,7 @@ LEAF(_restore_fp_context)
199#ifdef CONFIG_MIPS32_COMPAT 199#ifdef CONFIG_MIPS32_COMPAT
200LEAF(_restore_fp_context32) 200LEAF(_restore_fp_context32)
201 /* Restore an o32 sigcontext. */ 201 /* Restore an o32 sigcontext. */
202 EX lw t0, SC32_FPC_CSR(a0) 202 EX lw t1, SC32_FPC_CSR(a0)
203 203
204 mfc0 t0, CP0_STATUS 204 mfc0 t0, CP0_STATUS
205 sll t0, t0, 5 205 sll t0, t0, 5
@@ -239,7 +239,7 @@ LEAF(_restore_fp_context32)
239 EX ldc1 $f26, SC32_FPREGS+208(a0) 239 EX ldc1 $f26, SC32_FPREGS+208(a0)
240 EX ldc1 $f28, SC32_FPREGS+224(a0) 240 EX ldc1 $f28, SC32_FPREGS+224(a0)
241 EX ldc1 $f30, SC32_FPREGS+240(a0) 241 EX ldc1 $f30, SC32_FPREGS+240(a0)
242 ctc1 t0, fcr31 242 ctc1 t1, fcr31
243 jr ra 243 jr ra
244 li v0, 0 # success 244 li v0, 0 # success
245 END(_restore_fp_context32) 245 END(_restore_fp_context32)
diff --git a/arch/mips/kernel/rtlx-cmp.c b/arch/mips/kernel/rtlx-cmp.c
index 56dc69635153..758fb3cd2326 100644
--- a/arch/mips/kernel/rtlx-cmp.c
+++ b/arch/mips/kernel/rtlx-cmp.c
@@ -112,5 +112,8 @@ void __exit rtlx_module_exit(void)
112 112
113 for (i = 0; i < RTLX_CHANNELS; i++) 113 for (i = 0; i < RTLX_CHANNELS; i++)
114 device_destroy(mt_class, MKDEV(major, i)); 114 device_destroy(mt_class, MKDEV(major, i));
115
115 unregister_chrdev(major, RTLX_MODULE_NAME); 116 unregister_chrdev(major, RTLX_MODULE_NAME);
117
118 aprp_hook = NULL;
116} 119}
diff --git a/arch/mips/kernel/rtlx-mt.c b/arch/mips/kernel/rtlx-mt.c
index 91d61ba422b4..9c1aca00fd54 100644
--- a/arch/mips/kernel/rtlx-mt.c
+++ b/arch/mips/kernel/rtlx-mt.c
@@ -144,5 +144,8 @@ void __exit rtlx_module_exit(void)
144 144
145 for (i = 0; i < RTLX_CHANNELS; i++) 145 for (i = 0; i < RTLX_CHANNELS; i++)
146 device_destroy(mt_class, MKDEV(major, i)); 146 device_destroy(mt_class, MKDEV(major, i));
147
147 unregister_chrdev(major, RTLX_MODULE_NAME); 148 unregister_chrdev(major, RTLX_MODULE_NAME);
149
150 aprp_hook = NULL;
148} 151}
diff --git a/arch/mips/math-emu/cp1emu.c b/arch/mips/math-emu/cp1emu.c
index 506925b2c3f3..0b4e2e38294b 100644
--- a/arch/mips/math-emu/cp1emu.c
+++ b/arch/mips/math-emu/cp1emu.c
@@ -1538,10 +1538,10 @@ static int fpux_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
1538 break; 1538 break;
1539 } 1539 }
1540 1540
1541 case 0x7: /* 7 */ 1541 case 0x3:
1542 if (MIPSInst_FUNC(ir) != pfetch_op) { 1542 if (MIPSInst_FUNC(ir) != pfetch_op)
1543 return SIGILL; 1543 return SIGILL;
1544 } 1544
1545 /* ignore prefx operation */ 1545 /* ignore prefx operation */
1546 break; 1546 break;
1547 1547
diff --git a/arch/mips/mti-malta/malta-amon.c b/arch/mips/mti-malta/malta-amon.c
index 592ac0427426..84ac523b0ce0 100644
--- a/arch/mips/mti-malta/malta-amon.c
+++ b/arch/mips/mti-malta/malta-amon.c
@@ -72,7 +72,7 @@ int amon_cpu_start(int cpu,
72 return 0; 72 return 0;
73} 73}
74 74
75#ifdef CONFIG_MIPS_VPE_LOADER 75#ifdef CONFIG_MIPS_VPE_LOADER_CMP
76int vpe_run(struct vpe *v) 76int vpe_run(struct vpe *v)
77{ 77{
78 struct vpe_notifications *n; 78 struct vpe_notifications *n;
diff --git a/arch/mips/mti-malta/malta-int.c b/arch/mips/mti-malta/malta-int.c
index ca3e3a46a42f..2242181a6284 100644
--- a/arch/mips/mti-malta/malta-int.c
+++ b/arch/mips/mti-malta/malta-int.c
@@ -119,7 +119,7 @@ static void malta_hw0_irqdispatch(void)
119 119
120 do_IRQ(MALTA_INT_BASE + irq); 120 do_IRQ(MALTA_INT_BASE + irq);
121 121
122#ifdef MIPS_VPE_APSP_API 122#ifdef CONFIG_MIPS_VPE_APSP_API_MT
123 if (aprp_hook) 123 if (aprp_hook)
124 aprp_hook(); 124 aprp_hook();
125#endif 125#endif
@@ -310,7 +310,7 @@ static void ipi_call_dispatch(void)
310 310
311static irqreturn_t ipi_resched_interrupt(int irq, void *dev_id) 311static irqreturn_t ipi_resched_interrupt(int irq, void *dev_id)
312{ 312{
313#ifdef MIPS_VPE_APSP_API 313#ifdef CONFIG_MIPS_VPE_APSP_API_CMP
314 if (aprp_hook) 314 if (aprp_hook)
315 aprp_hook(); 315 aprp_hook();
316#endif 316#endif
diff --git a/arch/mips/pci/msi-octeon.c b/arch/mips/pci/msi-octeon.c
index d37be36dc659..2b91b0e61566 100644
--- a/arch/mips/pci/msi-octeon.c
+++ b/arch/mips/pci/msi-octeon.c
@@ -150,6 +150,7 @@ msi_irq_allocated:
150 msg.address_lo = 150 msg.address_lo =
151 ((128ul << 20) + CVMX_PCI_MSI_RCV) & 0xffffffff; 151 ((128ul << 20) + CVMX_PCI_MSI_RCV) & 0xffffffff;
152 msg.address_hi = ((128ul << 20) + CVMX_PCI_MSI_RCV) >> 32; 152 msg.address_hi = ((128ul << 20) + CVMX_PCI_MSI_RCV) >> 32;
153 break;
153 case OCTEON_DMA_BAR_TYPE_BIG: 154 case OCTEON_DMA_BAR_TYPE_BIG:
154 /* When using big bar, Bar 0 is based at 0 */ 155 /* When using big bar, Bar 0 is based at 0 */
155 msg.address_lo = (0 + CVMX_PCI_MSI_RCV) & 0xffffffff; 156 msg.address_lo = (0 + CVMX_PCI_MSI_RCV) & 0xffffffff;
diff --git a/arch/parisc/include/asm/page.h b/arch/parisc/include/asm/page.h
index 637fe031aa84..60d5d174dfe4 100644
--- a/arch/parisc/include/asm/page.h
+++ b/arch/parisc/include/asm/page.h
@@ -32,17 +32,6 @@ void copy_page_asm(void *to, void *from);
32void copy_user_page(void *vto, void *vfrom, unsigned long vaddr, 32void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
33 struct page *pg); 33 struct page *pg);
34 34
35/* #define CONFIG_PARISC_TMPALIAS */
36
37#ifdef CONFIG_PARISC_TMPALIAS
38void clear_user_highpage(struct page *page, unsigned long vaddr);
39#define clear_user_highpage clear_user_highpage
40struct vm_area_struct;
41void copy_user_highpage(struct page *to, struct page *from,
42 unsigned long vaddr, struct vm_area_struct *vma);
43#define __HAVE_ARCH_COPY_USER_HIGHPAGE
44#endif
45
46/* 35/*
47 * These are used to make use of C type-checking.. 36 * These are used to make use of C type-checking..
48 */ 37 */
diff --git a/arch/parisc/include/asm/spinlock.h b/arch/parisc/include/asm/spinlock.h
index 3516e0b27044..64f2992e439f 100644
--- a/arch/parisc/include/asm/spinlock.h
+++ b/arch/parisc/include/asm/spinlock.h
@@ -191,8 +191,4 @@ static __inline__ int arch_write_can_lock(arch_rwlock_t *rw)
191#define arch_read_lock_flags(lock, flags) arch_read_lock(lock) 191#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
192#define arch_write_lock_flags(lock, flags) arch_write_lock(lock) 192#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
193 193
194#define arch_spin_relax(lock) cpu_relax()
195#define arch_read_relax(lock) cpu_relax()
196#define arch_write_relax(lock) cpu_relax()
197
198#endif /* __ASM_SPINLOCK_H */ 194#endif /* __ASM_SPINLOCK_H */
diff --git a/arch/parisc/include/uapi/asm/unistd.h b/arch/parisc/include/uapi/asm/unistd.h
index 42706794a36f..265ae5190b0a 100644
--- a/arch/parisc/include/uapi/asm/unistd.h
+++ b/arch/parisc/include/uapi/asm/unistd.h
@@ -828,13 +828,13 @@
828#define __NR_finit_module (__NR_Linux + 333) 828#define __NR_finit_module (__NR_Linux + 333)
829#define __NR_sched_setattr (__NR_Linux + 334) 829#define __NR_sched_setattr (__NR_Linux + 334)
830#define __NR_sched_getattr (__NR_Linux + 335) 830#define __NR_sched_getattr (__NR_Linux + 335)
831#define __NR_utimes (__NR_Linux + 336)
831 832
832#define __NR_Linux_syscalls (__NR_sched_getattr + 1) 833#define __NR_Linux_syscalls (__NR_utimes + 1)
833 834
834 835
835#define __IGNORE_select /* newselect */ 836#define __IGNORE_select /* newselect */
836#define __IGNORE_fadvise64 /* fadvise64_64 */ 837#define __IGNORE_fadvise64 /* fadvise64_64 */
837#define __IGNORE_utimes /* utime */
838 838
839 839
840#define HPUX_GATEWAY_ADDR 0xC0000004 840#define HPUX_GATEWAY_ADDR 0xC0000004
diff --git a/arch/parisc/kernel/cache.c b/arch/parisc/kernel/cache.c
index ac87a40502e6..a6ffc775a9f8 100644
--- a/arch/parisc/kernel/cache.c
+++ b/arch/parisc/kernel/cache.c
@@ -581,67 +581,3 @@ flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long
581 __flush_cache_page(vma, vmaddr, PFN_PHYS(pfn)); 581 __flush_cache_page(vma, vmaddr, PFN_PHYS(pfn));
582 } 582 }
583} 583}
584
585#ifdef CONFIG_PARISC_TMPALIAS
586
587void clear_user_highpage(struct page *page, unsigned long vaddr)
588{
589 void *vto;
590 unsigned long flags;
591
592 /* Clear using TMPALIAS region. The page doesn't need to
593 be flushed but the kernel mapping needs to be purged. */
594
595 vto = kmap_atomic(page);
596
597 /* The PA-RISC 2.0 Architecture book states on page F-6:
598 "Before a write-capable translation is enabled, *all*
599 non-equivalently-aliased translations must be removed
600 from the page table and purged from the TLB. (Note
601 that the caches are not required to be flushed at this
602 time.) Before any non-equivalent aliased translation
603 is re-enabled, the virtual address range for the writeable
604 page (the entire page) must be flushed from the cache,
605 and the write-capable translation removed from the page
606 table and purged from the TLB." */
607
608 purge_kernel_dcache_page_asm((unsigned long)vto);
609 purge_tlb_start(flags);
610 pdtlb_kernel(vto);
611 purge_tlb_end(flags);
612 preempt_disable();
613 clear_user_page_asm(vto, vaddr);
614 preempt_enable();
615
616 pagefault_enable(); /* kunmap_atomic(addr, KM_USER0); */
617}
618
619void copy_user_highpage(struct page *to, struct page *from,
620 unsigned long vaddr, struct vm_area_struct *vma)
621{
622 void *vfrom, *vto;
623 unsigned long flags;
624
625 /* Copy using TMPALIAS region. This has the advantage
626 that the `from' page doesn't need to be flushed. However,
627 the `to' page must be flushed in copy_user_page_asm since
628 it can be used to bring in executable code. */
629
630 vfrom = kmap_atomic(from);
631 vto = kmap_atomic(to);
632
633 purge_kernel_dcache_page_asm((unsigned long)vto);
634 purge_tlb_start(flags);
635 pdtlb_kernel(vto);
636 pdtlb_kernel(vfrom);
637 purge_tlb_end(flags);
638 preempt_disable();
639 copy_user_page_asm(vto, vfrom, vaddr);
640 flush_dcache_page_asm(__pa(vto), vaddr);
641 preempt_enable();
642
643 pagefault_enable(); /* kunmap_atomic(addr, KM_USER1); */
644 pagefault_enable(); /* kunmap_atomic(addr, KM_USER0); */
645}
646
647#endif /* CONFIG_PARISC_TMPALIAS */
diff --git a/arch/parisc/kernel/syscall_table.S b/arch/parisc/kernel/syscall_table.S
index 8fa3fbb3e4d3..80e5dd248934 100644
--- a/arch/parisc/kernel/syscall_table.S
+++ b/arch/parisc/kernel/syscall_table.S
@@ -431,6 +431,7 @@
431 ENTRY_SAME(finit_module) 431 ENTRY_SAME(finit_module)
432 ENTRY_SAME(sched_setattr) 432 ENTRY_SAME(sched_setattr)
433 ENTRY_SAME(sched_getattr) /* 335 */ 433 ENTRY_SAME(sched_getattr) /* 335 */
434 ENTRY_COMP(utimes)
434 435
435 /* Nothing yet */ 436 /* Nothing yet */
436 437
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
index e66d4ec04d95..818dce344e82 100644
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -1504,73 +1504,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
15041: addi r8,r8,16 15041: addi r8,r8,16
1505 .endr 1505 .endr
1506 1506
1507 /* Save DEC */
1508 mfspr r5,SPRN_DEC
1509 mftb r6
1510 extsw r5,r5
1511 add r5,r5,r6
1512 std r5,VCPU_DEC_EXPIRES(r9)
1513
1514BEGIN_FTR_SECTION
1515 b 8f
1516END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
1517 /* Turn on TM so we can access TFHAR/TFIAR/TEXASR */
1518 mfmsr r8
1519 li r0, 1
1520 rldimi r8, r0, MSR_TM_LG, 63-MSR_TM_LG
1521 mtmsrd r8
1522
1523 /* Save POWER8-specific registers */
1524 mfspr r5, SPRN_IAMR
1525 mfspr r6, SPRN_PSPB
1526 mfspr r7, SPRN_FSCR
1527 std r5, VCPU_IAMR(r9)
1528 stw r6, VCPU_PSPB(r9)
1529 std r7, VCPU_FSCR(r9)
1530 mfspr r5, SPRN_IC
1531 mfspr r6, SPRN_VTB
1532 mfspr r7, SPRN_TAR
1533 std r5, VCPU_IC(r9)
1534 std r6, VCPU_VTB(r9)
1535 std r7, VCPU_TAR(r9)
1536#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1537 mfspr r5, SPRN_TFHAR
1538 mfspr r6, SPRN_TFIAR
1539 mfspr r7, SPRN_TEXASR
1540 std r5, VCPU_TFHAR(r9)
1541 std r6, VCPU_TFIAR(r9)
1542 std r7, VCPU_TEXASR(r9)
1543#endif
1544 mfspr r8, SPRN_EBBHR
1545 std r8, VCPU_EBBHR(r9)
1546 mfspr r5, SPRN_EBBRR
1547 mfspr r6, SPRN_BESCR
1548 mfspr r7, SPRN_CSIGR
1549 mfspr r8, SPRN_TACR
1550 std r5, VCPU_EBBRR(r9)
1551 std r6, VCPU_BESCR(r9)
1552 std r7, VCPU_CSIGR(r9)
1553 std r8, VCPU_TACR(r9)
1554 mfspr r5, SPRN_TCSCR
1555 mfspr r6, SPRN_ACOP
1556 mfspr r7, SPRN_PID
1557 mfspr r8, SPRN_WORT
1558 std r5, VCPU_TCSCR(r9)
1559 std r6, VCPU_ACOP(r9)
1560 stw r7, VCPU_GUEST_PID(r9)
1561 std r8, VCPU_WORT(r9)
15628:
1563
1564 /* Save and reset AMR and UAMOR before turning on the MMU */
1565BEGIN_FTR_SECTION
1566 mfspr r5,SPRN_AMR
1567 mfspr r6,SPRN_UAMOR
1568 std r5,VCPU_AMR(r9)
1569 std r6,VCPU_UAMOR(r9)
1570 li r6,0
1571 mtspr SPRN_AMR,r6
1572END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
1573
1574 /* Unset guest mode */ 1507 /* Unset guest mode */
1575 li r0, KVM_GUEST_MODE_NONE 1508 li r0, KVM_GUEST_MODE_NONE
1576 stb r0, HSTATE_IN_GUEST(r13) 1509 stb r0, HSTATE_IN_GUEST(r13)
@@ -2203,7 +2136,7 @@ BEGIN_FTR_SECTION
2203END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) 2136END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
2204#endif 2137#endif
2205 mfspr r6,SPRN_VRSAVE 2138 mfspr r6,SPRN_VRSAVE
2206 stw r6,VCPU_VRSAVE(r3) 2139 stw r6,VCPU_VRSAVE(r31)
2207 mtlr r30 2140 mtlr r30
2208 mtmsrd r5 2141 mtmsrd r5
2209 isync 2142 isync
@@ -2240,7 +2173,7 @@ BEGIN_FTR_SECTION
2240 bl .load_vr_state 2173 bl .load_vr_state
2241END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) 2174END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
2242#endif 2175#endif
2243 lwz r7,VCPU_VRSAVE(r4) 2176 lwz r7,VCPU_VRSAVE(r31)
2244 mtspr SPRN_VRSAVE,r7 2177 mtspr SPRN_VRSAVE,r7
2245 mtlr r30 2178 mtlr r30
2246 mr r4,r31 2179 mr r4,r31
diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c
index 32a280ec38c1..d7b4967f8fa6 100644
--- a/arch/sparc/kernel/process_64.c
+++ b/arch/sparc/kernel/process_64.c
@@ -58,9 +58,12 @@ void arch_cpu_idle(void)
58{ 58{
59 if (tlb_type != hypervisor) { 59 if (tlb_type != hypervisor) {
60 touch_nmi_watchdog(); 60 touch_nmi_watchdog();
61 local_irq_enable();
61 } else { 62 } else {
62 unsigned long pstate; 63 unsigned long pstate;
63 64
65 local_irq_enable();
66
64 /* The sun4v sleeping code requires that we have PSTATE.IE cleared over 67 /* The sun4v sleeping code requires that we have PSTATE.IE cleared over
65 * the cpu sleep hypervisor call. 68 * the cpu sleep hypervisor call.
66 */ 69 */
@@ -82,7 +85,6 @@ void arch_cpu_idle(void)
82 : "=&r" (pstate) 85 : "=&r" (pstate)
83 : "i" (PSTATE_IE)); 86 : "i" (PSTATE_IE));
84 } 87 }
85 local_irq_enable();
86} 88}
87 89
88#ifdef CONFIG_HOTPLUG_CPU 90#ifdef CONFIG_HOTPLUG_CPU
diff --git a/arch/sparc/kernel/syscalls.S b/arch/sparc/kernel/syscalls.S
index 87729fff13b9..33a17e7b3ccd 100644
--- a/arch/sparc/kernel/syscalls.S
+++ b/arch/sparc/kernel/syscalls.S
@@ -189,7 +189,8 @@ linux_sparc_syscall32:
189 mov %i0, %l5 ! IEU1 189 mov %i0, %l5 ! IEU1
1905: call %l7 ! CTI Group brk forced 1905: call %l7 ! CTI Group brk forced
191 srl %i5, 0, %o5 ! IEU1 191 srl %i5, 0, %o5 ! IEU1
192 ba,a,pt %xcc, 3f 192 ba,pt %xcc, 3f
193 sra %o0, 0, %o0
193 194
194 /* Linux native system calls enter here... */ 195 /* Linux native system calls enter here... */
195 .align 32 196 .align 32
@@ -217,7 +218,6 @@ linux_sparc_syscall:
2173: stx %o0, [%sp + PTREGS_OFF + PT_V9_I0] 2183: stx %o0, [%sp + PTREGS_OFF + PT_V9_I0]
218ret_sys_call: 219ret_sys_call:
219 ldx [%sp + PTREGS_OFF + PT_V9_TSTATE], %g3 220 ldx [%sp + PTREGS_OFF + PT_V9_TSTATE], %g3
220 sra %o0, 0, %o0
221 mov %ulo(TSTATE_XCARRY | TSTATE_ICARRY), %g2 221 mov %ulo(TSTATE_XCARRY | TSTATE_ICARRY), %g2
222 sllx %g2, 32, %g2 222 sllx %g2, 32, %g2
223 223
diff --git a/arch/sparc/mm/tsb.c b/arch/sparc/mm/tsb.c
index 3b3a360b429a..f5d506fdddad 100644
--- a/arch/sparc/mm/tsb.c
+++ b/arch/sparc/mm/tsb.c
@@ -273,7 +273,7 @@ void __init pgtable_cache_init(void)
273 prom_halt(); 273 prom_halt();
274 } 274 }
275 275
276 for (i = 0; i < 8; i++) { 276 for (i = 0; i < ARRAY_SIZE(tsb_cache_names); i++) {
277 unsigned long size = 8192 << i; 277 unsigned long size = 8192 << i;
278 const char *name = tsb_cache_names[i]; 278 const char *name = tsb_cache_names[i];
279 279
diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
index 5ad38ad07890..bbc8b12fa443 100644
--- a/arch/x86/include/asm/pgtable.h
+++ b/arch/x86/include/asm/pgtable.h
@@ -445,20 +445,10 @@ static inline int pte_same(pte_t a, pte_t b)
445 return a.pte == b.pte; 445 return a.pte == b.pte;
446} 446}
447 447
448static inline int pteval_present(pteval_t pteval)
449{
450 /*
451 * Yes Linus, _PAGE_PROTNONE == _PAGE_NUMA. Expressing it this
452 * way clearly states that the intent is that protnone and numa
453 * hinting ptes are considered present for the purposes of
454 * pagetable operations like zapping, protection changes, gup etc.
455 */
456 return pteval & (_PAGE_PRESENT | _PAGE_PROTNONE | _PAGE_NUMA);
457}
458
459static inline int pte_present(pte_t a) 448static inline int pte_present(pte_t a)
460{ 449{
461 return pteval_present(pte_flags(a)); 450 return pte_flags(a) & (_PAGE_PRESENT | _PAGE_PROTNONE |
451 _PAGE_NUMA);
462} 452}
463 453
464#define pte_accessible pte_accessible 454#define pte_accessible pte_accessible
diff --git a/arch/x86/include/asm/topology.h b/arch/x86/include/asm/topology.h
index d35f24e231cd..1306d117967d 100644
--- a/arch/x86/include/asm/topology.h
+++ b/arch/x86/include/asm/topology.h
@@ -119,9 +119,10 @@ static inline void setup_node_to_cpumask_map(void) { }
119 119
120extern const struct cpumask *cpu_coregroup_mask(int cpu); 120extern const struct cpumask *cpu_coregroup_mask(int cpu);
121 121
122#ifdef ENABLE_TOPO_DEFINES
123#define topology_physical_package_id(cpu) (cpu_data(cpu).phys_proc_id) 122#define topology_physical_package_id(cpu) (cpu_data(cpu).phys_proc_id)
124#define topology_core_id(cpu) (cpu_data(cpu).cpu_core_id) 123#define topology_core_id(cpu) (cpu_data(cpu).cpu_core_id)
124
125#ifdef ENABLE_TOPO_DEFINES
125#define topology_core_cpumask(cpu) (per_cpu(cpu_core_map, cpu)) 126#define topology_core_cpumask(cpu) (per_cpu(cpu_core_map, cpu))
126#define topology_thread_cpumask(cpu) (per_cpu(cpu_sibling_map, cpu)) 127#define topology_thread_cpumask(cpu) (per_cpu(cpu_sibling_map, cpu))
127#endif 128#endif
diff --git a/arch/x86/kernel/aperture_64.c b/arch/x86/kernel/aperture_64.c
index fd972a3e4cbb..9fa8aa051f54 100644
--- a/arch/x86/kernel/aperture_64.c
+++ b/arch/x86/kernel/aperture_64.c
@@ -18,7 +18,6 @@
18#include <linux/pci_ids.h> 18#include <linux/pci_ids.h>
19#include <linux/pci.h> 19#include <linux/pci.h>
20#include <linux/bitops.h> 20#include <linux/bitops.h>
21#include <linux/ioport.h>
22#include <linux/suspend.h> 21#include <linux/suspend.h>
23#include <asm/e820.h> 22#include <asm/e820.h>
24#include <asm/io.h> 23#include <asm/io.h>
@@ -54,18 +53,6 @@ int fallback_aper_force __initdata;
54 53
55int fix_aperture __initdata = 1; 54int fix_aperture __initdata = 1;
56 55
57static struct resource gart_resource = {
58 .name = "GART",
59 .flags = IORESOURCE_MEM,
60};
61
62static void __init insert_aperture_resource(u32 aper_base, u32 aper_size)
63{
64 gart_resource.start = aper_base;
65 gart_resource.end = aper_base + aper_size - 1;
66 insert_resource(&iomem_resource, &gart_resource);
67}
68
69/* This code runs before the PCI subsystem is initialized, so just 56/* This code runs before the PCI subsystem is initialized, so just
70 access the northbridge directly. */ 57 access the northbridge directly. */
71 58
@@ -96,7 +83,6 @@ static u32 __init allocate_aperture(void)
96 memblock_reserve(addr, aper_size); 83 memblock_reserve(addr, aper_size);
97 printk(KERN_INFO "Mapping aperture over %d KB of RAM @ %lx\n", 84 printk(KERN_INFO "Mapping aperture over %d KB of RAM @ %lx\n",
98 aper_size >> 10, addr); 85 aper_size >> 10, addr);
99 insert_aperture_resource((u32)addr, aper_size);
100 register_nosave_region(addr >> PAGE_SHIFT, 86 register_nosave_region(addr >> PAGE_SHIFT,
101 (addr+aper_size) >> PAGE_SHIFT); 87 (addr+aper_size) >> PAGE_SHIFT);
102 88
@@ -444,12 +430,8 @@ int __init gart_iommu_hole_init(void)
444 430
445out: 431out:
446 if (!fix && !fallback_aper_force) { 432 if (!fix && !fallback_aper_force) {
447 if (last_aper_base) { 433 if (last_aper_base)
448 unsigned long n = (32 * 1024 * 1024) << last_aper_order;
449
450 insert_aperture_resource((u32)last_aper_base, n);
451 return 1; 434 return 1;
452 }
453 return 0; 435 return 0;
454 } 436 }
455 437
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index 256282e7888b..2423ef04ffea 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -365,7 +365,7 @@ void xen_ptep_modify_prot_commit(struct mm_struct *mm, unsigned long addr,
365/* Assume pteval_t is equivalent to all the other *val_t types. */ 365/* Assume pteval_t is equivalent to all the other *val_t types. */
366static pteval_t pte_mfn_to_pfn(pteval_t val) 366static pteval_t pte_mfn_to_pfn(pteval_t val)
367{ 367{
368 if (pteval_present(val)) { 368 if (val & _PAGE_PRESENT) {
369 unsigned long mfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT; 369 unsigned long mfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT;
370 unsigned long pfn = mfn_to_pfn(mfn); 370 unsigned long pfn = mfn_to_pfn(mfn);
371 371
@@ -381,7 +381,7 @@ static pteval_t pte_mfn_to_pfn(pteval_t val)
381 381
382static pteval_t pte_pfn_to_mfn(pteval_t val) 382static pteval_t pte_pfn_to_mfn(pteval_t val)
383{ 383{
384 if (pteval_present(val)) { 384 if (val & _PAGE_PRESENT) {
385 unsigned long pfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT; 385 unsigned long pfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT;
386 pteval_t flags = val & PTE_FLAGS_MASK; 386 pteval_t flags = val & PTE_FLAGS_MASK;
387 unsigned long mfn; 387 unsigned long mfn;
diff --git a/block/blk-core.c b/block/blk-core.c
index 853f92749202..bfe16d5af9f9 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -693,20 +693,11 @@ blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id)
693 if (!uninit_q) 693 if (!uninit_q)
694 return NULL; 694 return NULL;
695 695
696 uninit_q->flush_rq = kzalloc(sizeof(struct request), GFP_KERNEL);
697 if (!uninit_q->flush_rq)
698 goto out_cleanup_queue;
699
700 q = blk_init_allocated_queue(uninit_q, rfn, lock); 696 q = blk_init_allocated_queue(uninit_q, rfn, lock);
701 if (!q) 697 if (!q)
702 goto out_free_flush_rq; 698 blk_cleanup_queue(uninit_q);
703 return q;
704 699
705out_free_flush_rq: 700 return q;
706 kfree(uninit_q->flush_rq);
707out_cleanup_queue:
708 blk_cleanup_queue(uninit_q);
709 return NULL;
710} 701}
711EXPORT_SYMBOL(blk_init_queue_node); 702EXPORT_SYMBOL(blk_init_queue_node);
712 703
@@ -717,9 +708,13 @@ blk_init_allocated_queue(struct request_queue *q, request_fn_proc *rfn,
717 if (!q) 708 if (!q)
718 return NULL; 709 return NULL;
719 710
720 if (blk_init_rl(&q->root_rl, q, GFP_KERNEL)) 711 q->flush_rq = kzalloc(sizeof(struct request), GFP_KERNEL);
712 if (!q->flush_rq)
721 return NULL; 713 return NULL;
722 714
715 if (blk_init_rl(&q->root_rl, q, GFP_KERNEL))
716 goto fail;
717
723 q->request_fn = rfn; 718 q->request_fn = rfn;
724 q->prep_rq_fn = NULL; 719 q->prep_rq_fn = NULL;
725 q->unprep_rq_fn = NULL; 720 q->unprep_rq_fn = NULL;
@@ -742,12 +737,16 @@ blk_init_allocated_queue(struct request_queue *q, request_fn_proc *rfn,
742 /* init elevator */ 737 /* init elevator */
743 if (elevator_init(q, NULL)) { 738 if (elevator_init(q, NULL)) {
744 mutex_unlock(&q->sysfs_lock); 739 mutex_unlock(&q->sysfs_lock);
745 return NULL; 740 goto fail;
746 } 741 }
747 742
748 mutex_unlock(&q->sysfs_lock); 743 mutex_unlock(&q->sysfs_lock);
749 744
750 return q; 745 return q;
746
747fail:
748 kfree(q->flush_rq);
749 return NULL;
751} 750}
752EXPORT_SYMBOL(blk_init_allocated_queue); 751EXPORT_SYMBOL(blk_init_allocated_queue);
753 752
diff --git a/block/blk-flush.c b/block/blk-flush.c
index f598f794c3c6..43e6b4755e9a 100644
--- a/block/blk-flush.c
+++ b/block/blk-flush.c
@@ -140,14 +140,17 @@ static void mq_flush_run(struct work_struct *work)
140 blk_mq_insert_request(rq, false, true, false); 140 blk_mq_insert_request(rq, false, true, false);
141} 141}
142 142
143static bool blk_flush_queue_rq(struct request *rq) 143static bool blk_flush_queue_rq(struct request *rq, bool add_front)
144{ 144{
145 if (rq->q->mq_ops) { 145 if (rq->q->mq_ops) {
146 INIT_WORK(&rq->mq_flush_work, mq_flush_run); 146 INIT_WORK(&rq->mq_flush_work, mq_flush_run);
147 kblockd_schedule_work(rq->q, &rq->mq_flush_work); 147 kblockd_schedule_work(rq->q, &rq->mq_flush_work);
148 return false; 148 return false;
149 } else { 149 } else {
150 list_add_tail(&rq->queuelist, &rq->q->queue_head); 150 if (add_front)
151 list_add(&rq->queuelist, &rq->q->queue_head);
152 else
153 list_add_tail(&rq->queuelist, &rq->q->queue_head);
151 return true; 154 return true;
152 } 155 }
153} 156}
@@ -193,7 +196,7 @@ static bool blk_flush_complete_seq(struct request *rq, unsigned int seq,
193 196
194 case REQ_FSEQ_DATA: 197 case REQ_FSEQ_DATA:
195 list_move_tail(&rq->flush.list, &q->flush_data_in_flight); 198 list_move_tail(&rq->flush.list, &q->flush_data_in_flight);
196 queued = blk_flush_queue_rq(rq); 199 queued = blk_flush_queue_rq(rq, true);
197 break; 200 break;
198 201
199 case REQ_FSEQ_DONE: 202 case REQ_FSEQ_DONE:
@@ -326,7 +329,7 @@ static bool blk_kick_flush(struct request_queue *q)
326 q->flush_rq->rq_disk = first_rq->rq_disk; 329 q->flush_rq->rq_disk = first_rq->rq_disk;
327 q->flush_rq->end_io = flush_end_io; 330 q->flush_rq->end_io = flush_end_io;
328 331
329 return blk_flush_queue_rq(q->flush_rq); 332 return blk_flush_queue_rq(q->flush_rq, false);
330} 333}
331 334
332static void flush_data_end_io(struct request *rq, int error) 335static void flush_data_end_io(struct request *rq, int error)
diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c
index 516026954be6..d777bb7cea93 100644
--- a/drivers/block/mtip32xx/mtip32xx.c
+++ b/drivers/block/mtip32xx/mtip32xx.c
@@ -4498,7 +4498,7 @@ static int mtip_pci_probe(struct pci_dev *pdev,
4498 } 4498 }
4499 dev_info(&pdev->dev, "NUMA node %d (closest: %d,%d, probe on %d:%d)\n", 4499 dev_info(&pdev->dev, "NUMA node %d (closest: %d,%d, probe on %d:%d)\n",
4500 my_node, pcibus_to_node(pdev->bus), dev_to_node(&pdev->dev), 4500 my_node, pcibus_to_node(pdev->bus), dev_to_node(&pdev->dev),
4501 cpu_to_node(smp_processor_id()), smp_processor_id()); 4501 cpu_to_node(raw_smp_processor_id()), raw_smp_processor_id());
4502 4502
4503 dd = kzalloc_node(sizeof(struct driver_data), GFP_KERNEL, my_node); 4503 dd = kzalloc_node(sizeof(struct driver_data), GFP_KERNEL, my_node);
4504 if (dd == NULL) { 4504 if (dd == NULL) {
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index b365e0dfccb6..34898d53395b 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -2109,7 +2109,6 @@ static void rbd_img_obj_callback(struct rbd_obj_request *obj_request)
2109 rbd_assert(img_request->obj_request_count > 0); 2109 rbd_assert(img_request->obj_request_count > 0);
2110 rbd_assert(which != BAD_WHICH); 2110 rbd_assert(which != BAD_WHICH);
2111 rbd_assert(which < img_request->obj_request_count); 2111 rbd_assert(which < img_request->obj_request_count);
2112 rbd_assert(which >= img_request->next_completion);
2113 2112
2114 spin_lock_irq(&img_request->completion_lock); 2113 spin_lock_irq(&img_request->completion_lock);
2115 if (which != img_request->next_completion) 2114 if (which != img_request->next_completion)
diff --git a/drivers/clocksource/vf_pit_timer.c b/drivers/clocksource/vf_pit_timer.c
index 02821b06a39e..a918bc481c52 100644
--- a/drivers/clocksource/vf_pit_timer.c
+++ b/drivers/clocksource/vf_pit_timer.c
@@ -54,7 +54,7 @@ static inline void pit_irq_acknowledge(void)
54 54
55static u64 pit_read_sched_clock(void) 55static u64 pit_read_sched_clock(void)
56{ 56{
57 return __raw_readl(clksrc_base + PITCVAL); 57 return ~__raw_readl(clksrc_base + PITCVAL);
58} 58}
59 59
60static int __init pit_clocksource_init(unsigned long rate) 60static int __init pit_clocksource_init(unsigned long rate)
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index d04786db9627..195fe5bc0aac 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -561,7 +561,7 @@ static int i915_gem_request_info(struct seq_file *m, void *data)
561{ 561{
562 struct drm_info_node *node = (struct drm_info_node *) m->private; 562 struct drm_info_node *node = (struct drm_info_node *) m->private;
563 struct drm_device *dev = node->minor->dev; 563 struct drm_device *dev = node->minor->dev;
564 drm_i915_private_t *dev_priv = dev->dev_private; 564 struct drm_i915_private *dev_priv = dev->dev_private;
565 struct intel_ring_buffer *ring; 565 struct intel_ring_buffer *ring;
566 struct drm_i915_gem_request *gem_request; 566 struct drm_i915_gem_request *gem_request;
567 int ret, count, i; 567 int ret, count, i;
@@ -606,7 +606,7 @@ static int i915_gem_seqno_info(struct seq_file *m, void *data)
606{ 606{
607 struct drm_info_node *node = (struct drm_info_node *) m->private; 607 struct drm_info_node *node = (struct drm_info_node *) m->private;
608 struct drm_device *dev = node->minor->dev; 608 struct drm_device *dev = node->minor->dev;
609 drm_i915_private_t *dev_priv = dev->dev_private; 609 struct drm_i915_private *dev_priv = dev->dev_private;
610 struct intel_ring_buffer *ring; 610 struct intel_ring_buffer *ring;
611 int ret, i; 611 int ret, i;
612 612
@@ -629,7 +629,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
629{ 629{
630 struct drm_info_node *node = (struct drm_info_node *) m->private; 630 struct drm_info_node *node = (struct drm_info_node *) m->private;
631 struct drm_device *dev = node->minor->dev; 631 struct drm_device *dev = node->minor->dev;
632 drm_i915_private_t *dev_priv = dev->dev_private; 632 struct drm_i915_private *dev_priv = dev->dev_private;
633 struct intel_ring_buffer *ring; 633 struct intel_ring_buffer *ring;
634 int ret, i, pipe; 634 int ret, i, pipe;
635 635
@@ -770,7 +770,7 @@ static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
770{ 770{
771 struct drm_info_node *node = (struct drm_info_node *) m->private; 771 struct drm_info_node *node = (struct drm_info_node *) m->private;
772 struct drm_device *dev = node->minor->dev; 772 struct drm_device *dev = node->minor->dev;
773 drm_i915_private_t *dev_priv = dev->dev_private; 773 struct drm_i915_private *dev_priv = dev->dev_private;
774 int i, ret; 774 int i, ret;
775 775
776 ret = mutex_lock_interruptible(&dev->struct_mutex); 776 ret = mutex_lock_interruptible(&dev->struct_mutex);
@@ -799,7 +799,7 @@ static int i915_hws_info(struct seq_file *m, void *data)
799{ 799{
800 struct drm_info_node *node = (struct drm_info_node *) m->private; 800 struct drm_info_node *node = (struct drm_info_node *) m->private;
801 struct drm_device *dev = node->minor->dev; 801 struct drm_device *dev = node->minor->dev;
802 drm_i915_private_t *dev_priv = dev->dev_private; 802 struct drm_i915_private *dev_priv = dev->dev_private;
803 struct intel_ring_buffer *ring; 803 struct intel_ring_buffer *ring;
804 const u32 *hws; 804 const u32 *hws;
805 int i; 805 int i;
@@ -910,7 +910,7 @@ static int
910i915_next_seqno_get(void *data, u64 *val) 910i915_next_seqno_get(void *data, u64 *val)
911{ 911{
912 struct drm_device *dev = data; 912 struct drm_device *dev = data;
913 drm_i915_private_t *dev_priv = dev->dev_private; 913 struct drm_i915_private *dev_priv = dev->dev_private;
914 int ret; 914 int ret;
915 915
916 ret = mutex_lock_interruptible(&dev->struct_mutex); 916 ret = mutex_lock_interruptible(&dev->struct_mutex);
@@ -947,7 +947,7 @@ static int i915_rstdby_delays(struct seq_file *m, void *unused)
947{ 947{
948 struct drm_info_node *node = (struct drm_info_node *) m->private; 948 struct drm_info_node *node = (struct drm_info_node *) m->private;
949 struct drm_device *dev = node->minor->dev; 949 struct drm_device *dev = node->minor->dev;
950 drm_i915_private_t *dev_priv = dev->dev_private; 950 struct drm_i915_private *dev_priv = dev->dev_private;
951 u16 crstanddelay; 951 u16 crstanddelay;
952 int ret; 952 int ret;
953 953
@@ -970,7 +970,7 @@ static int i915_cur_delayinfo(struct seq_file *m, void *unused)
970{ 970{
971 struct drm_info_node *node = (struct drm_info_node *) m->private; 971 struct drm_info_node *node = (struct drm_info_node *) m->private;
972 struct drm_device *dev = node->minor->dev; 972 struct drm_device *dev = node->minor->dev;
973 drm_i915_private_t *dev_priv = dev->dev_private; 973 struct drm_i915_private *dev_priv = dev->dev_private;
974 int ret = 0; 974 int ret = 0;
975 975
976 intel_runtime_pm_get(dev_priv); 976 intel_runtime_pm_get(dev_priv);
@@ -1096,7 +1096,7 @@ static int i915_delayfreq_table(struct seq_file *m, void *unused)
1096{ 1096{
1097 struct drm_info_node *node = (struct drm_info_node *) m->private; 1097 struct drm_info_node *node = (struct drm_info_node *) m->private;
1098 struct drm_device *dev = node->minor->dev; 1098 struct drm_device *dev = node->minor->dev;
1099 drm_i915_private_t *dev_priv = dev->dev_private; 1099 struct drm_i915_private *dev_priv = dev->dev_private;
1100 u32 delayfreq; 1100 u32 delayfreq;
1101 int ret, i; 1101 int ret, i;
1102 1102
@@ -1127,7 +1127,7 @@ static int i915_inttoext_table(struct seq_file *m, void *unused)
1127{ 1127{
1128 struct drm_info_node *node = (struct drm_info_node *) m->private; 1128 struct drm_info_node *node = (struct drm_info_node *) m->private;
1129 struct drm_device *dev = node->minor->dev; 1129 struct drm_device *dev = node->minor->dev;
1130 drm_i915_private_t *dev_priv = dev->dev_private; 1130 struct drm_i915_private *dev_priv = dev->dev_private;
1131 u32 inttoext; 1131 u32 inttoext;
1132 int ret, i; 1132 int ret, i;
1133 1133
@@ -1151,7 +1151,7 @@ static int ironlake_drpc_info(struct seq_file *m)
1151{ 1151{
1152 struct drm_info_node *node = (struct drm_info_node *) m->private; 1152 struct drm_info_node *node = (struct drm_info_node *) m->private;
1153 struct drm_device *dev = node->minor->dev; 1153 struct drm_device *dev = node->minor->dev;
1154 drm_i915_private_t *dev_priv = dev->dev_private; 1154 struct drm_i915_private *dev_priv = dev->dev_private;
1155 u32 rgvmodectl, rstdbyctl; 1155 u32 rgvmodectl, rstdbyctl;
1156 u16 crstandvid; 1156 u16 crstandvid;
1157 int ret; 1157 int ret;
@@ -1377,7 +1377,7 @@ static int i915_fbc_status(struct seq_file *m, void *unused)
1377{ 1377{
1378 struct drm_info_node *node = (struct drm_info_node *) m->private; 1378 struct drm_info_node *node = (struct drm_info_node *) m->private;
1379 struct drm_device *dev = node->minor->dev; 1379 struct drm_device *dev = node->minor->dev;
1380 drm_i915_private_t *dev_priv = dev->dev_private; 1380 struct drm_i915_private *dev_priv = dev->dev_private;
1381 1381
1382 if (!HAS_FBC(dev)) { 1382 if (!HAS_FBC(dev)) {
1383 seq_puts(m, "FBC unsupported on this chipset\n"); 1383 seq_puts(m, "FBC unsupported on this chipset\n");
@@ -1462,7 +1462,7 @@ static int i915_sr_status(struct seq_file *m, void *unused)
1462{ 1462{
1463 struct drm_info_node *node = (struct drm_info_node *) m->private; 1463 struct drm_info_node *node = (struct drm_info_node *) m->private;
1464 struct drm_device *dev = node->minor->dev; 1464 struct drm_device *dev = node->minor->dev;
1465 drm_i915_private_t *dev_priv = dev->dev_private; 1465 struct drm_i915_private *dev_priv = dev->dev_private;
1466 bool sr_enabled = false; 1466 bool sr_enabled = false;
1467 1467
1468 intel_runtime_pm_get(dev_priv); 1468 intel_runtime_pm_get(dev_priv);
@@ -1488,7 +1488,7 @@ static int i915_emon_status(struct seq_file *m, void *unused)
1488{ 1488{
1489 struct drm_info_node *node = (struct drm_info_node *) m->private; 1489 struct drm_info_node *node = (struct drm_info_node *) m->private;
1490 struct drm_device *dev = node->minor->dev; 1490 struct drm_device *dev = node->minor->dev;
1491 drm_i915_private_t *dev_priv = dev->dev_private; 1491 struct drm_i915_private *dev_priv = dev->dev_private;
1492 unsigned long temp, chipset, gfx; 1492 unsigned long temp, chipset, gfx;
1493 int ret; 1493 int ret;
1494 1494
@@ -1516,7 +1516,7 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused)
1516{ 1516{
1517 struct drm_info_node *node = (struct drm_info_node *) m->private; 1517 struct drm_info_node *node = (struct drm_info_node *) m->private;
1518 struct drm_device *dev = node->minor->dev; 1518 struct drm_device *dev = node->minor->dev;
1519 drm_i915_private_t *dev_priv = dev->dev_private; 1519 struct drm_i915_private *dev_priv = dev->dev_private;
1520 int ret = 0; 1520 int ret = 0;
1521 int gpu_freq, ia_freq; 1521 int gpu_freq, ia_freq;
1522 1522
@@ -1559,7 +1559,7 @@ static int i915_gfxec(struct seq_file *m, void *unused)
1559{ 1559{
1560 struct drm_info_node *node = (struct drm_info_node *) m->private; 1560 struct drm_info_node *node = (struct drm_info_node *) m->private;
1561 struct drm_device *dev = node->minor->dev; 1561 struct drm_device *dev = node->minor->dev;
1562 drm_i915_private_t *dev_priv = dev->dev_private; 1562 struct drm_i915_private *dev_priv = dev->dev_private;
1563 int ret; 1563 int ret;
1564 1564
1565 ret = mutex_lock_interruptible(&dev->struct_mutex); 1565 ret = mutex_lock_interruptible(&dev->struct_mutex);
@@ -1579,7 +1579,7 @@ static int i915_opregion(struct seq_file *m, void *unused)
1579{ 1579{
1580 struct drm_info_node *node = (struct drm_info_node *) m->private; 1580 struct drm_info_node *node = (struct drm_info_node *) m->private;
1581 struct drm_device *dev = node->minor->dev; 1581 struct drm_device *dev = node->minor->dev;
1582 drm_i915_private_t *dev_priv = dev->dev_private; 1582 struct drm_i915_private *dev_priv = dev->dev_private;
1583 struct intel_opregion *opregion = &dev_priv->opregion; 1583 struct intel_opregion *opregion = &dev_priv->opregion;
1584 void *data = kmalloc(OPREGION_SIZE, GFP_KERNEL); 1584 void *data = kmalloc(OPREGION_SIZE, GFP_KERNEL);
1585 int ret; 1585 int ret;
@@ -1653,7 +1653,7 @@ static int i915_context_status(struct seq_file *m, void *unused)
1653{ 1653{
1654 struct drm_info_node *node = (struct drm_info_node *) m->private; 1654 struct drm_info_node *node = (struct drm_info_node *) m->private;
1655 struct drm_device *dev = node->minor->dev; 1655 struct drm_device *dev = node->minor->dev;
1656 drm_i915_private_t *dev_priv = dev->dev_private; 1656 struct drm_i915_private *dev_priv = dev->dev_private;
1657 struct intel_ring_buffer *ring; 1657 struct intel_ring_buffer *ring;
1658 struct i915_hw_context *ctx; 1658 struct i915_hw_context *ctx;
1659 int ret, i; 1659 int ret, i;
@@ -2319,9 +2319,11 @@ static int i915_display_info(struct seq_file *m, void *unused)
2319{ 2319{
2320 struct drm_info_node *node = (struct drm_info_node *) m->private; 2320 struct drm_info_node *node = (struct drm_info_node *) m->private;
2321 struct drm_device *dev = node->minor->dev; 2321 struct drm_device *dev = node->minor->dev;
2322 struct drm_i915_private *dev_priv = dev->dev_private;
2322 struct intel_crtc *crtc; 2323 struct intel_crtc *crtc;
2323 struct drm_connector *connector; 2324 struct drm_connector *connector;
2324 2325
2326 intel_runtime_pm_get(dev_priv);
2325 drm_modeset_lock_all(dev); 2327 drm_modeset_lock_all(dev);
2326 seq_printf(m, "CRTC info\n"); 2328 seq_printf(m, "CRTC info\n");
2327 seq_printf(m, "---------\n"); 2329 seq_printf(m, "---------\n");
@@ -2332,14 +2334,15 @@ static int i915_display_info(struct seq_file *m, void *unused)
2332 seq_printf(m, "CRTC %d: pipe: %c, active: %s\n", 2334 seq_printf(m, "CRTC %d: pipe: %c, active: %s\n",
2333 crtc->base.base.id, pipe_name(crtc->pipe), 2335 crtc->base.base.id, pipe_name(crtc->pipe),
2334 yesno(crtc->active)); 2336 yesno(crtc->active));
2335 if (crtc->active) 2337 if (crtc->active) {
2336 intel_crtc_info(m, crtc); 2338 intel_crtc_info(m, crtc);
2337 2339
2338 active = cursor_position(dev, crtc->pipe, &x, &y); 2340 active = cursor_position(dev, crtc->pipe, &x, &y);
2339 seq_printf(m, "\tcursor visible? %s, position (%d, %d), addr 0x%08x, active? %s\n", 2341 seq_printf(m, "\tcursor visible? %s, position (%d, %d), addr 0x%08x, active? %s\n",
2340 yesno(crtc->cursor_visible), 2342 yesno(crtc->cursor_visible),
2341 x, y, crtc->cursor_addr, 2343 x, y, crtc->cursor_addr,
2342 yesno(active)); 2344 yesno(active));
2345 }
2343 } 2346 }
2344 2347
2345 seq_printf(m, "\n"); 2348 seq_printf(m, "\n");
@@ -2349,6 +2352,7 @@ static int i915_display_info(struct seq_file *m, void *unused)
2349 intel_connector_info(m, connector); 2352 intel_connector_info(m, connector);
2350 } 2353 }
2351 drm_modeset_unlock_all(dev); 2354 drm_modeset_unlock_all(dev);
2355 intel_runtime_pm_put(dev_priv);
2352 2356
2353 return 0; 2357 return 0;
2354} 2358}
@@ -3271,7 +3275,7 @@ static int
3271i915_wedged_get(void *data, u64 *val) 3275i915_wedged_get(void *data, u64 *val)
3272{ 3276{
3273 struct drm_device *dev = data; 3277 struct drm_device *dev = data;
3274 drm_i915_private_t *dev_priv = dev->dev_private; 3278 struct drm_i915_private *dev_priv = dev->dev_private;
3275 3279
3276 *val = atomic_read(&dev_priv->gpu_error.reset_counter); 3280 *val = atomic_read(&dev_priv->gpu_error.reset_counter);
3277 3281
@@ -3296,7 +3300,7 @@ static int
3296i915_ring_stop_get(void *data, u64 *val) 3300i915_ring_stop_get(void *data, u64 *val)
3297{ 3301{
3298 struct drm_device *dev = data; 3302 struct drm_device *dev = data;
3299 drm_i915_private_t *dev_priv = dev->dev_private; 3303 struct drm_i915_private *dev_priv = dev->dev_private;
3300 3304
3301 *val = dev_priv->gpu_error.stop_rings; 3305 *val = dev_priv->gpu_error.stop_rings;
3302 3306
@@ -3473,7 +3477,7 @@ static int
3473i915_max_freq_get(void *data, u64 *val) 3477i915_max_freq_get(void *data, u64 *val)
3474{ 3478{
3475 struct drm_device *dev = data; 3479 struct drm_device *dev = data;
3476 drm_i915_private_t *dev_priv = dev->dev_private; 3480 struct drm_i915_private *dev_priv = dev->dev_private;
3477 int ret; 3481 int ret;
3478 3482
3479 if (!(IS_GEN6(dev) || IS_GEN7(dev))) 3483 if (!(IS_GEN6(dev) || IS_GEN7(dev)))
@@ -3554,7 +3558,7 @@ static int
3554i915_min_freq_get(void *data, u64 *val) 3558i915_min_freq_get(void *data, u64 *val)
3555{ 3559{
3556 struct drm_device *dev = data; 3560 struct drm_device *dev = data;
3557 drm_i915_private_t *dev_priv = dev->dev_private; 3561 struct drm_i915_private *dev_priv = dev->dev_private;
3558 int ret; 3562 int ret;
3559 3563
3560 if (!(IS_GEN6(dev) || IS_GEN7(dev))) 3564 if (!(IS_GEN6(dev) || IS_GEN7(dev)))
@@ -3635,7 +3639,7 @@ static int
3635i915_cache_sharing_get(void *data, u64 *val) 3639i915_cache_sharing_get(void *data, u64 *val)
3636{ 3640{
3637 struct drm_device *dev = data; 3641 struct drm_device *dev = data;
3638 drm_i915_private_t *dev_priv = dev->dev_private; 3642 struct drm_i915_private *dev_priv = dev->dev_private;
3639 u32 snpcr; 3643 u32 snpcr;
3640 int ret; 3644 int ret;
3641 3645
@@ -3695,7 +3699,6 @@ static int i915_forcewake_open(struct inode *inode, struct file *file)
3695 if (INTEL_INFO(dev)->gen < 6) 3699 if (INTEL_INFO(dev)->gen < 6)
3696 return 0; 3700 return 0;
3697 3701
3698 intel_runtime_pm_get(dev_priv);
3699 gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL); 3702 gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
3700 3703
3701 return 0; 3704 return 0;
@@ -3710,7 +3713,6 @@ static int i915_forcewake_release(struct inode *inode, struct file *file)
3710 return 0; 3713 return 0;
3711 3714
3712 gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL); 3715 gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
3713 intel_runtime_pm_put(dev_priv);
3714 3716
3715 return 0; 3717 return 0;
3716} 3718}
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 4e0a26a83500..96177eec0a0e 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -82,7 +82,7 @@ intel_read_legacy_status_page(struct drm_i915_private *dev_priv, int reg)
82 82
83void i915_update_dri1_breadcrumb(struct drm_device *dev) 83void i915_update_dri1_breadcrumb(struct drm_device *dev)
84{ 84{
85 drm_i915_private_t *dev_priv = dev->dev_private; 85 struct drm_i915_private *dev_priv = dev->dev_private;
86 struct drm_i915_master_private *master_priv; 86 struct drm_i915_master_private *master_priv;
87 87
88 /* 88 /*
@@ -103,7 +103,7 @@ void i915_update_dri1_breadcrumb(struct drm_device *dev)
103 103
104static void i915_write_hws_pga(struct drm_device *dev) 104static void i915_write_hws_pga(struct drm_device *dev)
105{ 105{
106 drm_i915_private_t *dev_priv = dev->dev_private; 106 struct drm_i915_private *dev_priv = dev->dev_private;
107 u32 addr; 107 u32 addr;
108 108
109 addr = dev_priv->status_page_dmah->busaddr; 109 addr = dev_priv->status_page_dmah->busaddr;
@@ -118,7 +118,7 @@ static void i915_write_hws_pga(struct drm_device *dev)
118 */ 118 */
119static void i915_free_hws(struct drm_device *dev) 119static void i915_free_hws(struct drm_device *dev)
120{ 120{
121 drm_i915_private_t *dev_priv = dev->dev_private; 121 struct drm_i915_private *dev_priv = dev->dev_private;
122 struct intel_ring_buffer *ring = LP_RING(dev_priv); 122 struct intel_ring_buffer *ring = LP_RING(dev_priv);
123 123
124 if (dev_priv->status_page_dmah) { 124 if (dev_priv->status_page_dmah) {
@@ -137,7 +137,7 @@ static void i915_free_hws(struct drm_device *dev)
137 137
138void i915_kernel_lost_context(struct drm_device * dev) 138void i915_kernel_lost_context(struct drm_device * dev)
139{ 139{
140 drm_i915_private_t *dev_priv = dev->dev_private; 140 struct drm_i915_private *dev_priv = dev->dev_private;
141 struct drm_i915_master_private *master_priv; 141 struct drm_i915_master_private *master_priv;
142 struct intel_ring_buffer *ring = LP_RING(dev_priv); 142 struct intel_ring_buffer *ring = LP_RING(dev_priv);
143 143
@@ -164,7 +164,7 @@ void i915_kernel_lost_context(struct drm_device * dev)
164 164
165static int i915_dma_cleanup(struct drm_device * dev) 165static int i915_dma_cleanup(struct drm_device * dev)
166{ 166{
167 drm_i915_private_t *dev_priv = dev->dev_private; 167 struct drm_i915_private *dev_priv = dev->dev_private;
168 int i; 168 int i;
169 169
170 /* Make sure interrupts are disabled here because the uninstall ioctl 170 /* Make sure interrupts are disabled here because the uninstall ioctl
@@ -188,7 +188,7 @@ static int i915_dma_cleanup(struct drm_device * dev)
188 188
189static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init) 189static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init)
190{ 190{
191 drm_i915_private_t *dev_priv = dev->dev_private; 191 struct drm_i915_private *dev_priv = dev->dev_private;
192 struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv; 192 struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
193 int ret; 193 int ret;
194 194
@@ -233,7 +233,7 @@ static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init)
233 233
234static int i915_dma_resume(struct drm_device * dev) 234static int i915_dma_resume(struct drm_device * dev)
235{ 235{
236 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 236 struct drm_i915_private *dev_priv = dev->dev_private;
237 struct intel_ring_buffer *ring = LP_RING(dev_priv); 237 struct intel_ring_buffer *ring = LP_RING(dev_priv);
238 238
239 DRM_DEBUG_DRIVER("%s\n", __func__); 239 DRM_DEBUG_DRIVER("%s\n", __func__);
@@ -357,7 +357,7 @@ static int validate_cmd(int cmd)
357 357
358static int i915_emit_cmds(struct drm_device * dev, int *buffer, int dwords) 358static int i915_emit_cmds(struct drm_device * dev, int *buffer, int dwords)
359{ 359{
360 drm_i915_private_t *dev_priv = dev->dev_private; 360 struct drm_i915_private *dev_priv = dev->dev_private;
361 int i, ret; 361 int i, ret;
362 362
363 if ((dwords+1) * sizeof(int) >= LP_RING(dev_priv)->size - 8) 363 if ((dwords+1) * sizeof(int) >= LP_RING(dev_priv)->size - 8)
@@ -431,7 +431,7 @@ i915_emit_box(struct drm_device *dev,
431 431
432static void i915_emit_breadcrumb(struct drm_device *dev) 432static void i915_emit_breadcrumb(struct drm_device *dev)
433{ 433{
434 drm_i915_private_t *dev_priv = dev->dev_private; 434 struct drm_i915_private *dev_priv = dev->dev_private;
435 struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv; 435 struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
436 436
437 dev_priv->dri1.counter++; 437 dev_priv->dri1.counter++;
@@ -547,7 +547,7 @@ static int i915_dispatch_batchbuffer(struct drm_device * dev,
547 547
548static int i915_dispatch_flip(struct drm_device * dev) 548static int i915_dispatch_flip(struct drm_device * dev)
549{ 549{
550 drm_i915_private_t *dev_priv = dev->dev_private; 550 struct drm_i915_private *dev_priv = dev->dev_private;
551 struct drm_i915_master_private *master_priv = 551 struct drm_i915_master_private *master_priv =
552 dev->primary->master->driver_priv; 552 dev->primary->master->driver_priv;
553 int ret; 553 int ret;
@@ -625,7 +625,7 @@ static int i915_flush_ioctl(struct drm_device *dev, void *data,
625static int i915_batchbuffer(struct drm_device *dev, void *data, 625static int i915_batchbuffer(struct drm_device *dev, void *data,
626 struct drm_file *file_priv) 626 struct drm_file *file_priv)
627{ 627{
628 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 628 struct drm_i915_private *dev_priv = dev->dev_private;
629 struct drm_i915_master_private *master_priv; 629 struct drm_i915_master_private *master_priv;
630 drm_i915_sarea_t *sarea_priv; 630 drm_i915_sarea_t *sarea_priv;
631 drm_i915_batchbuffer_t *batch = data; 631 drm_i915_batchbuffer_t *batch = data;
@@ -683,7 +683,7 @@ fail_free:
683static int i915_cmdbuffer(struct drm_device *dev, void *data, 683static int i915_cmdbuffer(struct drm_device *dev, void *data,
684 struct drm_file *file_priv) 684 struct drm_file *file_priv)
685{ 685{
686 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 686 struct drm_i915_private *dev_priv = dev->dev_private;
687 struct drm_i915_master_private *master_priv; 687 struct drm_i915_master_private *master_priv;
688 drm_i915_sarea_t *sarea_priv; 688 drm_i915_sarea_t *sarea_priv;
689 drm_i915_cmdbuffer_t *cmdbuf = data; 689 drm_i915_cmdbuffer_t *cmdbuf = data;
@@ -753,7 +753,7 @@ fail_batch_free:
753 753
754static int i915_emit_irq(struct drm_device * dev) 754static int i915_emit_irq(struct drm_device * dev)
755{ 755{
756 drm_i915_private_t *dev_priv = dev->dev_private; 756 struct drm_i915_private *dev_priv = dev->dev_private;
757 struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv; 757 struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
758 758
759 i915_kernel_lost_context(dev); 759 i915_kernel_lost_context(dev);
@@ -779,7 +779,7 @@ static int i915_emit_irq(struct drm_device * dev)
779 779
780static int i915_wait_irq(struct drm_device * dev, int irq_nr) 780static int i915_wait_irq(struct drm_device * dev, int irq_nr)
781{ 781{
782 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 782 struct drm_i915_private *dev_priv = dev->dev_private;
783 struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv; 783 struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
784 int ret = 0; 784 int ret = 0;
785 struct intel_ring_buffer *ring = LP_RING(dev_priv); 785 struct intel_ring_buffer *ring = LP_RING(dev_priv);
@@ -816,7 +816,7 @@ static int i915_wait_irq(struct drm_device * dev, int irq_nr)
816static int i915_irq_emit(struct drm_device *dev, void *data, 816static int i915_irq_emit(struct drm_device *dev, void *data,
817 struct drm_file *file_priv) 817 struct drm_file *file_priv)
818{ 818{
819 drm_i915_private_t *dev_priv = dev->dev_private; 819 struct drm_i915_private *dev_priv = dev->dev_private;
820 drm_i915_irq_emit_t *emit = data; 820 drm_i915_irq_emit_t *emit = data;
821 int result; 821 int result;
822 822
@@ -847,7 +847,7 @@ static int i915_irq_emit(struct drm_device *dev, void *data,
847static int i915_irq_wait(struct drm_device *dev, void *data, 847static int i915_irq_wait(struct drm_device *dev, void *data,
848 struct drm_file *file_priv) 848 struct drm_file *file_priv)
849{ 849{
850 drm_i915_private_t *dev_priv = dev->dev_private; 850 struct drm_i915_private *dev_priv = dev->dev_private;
851 drm_i915_irq_wait_t *irqwait = data; 851 drm_i915_irq_wait_t *irqwait = data;
852 852
853 if (drm_core_check_feature(dev, DRIVER_MODESET)) 853 if (drm_core_check_feature(dev, DRIVER_MODESET))
@@ -864,7 +864,7 @@ static int i915_irq_wait(struct drm_device *dev, void *data,
864static int i915_vblank_pipe_get(struct drm_device *dev, void *data, 864static int i915_vblank_pipe_get(struct drm_device *dev, void *data,
865 struct drm_file *file_priv) 865 struct drm_file *file_priv)
866{ 866{
867 drm_i915_private_t *dev_priv = dev->dev_private; 867 struct drm_i915_private *dev_priv = dev->dev_private;
868 drm_i915_vblank_pipe_t *pipe = data; 868 drm_i915_vblank_pipe_t *pipe = data;
869 869
870 if (drm_core_check_feature(dev, DRIVER_MODESET)) 870 if (drm_core_check_feature(dev, DRIVER_MODESET))
@@ -925,7 +925,7 @@ static int i915_flip_bufs(struct drm_device *dev, void *data,
925static int i915_getparam(struct drm_device *dev, void *data, 925static int i915_getparam(struct drm_device *dev, void *data,
926 struct drm_file *file_priv) 926 struct drm_file *file_priv)
927{ 927{
928 drm_i915_private_t *dev_priv = dev->dev_private; 928 struct drm_i915_private *dev_priv = dev->dev_private;
929 drm_i915_getparam_t *param = data; 929 drm_i915_getparam_t *param = data;
930 int value; 930 int value;
931 931
@@ -1033,7 +1033,7 @@ static int i915_getparam(struct drm_device *dev, void *data,
1033static int i915_setparam(struct drm_device *dev, void *data, 1033static int i915_setparam(struct drm_device *dev, void *data,
1034 struct drm_file *file_priv) 1034 struct drm_file *file_priv)
1035{ 1035{
1036 drm_i915_private_t *dev_priv = dev->dev_private; 1036 struct drm_i915_private *dev_priv = dev->dev_private;
1037 drm_i915_setparam_t *param = data; 1037 drm_i915_setparam_t *param = data;
1038 1038
1039 if (!dev_priv) { 1039 if (!dev_priv) {
@@ -1068,7 +1068,7 @@ static int i915_setparam(struct drm_device *dev, void *data,
1068static int i915_set_status_page(struct drm_device *dev, void *data, 1068static int i915_set_status_page(struct drm_device *dev, void *data,
1069 struct drm_file *file_priv) 1069 struct drm_file *file_priv)
1070{ 1070{
1071 drm_i915_private_t *dev_priv = dev->dev_private; 1071 struct drm_i915_private *dev_priv = dev->dev_private;
1072 drm_i915_hws_addr_t *hws = data; 1072 drm_i915_hws_addr_t *hws = data;
1073 struct intel_ring_buffer *ring; 1073 struct intel_ring_buffer *ring;
1074 1074
@@ -1136,7 +1136,7 @@ static int i915_get_bridge_dev(struct drm_device *dev)
1136static int 1136static int
1137intel_alloc_mchbar_resource(struct drm_device *dev) 1137intel_alloc_mchbar_resource(struct drm_device *dev)
1138{ 1138{
1139 drm_i915_private_t *dev_priv = dev->dev_private; 1139 struct drm_i915_private *dev_priv = dev->dev_private;
1140 int reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915; 1140 int reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
1141 u32 temp_lo, temp_hi = 0; 1141 u32 temp_lo, temp_hi = 0;
1142 u64 mchbar_addr; 1142 u64 mchbar_addr;
@@ -1182,7 +1182,7 @@ intel_alloc_mchbar_resource(struct drm_device *dev)
1182static void 1182static void
1183intel_setup_mchbar(struct drm_device *dev) 1183intel_setup_mchbar(struct drm_device *dev)
1184{ 1184{
1185 drm_i915_private_t *dev_priv = dev->dev_private; 1185 struct drm_i915_private *dev_priv = dev->dev_private;
1186 int mchbar_reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915; 1186 int mchbar_reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
1187 u32 temp; 1187 u32 temp;
1188 bool enabled; 1188 bool enabled;
@@ -1222,7 +1222,7 @@ intel_setup_mchbar(struct drm_device *dev)
1222static void 1222static void
1223intel_teardown_mchbar(struct drm_device *dev) 1223intel_teardown_mchbar(struct drm_device *dev)
1224{ 1224{
1225 drm_i915_private_t *dev_priv = dev->dev_private; 1225 struct drm_i915_private *dev_priv = dev->dev_private;
1226 int mchbar_reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915; 1226 int mchbar_reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
1227 u32 temp; 1227 u32 temp;
1228 1228
@@ -1895,7 +1895,7 @@ int i915_driver_open(struct drm_device *dev, struct drm_file *file)
1895 */ 1895 */
1896void i915_driver_lastclose(struct drm_device * dev) 1896void i915_driver_lastclose(struct drm_device * dev)
1897{ 1897{
1898 drm_i915_private_t *dev_priv = dev->dev_private; 1898 struct drm_i915_private *dev_priv = dev->dev_private;
1899 1899
1900 /* On gen6+ we refuse to init without kms enabled, but then the drm core 1900 /* On gen6+ we refuse to init without kms enabled, but then the drm core
1901 * goes right around and calls lastclose. Check for this and don't clean 1901 * goes right around and calls lastclose. Check for this and don't clean
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index fa5d0ed76378..82f4d1f47d3b 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -537,14 +537,21 @@ static void intel_resume_hotplug(struct drm_device *dev)
537 drm_helper_hpd_irq_event(dev); 537 drm_helper_hpd_irq_event(dev);
538} 538}
539 539
540static int __i915_drm_thaw(struct drm_device *dev, bool restore_gtt_mappings) 540static int i915_drm_thaw_early(struct drm_device *dev)
541{ 541{
542 struct drm_i915_private *dev_priv = dev->dev_private; 542 struct drm_i915_private *dev_priv = dev->dev_private;
543 int error = 0;
544 543
545 intel_uncore_early_sanitize(dev); 544 intel_uncore_early_sanitize(dev);
546
547 intel_uncore_sanitize(dev); 545 intel_uncore_sanitize(dev);
546 intel_power_domains_init_hw(dev_priv);
547
548 return 0;
549}
550
551static int __i915_drm_thaw(struct drm_device *dev, bool restore_gtt_mappings)
552{
553 struct drm_i915_private *dev_priv = dev->dev_private;
554 int error = 0;
548 555
549 if (drm_core_check_feature(dev, DRIVER_MODESET) && 556 if (drm_core_check_feature(dev, DRIVER_MODESET) &&
550 restore_gtt_mappings) { 557 restore_gtt_mappings) {
@@ -553,8 +560,6 @@ static int __i915_drm_thaw(struct drm_device *dev, bool restore_gtt_mappings)
553 mutex_unlock(&dev->struct_mutex); 560 mutex_unlock(&dev->struct_mutex);
554 } 561 }
555 562
556 intel_power_domains_init_hw(dev_priv);
557
558 i915_restore_state(dev); 563 i915_restore_state(dev);
559 intel_opregion_setup(dev); 564 intel_opregion_setup(dev);
560 565
@@ -619,19 +624,33 @@ static int i915_drm_thaw(struct drm_device *dev)
619 return __i915_drm_thaw(dev, true); 624 return __i915_drm_thaw(dev, true);
620} 625}
621 626
622int i915_resume(struct drm_device *dev) 627static int i915_resume_early(struct drm_device *dev)
623{ 628{
624 struct drm_i915_private *dev_priv = dev->dev_private;
625 int ret;
626
627 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF) 629 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
628 return 0; 630 return 0;
629 631
632 /*
633 * We have a resume ordering issue with the snd-hda driver also
634 * requiring our device to be power up. Due to the lack of a
635 * parent/child relationship we currently solve this with an early
636 * resume hook.
637 *
638 * FIXME: This should be solved with a special hdmi sink device or
639 * similar so that power domains can be employed.
640 */
630 if (pci_enable_device(dev->pdev)) 641 if (pci_enable_device(dev->pdev))
631 return -EIO; 642 return -EIO;
632 643
633 pci_set_master(dev->pdev); 644 pci_set_master(dev->pdev);
634 645
646 return i915_drm_thaw_early(dev);
647}
648
649int i915_resume(struct drm_device *dev)
650{
651 struct drm_i915_private *dev_priv = dev->dev_private;
652 int ret;
653
635 /* 654 /*
636 * Platforms with opregion should have sane BIOS, older ones (gen3 and 655 * Platforms with opregion should have sane BIOS, older ones (gen3 and
637 * earlier) need to restore the GTT mappings since the BIOS might clear 656 * earlier) need to restore the GTT mappings since the BIOS might clear
@@ -645,6 +664,14 @@ int i915_resume(struct drm_device *dev)
645 return 0; 664 return 0;
646} 665}
647 666
667static int i915_resume_legacy(struct drm_device *dev)
668{
669 i915_resume_early(dev);
670 i915_resume(dev);
671
672 return 0;
673}
674
648/** 675/**
649 * i915_reset - reset chip after a hang 676 * i915_reset - reset chip after a hang
650 * @dev: drm device to reset 677 * @dev: drm device to reset
@@ -662,7 +689,7 @@ int i915_resume(struct drm_device *dev)
662 */ 689 */
663int i915_reset(struct drm_device *dev) 690int i915_reset(struct drm_device *dev)
664{ 691{
665 drm_i915_private_t *dev_priv = dev->dev_private; 692 struct drm_i915_private *dev_priv = dev->dev_private;
666 bool simulated; 693 bool simulated;
667 int ret; 694 int ret;
668 695
@@ -776,7 +803,6 @@ static int i915_pm_suspend(struct device *dev)
776{ 803{
777 struct pci_dev *pdev = to_pci_dev(dev); 804 struct pci_dev *pdev = to_pci_dev(dev);
778 struct drm_device *drm_dev = pci_get_drvdata(pdev); 805 struct drm_device *drm_dev = pci_get_drvdata(pdev);
779 int error;
780 806
781 if (!drm_dev || !drm_dev->dev_private) { 807 if (!drm_dev || !drm_dev->dev_private) {
782 dev_err(dev, "DRM not initialized, aborting suspend.\n"); 808 dev_err(dev, "DRM not initialized, aborting suspend.\n");
@@ -786,9 +812,25 @@ static int i915_pm_suspend(struct device *dev)
786 if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF) 812 if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
787 return 0; 813 return 0;
788 814
789 error = i915_drm_freeze(drm_dev); 815 return i915_drm_freeze(drm_dev);
790 if (error) 816}
791 return error; 817
818static int i915_pm_suspend_late(struct device *dev)
819{
820 struct pci_dev *pdev = to_pci_dev(dev);
821 struct drm_device *drm_dev = pci_get_drvdata(pdev);
822
823 /*
 824 * We have a suspend ordering issue with the snd-hda driver also
 825 * requiring our device to be powered up. Due to the lack of a
 826 * parent/child relationship we currently solve this with a late
827 * suspend hook.
828 *
829 * FIXME: This should be solved with a special hdmi sink device or
830 * similar so that power domains can be employed.
831 */
832 if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
833 return 0;
792 834
793 pci_disable_device(pdev); 835 pci_disable_device(pdev);
794 pci_set_power_state(pdev, PCI_D3hot); 836 pci_set_power_state(pdev, PCI_D3hot);
@@ -796,6 +838,14 @@ static int i915_pm_suspend(struct device *dev)
796 return 0; 838 return 0;
797} 839}
798 840
841static int i915_pm_resume_early(struct device *dev)
842{
843 struct pci_dev *pdev = to_pci_dev(dev);
844 struct drm_device *drm_dev = pci_get_drvdata(pdev);
845
846 return i915_resume_early(drm_dev);
847}
848
799static int i915_pm_resume(struct device *dev) 849static int i915_pm_resume(struct device *dev)
800{ 850{
801 struct pci_dev *pdev = to_pci_dev(dev); 851 struct pci_dev *pdev = to_pci_dev(dev);
@@ -817,6 +867,14 @@ static int i915_pm_freeze(struct device *dev)
817 return i915_drm_freeze(drm_dev); 867 return i915_drm_freeze(drm_dev);
818} 868}
819 869
870static int i915_pm_thaw_early(struct device *dev)
871{
872 struct pci_dev *pdev = to_pci_dev(dev);
873 struct drm_device *drm_dev = pci_get_drvdata(pdev);
874
875 return i915_drm_thaw_early(drm_dev);
876}
877
820static int i915_pm_thaw(struct device *dev) 878static int i915_pm_thaw(struct device *dev)
821{ 879{
822 struct pci_dev *pdev = to_pci_dev(dev); 880 struct pci_dev *pdev = to_pci_dev(dev);
@@ -887,10 +945,14 @@ static int i915_runtime_resume(struct device *device)
887 945
888static const struct dev_pm_ops i915_pm_ops = { 946static const struct dev_pm_ops i915_pm_ops = {
889 .suspend = i915_pm_suspend, 947 .suspend = i915_pm_suspend,
948 .suspend_late = i915_pm_suspend_late,
949 .resume_early = i915_pm_resume_early,
890 .resume = i915_pm_resume, 950 .resume = i915_pm_resume,
891 .freeze = i915_pm_freeze, 951 .freeze = i915_pm_freeze,
952 .thaw_early = i915_pm_thaw_early,
892 .thaw = i915_pm_thaw, 953 .thaw = i915_pm_thaw,
893 .poweroff = i915_pm_poweroff, 954 .poweroff = i915_pm_poweroff,
955 .restore_early = i915_pm_resume_early,
894 .restore = i915_pm_resume, 956 .restore = i915_pm_resume,
895 .runtime_suspend = i915_runtime_suspend, 957 .runtime_suspend = i915_runtime_suspend,
896 .runtime_resume = i915_runtime_resume, 958 .runtime_resume = i915_runtime_resume,
@@ -933,7 +995,7 @@ static struct drm_driver driver = {
933 995
934 /* Used in place of i915_pm_ops for non-DRIVER_MODESET */ 996 /* Used in place of i915_pm_ops for non-DRIVER_MODESET */
935 .suspend = i915_suspend, 997 .suspend = i915_suspend,
936 .resume = i915_resume, 998 .resume = i915_resume_legacy,
937 999
938 .device_is_agp = i915_driver_device_is_agp, 1000 .device_is_agp = i915_driver_device_is_agp,
939 .master_create = i915_master_create, 1001 .master_create = i915_master_create,
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 3f62be0fb5c5..0905cd915589 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -351,12 +351,12 @@ struct drm_i915_error_state {
351 u32 ipeir; 351 u32 ipeir;
352 u32 ipehr; 352 u32 ipehr;
353 u32 instdone; 353 u32 instdone;
354 u32 acthd;
355 u32 bbstate; 354 u32 bbstate;
356 u32 instpm; 355 u32 instpm;
357 u32 instps; 356 u32 instps;
358 u32 seqno; 357 u32 seqno;
359 u64 bbaddr; 358 u64 bbaddr;
359 u64 acthd;
360 u32 fault_reg; 360 u32 fault_reg;
361 u32 faddr; 361 u32 faddr;
362 u32 rc_psmi; /* sleep state */ 362 u32 rc_psmi; /* sleep state */
@@ -1001,9 +1001,6 @@ struct intel_gen6_power_mgmt {
1001 u8 rp1_freq; /* "less than" RP0 power/freqency */ 1001 u8 rp1_freq; /* "less than" RP0 power/freqency */
1002 u8 rp0_freq; /* Non-overclocked max frequency. */ 1002 u8 rp0_freq; /* Non-overclocked max frequency. */
1003 1003
1004 bool rp_up_masked;
1005 bool rp_down_masked;
1006
1007 int last_adj; 1004 int last_adj;
1008 enum { LOW_POWER, BETWEEN, HIGH_POWER } power; 1005 enum { LOW_POWER, BETWEEN, HIGH_POWER } power;
1009 1006
@@ -1468,6 +1465,7 @@ typedef struct drm_i915_private {
1468 }; 1465 };
1469 u32 gt_irq_mask; 1466 u32 gt_irq_mask;
1470 u32 pm_irq_mask; 1467 u32 pm_irq_mask;
1468 u32 pm_rps_events;
1471 u32 pipestat_irq_mask[I915_MAX_PIPES]; 1469 u32 pipestat_irq_mask[I915_MAX_PIPES];
1472 1470
1473 struct work_struct hotplug_work; 1471 struct work_struct hotplug_work;
@@ -2130,11 +2128,11 @@ extern void intel_uncore_check_errors(struct drm_device *dev);
2130extern void intel_uncore_fini(struct drm_device *dev); 2128extern void intel_uncore_fini(struct drm_device *dev);
2131 2129
2132void 2130void
2133i915_enable_pipestat(drm_i915_private_t *dev_priv, enum pipe pipe, 2131i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
2134 u32 status_mask); 2132 u32 status_mask);
2135 2133
2136void 2134void
2137i915_disable_pipestat(drm_i915_private_t *dev_priv, enum pipe pipe, 2135i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
2138 u32 status_mask); 2136 u32 status_mask);
2139 2137
2140void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv); 2138void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv);
@@ -2504,7 +2502,7 @@ void i915_gem_object_release_stolen(struct drm_i915_gem_object *obj);
2504/* i915_gem_tiling.c */ 2502/* i915_gem_tiling.c */
2505static inline bool i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj) 2503static inline bool i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj)
2506{ 2504{
2507 drm_i915_private_t *dev_priv = obj->base.dev->dev_private; 2505 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
2508 2506
2509 return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 && 2507 return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 &&
2510 obj->tiling_mode != I915_TILING_NONE; 2508 obj->tiling_mode != I915_TILING_NONE;
@@ -2745,6 +2743,17 @@ void vlv_force_wake_put(struct drm_i915_private *dev_priv, int fw_engine);
2745#define I915_WRITE64(reg, val) dev_priv->uncore.funcs.mmio_writeq(dev_priv, (reg), (val), true) 2743#define I915_WRITE64(reg, val) dev_priv->uncore.funcs.mmio_writeq(dev_priv, (reg), (val), true)
2746#define I915_READ64(reg) dev_priv->uncore.funcs.mmio_readq(dev_priv, (reg), true) 2744#define I915_READ64(reg) dev_priv->uncore.funcs.mmio_readq(dev_priv, (reg), true)
2747 2745
2746#define I915_READ64_2x32(lower_reg, upper_reg) ({ \
2747 u32 upper = I915_READ(upper_reg); \
2748 u32 lower = I915_READ(lower_reg); \
2749 u32 tmp = I915_READ(upper_reg); \
2750 if (upper != tmp) { \
2751 upper = tmp; \
2752 lower = I915_READ(lower_reg); \
2753 WARN_ON(I915_READ(upper_reg) != upper); \
2754 } \
2755 (u64)upper << 32 | lower; })
2756
2748#define POSTING_READ(reg) (void)I915_READ_NOTRACE(reg) 2757#define POSTING_READ(reg) (void)I915_READ_NOTRACE(reg)
2749#define POSTING_READ16(reg) (void)I915_READ16_NOTRACE(reg) 2758#define POSTING_READ16(reg) (void)I915_READ16_NOTRACE(reg)
2750 2759
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 404a5456bf3a..6370a761d137 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -615,7 +615,7 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev,
615 struct drm_i915_gem_pwrite *args, 615 struct drm_i915_gem_pwrite *args,
616 struct drm_file *file) 616 struct drm_file *file)
617{ 617{
618 drm_i915_private_t *dev_priv = dev->dev_private; 618 struct drm_i915_private *dev_priv = dev->dev_private;
619 ssize_t remain; 619 ssize_t remain;
620 loff_t offset, page_base; 620 loff_t offset, page_base;
621 char __user *user_data; 621 char __user *user_data;
@@ -1027,7 +1027,7 @@ static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno,
1027 struct drm_i915_file_private *file_priv) 1027 struct drm_i915_file_private *file_priv)
1028{ 1028{
1029 struct drm_device *dev = ring->dev; 1029 struct drm_device *dev = ring->dev;
1030 drm_i915_private_t *dev_priv = dev->dev_private; 1030 struct drm_i915_private *dev_priv = dev->dev_private;
1031 const bool irq_test_in_progress = 1031 const bool irq_test_in_progress =
1032 ACCESS_ONCE(dev_priv->gpu_error.test_irq_rings) & intel_ring_flag(ring); 1032 ACCESS_ONCE(dev_priv->gpu_error.test_irq_rings) & intel_ring_flag(ring);
1033 struct timespec before, now; 1033 struct timespec before, now;
@@ -1389,7 +1389,7 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1389{ 1389{
1390 struct drm_i915_gem_object *obj = to_intel_bo(vma->vm_private_data); 1390 struct drm_i915_gem_object *obj = to_intel_bo(vma->vm_private_data);
1391 struct drm_device *dev = obj->base.dev; 1391 struct drm_device *dev = obj->base.dev;
1392 drm_i915_private_t *dev_priv = dev->dev_private; 1392 struct drm_i915_private *dev_priv = dev->dev_private;
1393 pgoff_t page_offset; 1393 pgoff_t page_offset;
1394 unsigned long pfn; 1394 unsigned long pfn;
1395 int ret = 0; 1395 int ret = 0;
@@ -2164,7 +2164,7 @@ int __i915_add_request(struct intel_ring_buffer *ring,
2164 struct drm_i915_gem_object *obj, 2164 struct drm_i915_gem_object *obj,
2165 u32 *out_seqno) 2165 u32 *out_seqno)
2166{ 2166{
2167 drm_i915_private_t *dev_priv = ring->dev->dev_private; 2167 struct drm_i915_private *dev_priv = ring->dev->dev_private;
2168 struct drm_i915_gem_request *request; 2168 struct drm_i915_gem_request *request;
2169 u32 request_ring_position, request_start; 2169 u32 request_ring_position, request_start;
2170 int ret; 2170 int ret;
@@ -2496,7 +2496,7 @@ i915_gem_retire_requests_ring(struct intel_ring_buffer *ring)
2496bool 2496bool
2497i915_gem_retire_requests(struct drm_device *dev) 2497i915_gem_retire_requests(struct drm_device *dev)
2498{ 2498{
2499 drm_i915_private_t *dev_priv = dev->dev_private; 2499 struct drm_i915_private *dev_priv = dev->dev_private;
2500 struct intel_ring_buffer *ring; 2500 struct intel_ring_buffer *ring;
2501 bool idle = true; 2501 bool idle = true;
2502 int i; 2502 int i;
@@ -2588,7 +2588,7 @@ i915_gem_object_flush_active(struct drm_i915_gem_object *obj)
2588int 2588int
2589i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file) 2589i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
2590{ 2590{
2591 drm_i915_private_t *dev_priv = dev->dev_private; 2591 struct drm_i915_private *dev_priv = dev->dev_private;
2592 struct drm_i915_gem_wait *args = data; 2592 struct drm_i915_gem_wait *args = data;
2593 struct drm_i915_gem_object *obj; 2593 struct drm_i915_gem_object *obj;
2594 struct intel_ring_buffer *ring = NULL; 2594 struct intel_ring_buffer *ring = NULL;
@@ -2723,7 +2723,7 @@ static void i915_gem_object_finish_gtt(struct drm_i915_gem_object *obj)
2723int i915_vma_unbind(struct i915_vma *vma) 2723int i915_vma_unbind(struct i915_vma *vma)
2724{ 2724{
2725 struct drm_i915_gem_object *obj = vma->obj; 2725 struct drm_i915_gem_object *obj = vma->obj;
2726 drm_i915_private_t *dev_priv = obj->base.dev->dev_private; 2726 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
2727 int ret; 2727 int ret;
2728 2728
2729 if (list_empty(&vma->vma_link)) 2729 if (list_empty(&vma->vma_link))
@@ -2784,7 +2784,7 @@ int i915_vma_unbind(struct i915_vma *vma)
2784 2784
2785int i915_gpu_idle(struct drm_device *dev) 2785int i915_gpu_idle(struct drm_device *dev)
2786{ 2786{
2787 drm_i915_private_t *dev_priv = dev->dev_private; 2787 struct drm_i915_private *dev_priv = dev->dev_private;
2788 struct intel_ring_buffer *ring; 2788 struct intel_ring_buffer *ring;
2789 int ret, i; 2789 int ret, i;
2790 2790
@@ -2805,7 +2805,7 @@ int i915_gpu_idle(struct drm_device *dev)
2805static void i965_write_fence_reg(struct drm_device *dev, int reg, 2805static void i965_write_fence_reg(struct drm_device *dev, int reg,
2806 struct drm_i915_gem_object *obj) 2806 struct drm_i915_gem_object *obj)
2807{ 2807{
2808 drm_i915_private_t *dev_priv = dev->dev_private; 2808 struct drm_i915_private *dev_priv = dev->dev_private;
2809 int fence_reg; 2809 int fence_reg;
2810 int fence_pitch_shift; 2810 int fence_pitch_shift;
2811 2811
@@ -2857,7 +2857,7 @@ static void i965_write_fence_reg(struct drm_device *dev, int reg,
2857static void i915_write_fence_reg(struct drm_device *dev, int reg, 2857static void i915_write_fence_reg(struct drm_device *dev, int reg,
2858 struct drm_i915_gem_object *obj) 2858 struct drm_i915_gem_object *obj)
2859{ 2859{
2860 drm_i915_private_t *dev_priv = dev->dev_private; 2860 struct drm_i915_private *dev_priv = dev->dev_private;
2861 u32 val; 2861 u32 val;
2862 2862
2863 if (obj) { 2863 if (obj) {
@@ -2901,7 +2901,7 @@ static void i915_write_fence_reg(struct drm_device *dev, int reg,
2901static void i830_write_fence_reg(struct drm_device *dev, int reg, 2901static void i830_write_fence_reg(struct drm_device *dev, int reg,
2902 struct drm_i915_gem_object *obj) 2902 struct drm_i915_gem_object *obj)
2903{ 2903{
2904 drm_i915_private_t *dev_priv = dev->dev_private; 2904 struct drm_i915_private *dev_priv = dev->dev_private;
2905 uint32_t val; 2905 uint32_t val;
2906 2906
2907 if (obj) { 2907 if (obj) {
@@ -3211,7 +3211,7 @@ i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
3211 unsigned flags) 3211 unsigned flags)
3212{ 3212{
3213 struct drm_device *dev = obj->base.dev; 3213 struct drm_device *dev = obj->base.dev;
3214 drm_i915_private_t *dev_priv = dev->dev_private; 3214 struct drm_i915_private *dev_priv = dev->dev_private;
3215 u32 size, fence_size, fence_alignment, unfenced_alignment; 3215 u32 size, fence_size, fence_alignment, unfenced_alignment;
3216 size_t gtt_max = 3216 size_t gtt_max =
3217 flags & PIN_MAPPABLE ? dev_priv->gtt.mappable_end : vm->total; 3217 flags & PIN_MAPPABLE ? dev_priv->gtt.mappable_end : vm->total;
@@ -3410,7 +3410,7 @@ i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj,
3410int 3410int
3411i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write) 3411i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
3412{ 3412{
3413 drm_i915_private_t *dev_priv = obj->base.dev->dev_private; 3413 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
3414 uint32_t old_write_domain, old_read_domains; 3414 uint32_t old_write_domain, old_read_domains;
3415 int ret; 3415 int ret;
3416 3416
@@ -4156,7 +4156,7 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
4156{ 4156{
4157 struct drm_i915_gem_object *obj = to_intel_bo(gem_obj); 4157 struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
4158 struct drm_device *dev = obj->base.dev; 4158 struct drm_device *dev = obj->base.dev;
4159 drm_i915_private_t *dev_priv = dev->dev_private; 4159 struct drm_i915_private *dev_priv = dev->dev_private;
4160 struct i915_vma *vma, *next; 4160 struct i915_vma *vma, *next;
4161 4161
4162 intel_runtime_pm_get(dev_priv); 4162 intel_runtime_pm_get(dev_priv);
@@ -4235,7 +4235,7 @@ void i915_gem_vma_destroy(struct i915_vma *vma)
4235int 4235int
4236i915_gem_suspend(struct drm_device *dev) 4236i915_gem_suspend(struct drm_device *dev)
4237{ 4237{
4238 drm_i915_private_t *dev_priv = dev->dev_private; 4238 struct drm_i915_private *dev_priv = dev->dev_private;
4239 int ret = 0; 4239 int ret = 0;
4240 4240
4241 mutex_lock(&dev->struct_mutex); 4241 mutex_lock(&dev->struct_mutex);
@@ -4277,7 +4277,7 @@ err:
4277int i915_gem_l3_remap(struct intel_ring_buffer *ring, int slice) 4277int i915_gem_l3_remap(struct intel_ring_buffer *ring, int slice)
4278{ 4278{
4279 struct drm_device *dev = ring->dev; 4279 struct drm_device *dev = ring->dev;
4280 drm_i915_private_t *dev_priv = dev->dev_private; 4280 struct drm_i915_private *dev_priv = dev->dev_private;
4281 u32 reg_base = GEN7_L3LOG_BASE + (slice * 0x200); 4281 u32 reg_base = GEN7_L3LOG_BASE + (slice * 0x200);
4282 u32 *remap_info = dev_priv->l3_parity.remap_info[slice]; 4282 u32 *remap_info = dev_priv->l3_parity.remap_info[slice];
4283 int i, ret; 4283 int i, ret;
@@ -4307,7 +4307,7 @@ int i915_gem_l3_remap(struct intel_ring_buffer *ring, int slice)
4307 4307
4308void i915_gem_init_swizzling(struct drm_device *dev) 4308void i915_gem_init_swizzling(struct drm_device *dev)
4309{ 4309{
4310 drm_i915_private_t *dev_priv = dev->dev_private; 4310 struct drm_i915_private *dev_priv = dev->dev_private;
4311 4311
4312 if (INTEL_INFO(dev)->gen < 5 || 4312 if (INTEL_INFO(dev)->gen < 5 ||
4313 dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_NONE) 4313 dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_NONE)
@@ -4395,7 +4395,7 @@ cleanup_render_ring:
4395int 4395int
4396i915_gem_init_hw(struct drm_device *dev) 4396i915_gem_init_hw(struct drm_device *dev)
4397{ 4397{
4398 drm_i915_private_t *dev_priv = dev->dev_private; 4398 struct drm_i915_private *dev_priv = dev->dev_private;
4399 int ret, i; 4399 int ret, i;
4400 4400
4401 if (INTEL_INFO(dev)->gen < 6 && !intel_enable_gtt()) 4401 if (INTEL_INFO(dev)->gen < 6 && !intel_enable_gtt())
@@ -4489,7 +4489,7 @@ int i915_gem_init(struct drm_device *dev)
4489void 4489void
4490i915_gem_cleanup_ringbuffer(struct drm_device *dev) 4490i915_gem_cleanup_ringbuffer(struct drm_device *dev)
4491{ 4491{
4492 drm_i915_private_t *dev_priv = dev->dev_private; 4492 struct drm_i915_private *dev_priv = dev->dev_private;
4493 struct intel_ring_buffer *ring; 4493 struct intel_ring_buffer *ring;
4494 int i; 4494 int i;
4495 4495
@@ -4586,7 +4586,7 @@ void i915_init_vm(struct drm_i915_private *dev_priv,
4586void 4586void
4587i915_gem_load(struct drm_device *dev) 4587i915_gem_load(struct drm_device *dev)
4588{ 4588{
4589 drm_i915_private_t *dev_priv = dev->dev_private; 4589 struct drm_i915_private *dev_priv = dev->dev_private;
4590 int i; 4590 int i;
4591 4591
4592 dev_priv->slab = 4592 dev_priv->slab =
@@ -4653,7 +4653,7 @@ i915_gem_load(struct drm_device *dev)
4653static int i915_gem_init_phys_object(struct drm_device *dev, 4653static int i915_gem_init_phys_object(struct drm_device *dev,
4654 int id, int size, int align) 4654 int id, int size, int align)
4655{ 4655{
4656 drm_i915_private_t *dev_priv = dev->dev_private; 4656 struct drm_i915_private *dev_priv = dev->dev_private;
4657 struct drm_i915_gem_phys_object *phys_obj; 4657 struct drm_i915_gem_phys_object *phys_obj;
4658 int ret; 4658 int ret;
4659 4659
@@ -4685,7 +4685,7 @@ kfree_obj:
4685 4685
4686static void i915_gem_free_phys_object(struct drm_device *dev, int id) 4686static void i915_gem_free_phys_object(struct drm_device *dev, int id)
4687{ 4687{
4688 drm_i915_private_t *dev_priv = dev->dev_private; 4688 struct drm_i915_private *dev_priv = dev->dev_private;
4689 struct drm_i915_gem_phys_object *phys_obj; 4689 struct drm_i915_gem_phys_object *phys_obj;
4690 4690
4691 if (!dev_priv->mm.phys_objs[id - 1]) 4691 if (!dev_priv->mm.phys_objs[id - 1])
@@ -4752,7 +4752,7 @@ i915_gem_attach_phys_object(struct drm_device *dev,
4752 int align) 4752 int align)
4753{ 4753{
4754 struct address_space *mapping = file_inode(obj->base.filp)->i_mapping; 4754 struct address_space *mapping = file_inode(obj->base.filp)->i_mapping;
4755 drm_i915_private_t *dev_priv = dev->dev_private; 4755 struct drm_i915_private *dev_priv = dev->dev_private;
4756 int ret = 0; 4756 int ret = 0;
4757 int page_count; 4757 int page_count;
4758 int i; 4758 int i;
diff --git a/drivers/gpu/drm/i915/i915_gem_debug.c b/drivers/gpu/drm/i915/i915_gem_debug.c
index 775d506b3208..f462d1b51d97 100644
--- a/drivers/gpu/drm/i915/i915_gem_debug.c
+++ b/drivers/gpu/drm/i915/i915_gem_debug.c
@@ -34,7 +34,7 @@ int
34i915_verify_lists(struct drm_device *dev) 34i915_verify_lists(struct drm_device *dev)
35{ 35{
36 static int warned; 36 static int warned;
37 drm_i915_private_t *dev_priv = dev->dev_private; 37 struct drm_i915_private *dev_priv = dev->dev_private;
38 struct drm_i915_gem_object *obj; 38 struct drm_i915_gem_object *obj;
39 int err = 0; 39 int err = 0;
40 40
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index 8a78f7885cba..75fca63dc8c1 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -70,7 +70,7 @@ i915_gem_evict_something(struct drm_device *dev, struct i915_address_space *vm,
70 int min_size, unsigned alignment, unsigned cache_level, 70 int min_size, unsigned alignment, unsigned cache_level,
71 unsigned flags) 71 unsigned flags)
72{ 72{
73 drm_i915_private_t *dev_priv = dev->dev_private; 73 struct drm_i915_private *dev_priv = dev->dev_private;
74 struct list_head eviction_list, unwind_list; 74 struct list_head eviction_list, unwind_list;
75 struct i915_vma *vma; 75 struct i915_vma *vma;
76 int ret = 0; 76 int ret = 0;
@@ -243,7 +243,7 @@ int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle)
243int 243int
244i915_gem_evict_everything(struct drm_device *dev) 244i915_gem_evict_everything(struct drm_device *dev)
245{ 245{
246 drm_i915_private_t *dev_priv = dev->dev_private; 246 struct drm_i915_private *dev_priv = dev->dev_private;
247 struct i915_address_space *vm; 247 struct i915_address_space *vm;
248 bool lists_empty = true; 248 bool lists_empty = true;
249 int ret; 249 int ret;
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 3851a1b1dc88..7447160155a3 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -978,7 +978,7 @@ static int
978i915_reset_gen7_sol_offsets(struct drm_device *dev, 978i915_reset_gen7_sol_offsets(struct drm_device *dev,
979 struct intel_ring_buffer *ring) 979 struct intel_ring_buffer *ring)
980{ 980{
981 drm_i915_private_t *dev_priv = dev->dev_private; 981 struct drm_i915_private *dev_priv = dev->dev_private;
982 int ret, i; 982 int ret, i;
983 983
984 if (!IS_GEN7(dev) || ring != &dev_priv->ring[RCS]) 984 if (!IS_GEN7(dev) || ring != &dev_priv->ring[RCS])
@@ -1005,7 +1005,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
1005 struct drm_i915_gem_execbuffer2 *args, 1005 struct drm_i915_gem_execbuffer2 *args,
1006 struct drm_i915_gem_exec_object2 *exec) 1006 struct drm_i915_gem_exec_object2 *exec)
1007{ 1007{
1008 drm_i915_private_t *dev_priv = dev->dev_private; 1008 struct drm_i915_private *dev_priv = dev->dev_private;
1009 struct eb_vmas *eb; 1009 struct eb_vmas *eb;
1010 struct drm_i915_gem_object *batch_obj; 1010 struct drm_i915_gem_object *batch_obj;
1011 struct drm_clip_rect *cliprects = NULL; 1011 struct drm_clip_rect *cliprects = NULL;
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 2b3c79923d90..ab5e93c30aa2 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -888,7 +888,7 @@ err_out:
888static int gen7_ppgtt_enable(struct i915_hw_ppgtt *ppgtt) 888static int gen7_ppgtt_enable(struct i915_hw_ppgtt *ppgtt)
889{ 889{
890 struct drm_device *dev = ppgtt->base.dev; 890 struct drm_device *dev = ppgtt->base.dev;
891 drm_i915_private_t *dev_priv = dev->dev_private; 891 struct drm_i915_private *dev_priv = dev->dev_private;
892 struct intel_ring_buffer *ring; 892 struct intel_ring_buffer *ring;
893 uint32_t ecochk, ecobits; 893 uint32_t ecochk, ecobits;
894 int i; 894 int i;
@@ -927,7 +927,7 @@ static int gen7_ppgtt_enable(struct i915_hw_ppgtt *ppgtt)
927static int gen6_ppgtt_enable(struct i915_hw_ppgtt *ppgtt) 927static int gen6_ppgtt_enable(struct i915_hw_ppgtt *ppgtt)
928{ 928{
929 struct drm_device *dev = ppgtt->base.dev; 929 struct drm_device *dev = ppgtt->base.dev;
930 drm_i915_private_t *dev_priv = dev->dev_private; 930 struct drm_i915_private *dev_priv = dev->dev_private;
931 struct intel_ring_buffer *ring; 931 struct intel_ring_buffer *ring;
932 uint32_t ecochk, gab_ctl, ecobits; 932 uint32_t ecochk, gab_ctl, ecobits;
933 int i; 933 int i;
@@ -1340,7 +1340,7 @@ void i915_gem_suspend_gtt_mappings(struct drm_device *dev)
1340 dev_priv->gtt.base.clear_range(&dev_priv->gtt.base, 1340 dev_priv->gtt.base.clear_range(&dev_priv->gtt.base,
1341 dev_priv->gtt.base.start, 1341 dev_priv->gtt.base.start,
1342 dev_priv->gtt.base.total, 1342 dev_priv->gtt.base.total,
1343 false); 1343 true);
1344} 1344}
1345 1345
1346void i915_gem_restore_gtt_mappings(struct drm_device *dev) 1346void i915_gem_restore_gtt_mappings(struct drm_device *dev)
diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
index d58b4e287e32..62ef55ba061c 100644
--- a/drivers/gpu/drm/i915/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
@@ -214,6 +214,13 @@ int i915_gem_init_stolen(struct drm_device *dev)
214 struct drm_i915_private *dev_priv = dev->dev_private; 214 struct drm_i915_private *dev_priv = dev->dev_private;
215 int bios_reserved = 0; 215 int bios_reserved = 0;
216 216
217#ifdef CONFIG_INTEL_IOMMU
218 if (intel_iommu_gfx_mapped && INTEL_INFO(dev)->gen < 8) {
219 DRM_INFO("DMAR active, disabling use of stolen memory\n");
220 return 0;
221 }
222#endif
223
217 if (dev_priv->gtt.stolen_size == 0) 224 if (dev_priv->gtt.stolen_size == 0)
218 return 0; 225 return 0;
219 226
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index eb993584aa6b..cb150e8b4336 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -87,7 +87,7 @@
87void 87void
88i915_gem_detect_bit_6_swizzle(struct drm_device *dev) 88i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
89{ 89{
90 drm_i915_private_t *dev_priv = dev->dev_private; 90 struct drm_i915_private *dev_priv = dev->dev_private;
91 uint32_t swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN; 91 uint32_t swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
92 uint32_t swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN; 92 uint32_t swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
93 93
@@ -294,7 +294,7 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
294 struct drm_file *file) 294 struct drm_file *file)
295{ 295{
296 struct drm_i915_gem_set_tiling *args = data; 296 struct drm_i915_gem_set_tiling *args = data;
297 drm_i915_private_t *dev_priv = dev->dev_private; 297 struct drm_i915_private *dev_priv = dev->dev_private;
298 struct drm_i915_gem_object *obj; 298 struct drm_i915_gem_object *obj;
299 int ret = 0; 299 int ret = 0;
300 300
@@ -415,7 +415,7 @@ i915_gem_get_tiling(struct drm_device *dev, void *data,
415 struct drm_file *file) 415 struct drm_file *file)
416{ 416{
417 struct drm_i915_gem_get_tiling *args = data; 417 struct drm_i915_gem_get_tiling *args = data;
418 drm_i915_private_t *dev_priv = dev->dev_private; 418 struct drm_i915_private *dev_priv = dev->dev_private;
419 struct drm_i915_gem_object *obj; 419 struct drm_i915_gem_object *obj;
420 420
421 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle)); 421 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index baf1ca690dc5..12f1d43b2d68 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -247,12 +247,12 @@ static void i915_ring_error_state(struct drm_i915_error_state_buf *m,
247 err_printf(m, " TAIL: 0x%08x\n", ring->tail); 247 err_printf(m, " TAIL: 0x%08x\n", ring->tail);
248 err_printf(m, " CTL: 0x%08x\n", ring->ctl); 248 err_printf(m, " CTL: 0x%08x\n", ring->ctl);
249 err_printf(m, " HWS: 0x%08x\n", ring->hws); 249 err_printf(m, " HWS: 0x%08x\n", ring->hws);
250 err_printf(m, " ACTHD: 0x%08x\n", ring->acthd); 250 err_printf(m, " ACTHD: 0x%08x %08x\n", (u32)(ring->acthd>>32), (u32)ring->acthd);
251 err_printf(m, " IPEIR: 0x%08x\n", ring->ipeir); 251 err_printf(m, " IPEIR: 0x%08x\n", ring->ipeir);
252 err_printf(m, " IPEHR: 0x%08x\n", ring->ipehr); 252 err_printf(m, " IPEHR: 0x%08x\n", ring->ipehr);
253 err_printf(m, " INSTDONE: 0x%08x\n", ring->instdone); 253 err_printf(m, " INSTDONE: 0x%08x\n", ring->instdone);
254 if (INTEL_INFO(dev)->gen >= 4) { 254 if (INTEL_INFO(dev)->gen >= 4) {
255 err_printf(m, " BBADDR: 0x%08llx\n", ring->bbaddr); 255 err_printf(m, " BBADDR: 0x%08x %08x\n", (u32)(ring->bbaddr>>32), (u32)ring->bbaddr);
256 err_printf(m, " BB_STATE: 0x%08x\n", ring->bbstate); 256 err_printf(m, " BB_STATE: 0x%08x\n", ring->bbstate);
257 err_printf(m, " INSTPS: 0x%08x\n", ring->instps); 257 err_printf(m, " INSTPS: 0x%08x\n", ring->instps);
258 } 258 }
@@ -322,7 +322,7 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
322 const struct i915_error_state_file_priv *error_priv) 322 const struct i915_error_state_file_priv *error_priv)
323{ 323{
324 struct drm_device *dev = error_priv->dev; 324 struct drm_device *dev = error_priv->dev;
325 drm_i915_private_t *dev_priv = dev->dev_private; 325 struct drm_i915_private *dev_priv = dev->dev_private;
326 struct drm_i915_error_state *error = error_priv->error; 326 struct drm_i915_error_state *error = error_priv->error;
327 int i, j, offset, elt; 327 int i, j, offset, elt;
328 int max_hangcheck_score; 328 int max_hangcheck_score;
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index acebe511e4ef..7753249b3a95 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -82,7 +82,7 @@ static const u32 hpd_status_i915[] = { /* i915 and valleyview are the same */
82 82
83/* For display hotplug interrupt */ 83/* For display hotplug interrupt */
84static void 84static void
85ironlake_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask) 85ironlake_enable_display_irq(struct drm_i915_private *dev_priv, u32 mask)
86{ 86{
87 assert_spin_locked(&dev_priv->irq_lock); 87 assert_spin_locked(&dev_priv->irq_lock);
88 88
@@ -100,7 +100,7 @@ ironlake_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
100} 100}
101 101
102static void 102static void
103ironlake_disable_display_irq(drm_i915_private_t *dev_priv, u32 mask) 103ironlake_disable_display_irq(struct drm_i915_private *dev_priv, u32 mask)
104{ 104{
105 assert_spin_locked(&dev_priv->irq_lock); 105 assert_spin_locked(&dev_priv->irq_lock);
106 106
@@ -596,7 +596,7 @@ i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
596 */ 596 */
597static void i915_enable_asle_pipestat(struct drm_device *dev) 597static void i915_enable_asle_pipestat(struct drm_device *dev)
598{ 598{
599 drm_i915_private_t *dev_priv = dev->dev_private; 599 struct drm_i915_private *dev_priv = dev->dev_private;
600 unsigned long irqflags; 600 unsigned long irqflags;
601 601
602 if (!dev_priv->opregion.asle || !IS_MOBILE(dev)) 602 if (!dev_priv->opregion.asle || !IS_MOBILE(dev))
@@ -624,7 +624,7 @@ static void i915_enable_asle_pipestat(struct drm_device *dev)
624static int 624static int
625i915_pipe_enabled(struct drm_device *dev, int pipe) 625i915_pipe_enabled(struct drm_device *dev, int pipe)
626{ 626{
627 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 627 struct drm_i915_private *dev_priv = dev->dev_private;
628 628
629 if (drm_core_check_feature(dev, DRIVER_MODESET)) { 629 if (drm_core_check_feature(dev, DRIVER_MODESET)) {
630 /* Locking is horribly broken here, but whatever. */ 630 /* Locking is horribly broken here, but whatever. */
@@ -648,7 +648,7 @@ static u32 i8xx_get_vblank_counter(struct drm_device *dev, int pipe)
648 */ 648 */
649static u32 i915_get_vblank_counter(struct drm_device *dev, int pipe) 649static u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
650{ 650{
651 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 651 struct drm_i915_private *dev_priv = dev->dev_private;
652 unsigned long high_frame; 652 unsigned long high_frame;
653 unsigned long low_frame; 653 unsigned long low_frame;
654 u32 high1, high2, low, pixel, vbl_start; 654 u32 high1, high2, low, pixel, vbl_start;
@@ -704,7 +704,7 @@ static u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
704 704
705static u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe) 705static u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
706{ 706{
707 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 707 struct drm_i915_private *dev_priv = dev->dev_private;
708 int reg = PIPE_FRMCOUNT_GM45(pipe); 708 int reg = PIPE_FRMCOUNT_GM45(pipe);
709 709
710 if (!i915_pipe_enabled(dev, pipe)) { 710 if (!i915_pipe_enabled(dev, pipe)) {
@@ -718,33 +718,25 @@ static u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
718 718
719/* raw reads, only for fast reads of display block, no need for forcewake etc. */ 719/* raw reads, only for fast reads of display block, no need for forcewake etc. */
720#define __raw_i915_read32(dev_priv__, reg__) readl((dev_priv__)->regs + (reg__)) 720#define __raw_i915_read32(dev_priv__, reg__) readl((dev_priv__)->regs + (reg__))
721#define __raw_i915_read16(dev_priv__, reg__) readw((dev_priv__)->regs + (reg__))
722 721
723static bool ilk_pipe_in_vblank_locked(struct drm_device *dev, enum pipe pipe) 722static bool ilk_pipe_in_vblank_locked(struct drm_device *dev, enum pipe pipe)
724{ 723{
725 struct drm_i915_private *dev_priv = dev->dev_private; 724 struct drm_i915_private *dev_priv = dev->dev_private;
726 uint32_t status; 725 uint32_t status;
727 726 int reg;
728 if (INTEL_INFO(dev)->gen < 7) { 727
729 status = pipe == PIPE_A ? 728 if (INTEL_INFO(dev)->gen >= 8) {
730 DE_PIPEA_VBLANK : 729 status = GEN8_PIPE_VBLANK;
731 DE_PIPEB_VBLANK; 730 reg = GEN8_DE_PIPE_ISR(pipe);
731 } else if (INTEL_INFO(dev)->gen >= 7) {
732 status = DE_PIPE_VBLANK_IVB(pipe);
733 reg = DEISR;
732 } else { 734 } else {
733 switch (pipe) { 735 status = DE_PIPE_VBLANK(pipe);
734 default: 736 reg = DEISR;
735 case PIPE_A:
736 status = DE_PIPEA_VBLANK_IVB;
737 break;
738 case PIPE_B:
739 status = DE_PIPEB_VBLANK_IVB;
740 break;
741 case PIPE_C:
742 status = DE_PIPEC_VBLANK_IVB;
743 break;
744 }
745 } 737 }
746 738
747 return __raw_i915_read32(dev_priv, DEISR) & status; 739 return __raw_i915_read32(dev_priv, reg) & status;
748} 740}
749 741
750static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe, 742static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
@@ -802,7 +794,28 @@ static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
802 else 794 else
803 position = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN3; 795 position = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;
804 796
805 if (HAS_PCH_SPLIT(dev)) { 797 if (HAS_DDI(dev)) {
798 /*
799 * On HSW HDMI outputs there seems to be a 2 line
800 * difference, whereas eDP has the normal 1 line
801 * difference that earlier platforms have. External
802 * DP is unknown. For now just check for the 2 line
803 * difference case on all output types on HSW+.
804 *
805 * This might misinterpret the scanline counter being
806 * one line too far along on eDP, but that's less
807 * dangerous than the alternative since that would lead
808 * the vblank timestamp code astray when it sees a
809 * scanline count before vblank_start during a vblank
810 * interrupt.
811 */
812 in_vbl = ilk_pipe_in_vblank_locked(dev, pipe);
813 if ((in_vbl && (position == vbl_start - 2 ||
814 position == vbl_start - 1)) ||
815 (!in_vbl && (position == vbl_end - 2 ||
816 position == vbl_end - 1)))
817 position = (position + 2) % vtotal;
818 } else if (HAS_PCH_SPLIT(dev)) {
806 /* 819 /*
807 * The scanline counter increments at the leading edge 820 * The scanline counter increments at the leading edge
808 * of hsync, ie. it completely misses the active portion 821 * of hsync, ie. it completely misses the active portion
@@ -946,8 +959,8 @@ static bool intel_hpd_irq_event(struct drm_device *dev,
946 959
947static void i915_hotplug_work_func(struct work_struct *work) 960static void i915_hotplug_work_func(struct work_struct *work)
948{ 961{
949 drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t, 962 struct drm_i915_private *dev_priv =
950 hotplug_work); 963 container_of(work, struct drm_i915_private, hotplug_work);
951 struct drm_device *dev = dev_priv->dev; 964 struct drm_device *dev = dev_priv->dev;
952 struct drm_mode_config *mode_config = &dev->mode_config; 965 struct drm_mode_config *mode_config = &dev->mode_config;
953 struct intel_connector *intel_connector; 966 struct intel_connector *intel_connector;
@@ -1022,7 +1035,7 @@ static void intel_hpd_irq_uninstall(struct drm_i915_private *dev_priv)
1022 1035
1023static void ironlake_rps_change_irq_handler(struct drm_device *dev) 1036static void ironlake_rps_change_irq_handler(struct drm_device *dev)
1024{ 1037{
1025 drm_i915_private_t *dev_priv = dev->dev_private; 1038 struct drm_i915_private *dev_priv = dev->dev_private;
1026 u32 busy_up, busy_down, max_avg, min_avg; 1039 u32 busy_up, busy_down, max_avg, min_avg;
1027 u8 new_delay; 1040 u8 new_delay;
1028 1041
@@ -1071,47 +1084,10 @@ static void notify_ring(struct drm_device *dev,
1071 i915_queue_hangcheck(dev); 1084 i915_queue_hangcheck(dev);
1072} 1085}
1073 1086
1074void gen6_set_pm_mask(struct drm_i915_private *dev_priv,
1075 u32 pm_iir, int new_delay)
1076{
1077 if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
1078 if (new_delay >= dev_priv->rps.max_freq_softlimit) {
1079 /* Mask UP THRESHOLD Interrupts */
1080 I915_WRITE(GEN6_PMINTRMSK,
1081 I915_READ(GEN6_PMINTRMSK) |
1082 GEN6_PM_RP_UP_THRESHOLD);
1083 dev_priv->rps.rp_up_masked = true;
1084 }
1085 if (dev_priv->rps.rp_down_masked) {
1086 /* UnMask DOWN THRESHOLD Interrupts */
1087 I915_WRITE(GEN6_PMINTRMSK,
1088 I915_READ(GEN6_PMINTRMSK) &
1089 ~GEN6_PM_RP_DOWN_THRESHOLD);
1090 dev_priv->rps.rp_down_masked = false;
1091 }
1092 } else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) {
1093 if (new_delay <= dev_priv->rps.min_freq_softlimit) {
1094 /* Mask DOWN THRESHOLD Interrupts */
1095 I915_WRITE(GEN6_PMINTRMSK,
1096 I915_READ(GEN6_PMINTRMSK) |
1097 GEN6_PM_RP_DOWN_THRESHOLD);
1098 dev_priv->rps.rp_down_masked = true;
1099 }
1100
1101 if (dev_priv->rps.rp_up_masked) {
1102 /* UnMask UP THRESHOLD Interrupts */
1103 I915_WRITE(GEN6_PMINTRMSK,
1104 I915_READ(GEN6_PMINTRMSK) &
1105 ~GEN6_PM_RP_UP_THRESHOLD);
1106 dev_priv->rps.rp_up_masked = false;
1107 }
1108 }
1109}
1110
1111static void gen6_pm_rps_work(struct work_struct *work) 1087static void gen6_pm_rps_work(struct work_struct *work)
1112{ 1088{
1113 drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t, 1089 struct drm_i915_private *dev_priv =
1114 rps.work); 1090 container_of(work, struct drm_i915_private, rps.work);
1115 u32 pm_iir; 1091 u32 pm_iir;
1116 int new_delay, adj; 1092 int new_delay, adj;
1117 1093
@@ -1119,13 +1095,13 @@ static void gen6_pm_rps_work(struct work_struct *work)
1119 pm_iir = dev_priv->rps.pm_iir; 1095 pm_iir = dev_priv->rps.pm_iir;
1120 dev_priv->rps.pm_iir = 0; 1096 dev_priv->rps.pm_iir = 0;
1121 /* Make sure not to corrupt PMIMR state used by ringbuffer code */ 1097 /* Make sure not to corrupt PMIMR state used by ringbuffer code */
1122 snb_enable_pm_irq(dev_priv, GEN6_PM_RPS_EVENTS); 1098 snb_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
1123 spin_unlock_irq(&dev_priv->irq_lock); 1099 spin_unlock_irq(&dev_priv->irq_lock);
1124 1100
1125 /* Make sure we didn't queue anything we're not going to process. */ 1101 /* Make sure we didn't queue anything we're not going to process. */
1126 WARN_ON(pm_iir & ~GEN6_PM_RPS_EVENTS); 1102 WARN_ON(pm_iir & ~dev_priv->pm_rps_events);
1127 1103
1128 if ((pm_iir & GEN6_PM_RPS_EVENTS) == 0) 1104 if ((pm_iir & dev_priv->pm_rps_events) == 0)
1129 return; 1105 return;
1130 1106
1131 mutex_lock(&dev_priv->rps.hw_lock); 1107 mutex_lock(&dev_priv->rps.hw_lock);
@@ -1167,7 +1143,6 @@ static void gen6_pm_rps_work(struct work_struct *work)
1167 dev_priv->rps.min_freq_softlimit, 1143 dev_priv->rps.min_freq_softlimit,
1168 dev_priv->rps.max_freq_softlimit); 1144 dev_priv->rps.max_freq_softlimit);
1169 1145
1170 gen6_set_pm_mask(dev_priv, pm_iir, new_delay);
1171 dev_priv->rps.last_adj = new_delay - dev_priv->rps.cur_freq; 1146 dev_priv->rps.last_adj = new_delay - dev_priv->rps.cur_freq;
1172 1147
1173 if (IS_VALLEYVIEW(dev_priv->dev)) 1148 if (IS_VALLEYVIEW(dev_priv->dev))
@@ -1190,8 +1165,8 @@ static void gen6_pm_rps_work(struct work_struct *work)
1190 */ 1165 */
1191static void ivybridge_parity_work(struct work_struct *work) 1166static void ivybridge_parity_work(struct work_struct *work)
1192{ 1167{
1193 drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t, 1168 struct drm_i915_private *dev_priv =
1194 l3_parity.error_work); 1169 container_of(work, struct drm_i915_private, l3_parity.error_work);
1195 u32 error_status, row, bank, subbank; 1170 u32 error_status, row, bank, subbank;
1196 char *parity_event[6]; 1171 char *parity_event[6];
1197 uint32_t misccpctl; 1172 uint32_t misccpctl;
@@ -1263,7 +1238,7 @@ out:
1263 1238
1264static void ivybridge_parity_error_irq_handler(struct drm_device *dev, u32 iir) 1239static void ivybridge_parity_error_irq_handler(struct drm_device *dev, u32 iir)
1265{ 1240{
1266 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 1241 struct drm_i915_private *dev_priv = dev->dev_private;
1267 1242
1268 if (!HAS_L3_DPF(dev)) 1243 if (!HAS_L3_DPF(dev))
1269 return; 1244 return;
@@ -1374,7 +1349,7 @@ static inline void intel_hpd_irq_handler(struct drm_device *dev,
1374 u32 hotplug_trigger, 1349 u32 hotplug_trigger,
1375 const u32 *hpd) 1350 const u32 *hpd)
1376{ 1351{
1377 drm_i915_private_t *dev_priv = dev->dev_private; 1352 struct drm_i915_private *dev_priv = dev->dev_private;
1378 int i; 1353 int i;
1379 bool storm_detected = false; 1354 bool storm_detected = false;
1380 1355
@@ -1430,14 +1405,14 @@ static inline void intel_hpd_irq_handler(struct drm_device *dev,
1430 1405
1431static void gmbus_irq_handler(struct drm_device *dev) 1406static void gmbus_irq_handler(struct drm_device *dev)
1432{ 1407{
1433 struct drm_i915_private *dev_priv = (drm_i915_private_t *) dev->dev_private; 1408 struct drm_i915_private *dev_priv = dev->dev_private;
1434 1409
1435 wake_up_all(&dev_priv->gmbus_wait_queue); 1410 wake_up_all(&dev_priv->gmbus_wait_queue);
1436} 1411}
1437 1412
1438static void dp_aux_irq_handler(struct drm_device *dev) 1413static void dp_aux_irq_handler(struct drm_device *dev)
1439{ 1414{
1440 struct drm_i915_private *dev_priv = (drm_i915_private_t *) dev->dev_private; 1415 struct drm_i915_private *dev_priv = dev->dev_private;
1441 1416
1442 wake_up_all(&dev_priv->gmbus_wait_queue); 1417 wake_up_all(&dev_priv->gmbus_wait_queue);
1443} 1418}
@@ -1543,10 +1518,10 @@ static void i9xx_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
1543 * the work queue. */ 1518 * the work queue. */
1544static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir) 1519static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
1545{ 1520{
1546 if (pm_iir & GEN6_PM_RPS_EVENTS) { 1521 if (pm_iir & dev_priv->pm_rps_events) {
1547 spin_lock(&dev_priv->irq_lock); 1522 spin_lock(&dev_priv->irq_lock);
1548 dev_priv->rps.pm_iir |= pm_iir & GEN6_PM_RPS_EVENTS; 1523 dev_priv->rps.pm_iir |= pm_iir & dev_priv->pm_rps_events;
1549 snb_disable_pm_irq(dev_priv, pm_iir & GEN6_PM_RPS_EVENTS); 1524 snb_disable_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events);
1550 spin_unlock(&dev_priv->irq_lock); 1525 spin_unlock(&dev_priv->irq_lock);
1551 1526
1552 queue_work(dev_priv->wq, &dev_priv->rps.work); 1527 queue_work(dev_priv->wq, &dev_priv->rps.work);
@@ -1637,7 +1612,7 @@ static void valleyview_pipestat_irq_handler(struct drm_device *dev, u32 iir)
1637static irqreturn_t valleyview_irq_handler(int irq, void *arg) 1612static irqreturn_t valleyview_irq_handler(int irq, void *arg)
1638{ 1613{
1639 struct drm_device *dev = (struct drm_device *) arg; 1614 struct drm_device *dev = (struct drm_device *) arg;
1640 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 1615 struct drm_i915_private *dev_priv = dev->dev_private;
1641 u32 iir, gt_iir, pm_iir; 1616 u32 iir, gt_iir, pm_iir;
1642 irqreturn_t ret = IRQ_NONE; 1617 irqreturn_t ret = IRQ_NONE;
1643 1618
@@ -1684,7 +1659,7 @@ out:
1684 1659
1685static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir) 1660static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir)
1686{ 1661{
1687 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 1662 struct drm_i915_private *dev_priv = dev->dev_private;
1688 int pipe; 1663 int pipe;
1689 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK; 1664 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK;
1690 1665
@@ -1791,7 +1766,7 @@ static void cpt_serr_int_handler(struct drm_device *dev)
1791 1766
1792static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir) 1767static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir)
1793{ 1768{
1794 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 1769 struct drm_i915_private *dev_priv = dev->dev_private;
1795 int pipe; 1770 int pipe;
1796 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT; 1771 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;
1797 1772
@@ -1915,7 +1890,7 @@ static void ivb_display_irq_handler(struct drm_device *dev, u32 de_iir)
1915static irqreturn_t ironlake_irq_handler(int irq, void *arg) 1890static irqreturn_t ironlake_irq_handler(int irq, void *arg)
1916{ 1891{
1917 struct drm_device *dev = (struct drm_device *) arg; 1892 struct drm_device *dev = (struct drm_device *) arg;
1918 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 1893 struct drm_i915_private *dev_priv = dev->dev_private;
1919 u32 de_iir, gt_iir, de_ier, sde_ier = 0; 1894 u32 de_iir, gt_iir, de_ier, sde_ier = 0;
1920 irqreturn_t ret = IRQ_NONE; 1895 irqreturn_t ret = IRQ_NONE;
1921 1896
@@ -2126,8 +2101,8 @@ static void i915_error_work_func(struct work_struct *work)
2126{ 2101{
2127 struct i915_gpu_error *error = container_of(work, struct i915_gpu_error, 2102 struct i915_gpu_error *error = container_of(work, struct i915_gpu_error,
2128 work); 2103 work);
2129 drm_i915_private_t *dev_priv = container_of(error, drm_i915_private_t, 2104 struct drm_i915_private *dev_priv =
2130 gpu_error); 2105 container_of(error, struct drm_i915_private, gpu_error);
2131 struct drm_device *dev = dev_priv->dev; 2106 struct drm_device *dev = dev_priv->dev;
2132 char *error_event[] = { I915_ERROR_UEVENT "=1", NULL }; 2107 char *error_event[] = { I915_ERROR_UEVENT "=1", NULL };
2133 char *reset_event[] = { I915_RESET_UEVENT "=1", NULL }; 2108 char *reset_event[] = { I915_RESET_UEVENT "=1", NULL };
@@ -2336,7 +2311,7 @@ void i915_handle_error(struct drm_device *dev, bool wedged,
2336 2311
2337static void __always_unused i915_pageflip_stall_check(struct drm_device *dev, int pipe) 2312static void __always_unused i915_pageflip_stall_check(struct drm_device *dev, int pipe)
2338{ 2313{
2339 drm_i915_private_t *dev_priv = dev->dev_private; 2314 struct drm_i915_private *dev_priv = dev->dev_private;
2340 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe]; 2315 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
2341 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 2316 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2342 struct drm_i915_gem_object *obj; 2317 struct drm_i915_gem_object *obj;
@@ -2385,7 +2360,7 @@ static void __always_unused i915_pageflip_stall_check(struct drm_device *dev, in
2385 */ 2360 */
2386static int i915_enable_vblank(struct drm_device *dev, int pipe) 2361static int i915_enable_vblank(struct drm_device *dev, int pipe)
2387{ 2362{
2388 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2363 struct drm_i915_private *dev_priv = dev->dev_private;
2389 unsigned long irqflags; 2364 unsigned long irqflags;
2390 2365
2391 if (!i915_pipe_enabled(dev, pipe)) 2366 if (!i915_pipe_enabled(dev, pipe))
@@ -2409,7 +2384,7 @@ static int i915_enable_vblank(struct drm_device *dev, int pipe)
2409 2384
2410static int ironlake_enable_vblank(struct drm_device *dev, int pipe) 2385static int ironlake_enable_vblank(struct drm_device *dev, int pipe)
2411{ 2386{
2412 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2387 struct drm_i915_private *dev_priv = dev->dev_private;
2413 unsigned long irqflags; 2388 unsigned long irqflags;
2414 uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) : 2389 uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
2415 DE_PIPE_VBLANK(pipe); 2390 DE_PIPE_VBLANK(pipe);
@@ -2426,7 +2401,7 @@ static int ironlake_enable_vblank(struct drm_device *dev, int pipe)
2426 2401
2427static int valleyview_enable_vblank(struct drm_device *dev, int pipe) 2402static int valleyview_enable_vblank(struct drm_device *dev, int pipe)
2428{ 2403{
2429 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2404 struct drm_i915_private *dev_priv = dev->dev_private;
2430 unsigned long irqflags; 2405 unsigned long irqflags;
2431 2406
2432 if (!i915_pipe_enabled(dev, pipe)) 2407 if (!i915_pipe_enabled(dev, pipe))
@@ -2461,7 +2436,7 @@ static int gen8_enable_vblank(struct drm_device *dev, int pipe)
2461 */ 2436 */
2462static void i915_disable_vblank(struct drm_device *dev, int pipe) 2437static void i915_disable_vblank(struct drm_device *dev, int pipe)
2463{ 2438{
2464 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2439 struct drm_i915_private *dev_priv = dev->dev_private;
2465 unsigned long irqflags; 2440 unsigned long irqflags;
2466 2441
2467 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2442 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
@@ -2476,7 +2451,7 @@ static void i915_disable_vblank(struct drm_device *dev, int pipe)
2476 2451
2477static void ironlake_disable_vblank(struct drm_device *dev, int pipe) 2452static void ironlake_disable_vblank(struct drm_device *dev, int pipe)
2478{ 2453{
2479 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2454 struct drm_i915_private *dev_priv = dev->dev_private;
2480 unsigned long irqflags; 2455 unsigned long irqflags;
2481 uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) : 2456 uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
2482 DE_PIPE_VBLANK(pipe); 2457 DE_PIPE_VBLANK(pipe);
@@ -2488,7 +2463,7 @@ static void ironlake_disable_vblank(struct drm_device *dev, int pipe)
2488 2463
2489static void valleyview_disable_vblank(struct drm_device *dev, int pipe) 2464static void valleyview_disable_vblank(struct drm_device *dev, int pipe)
2490{ 2465{
2491 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2466 struct drm_i915_private *dev_priv = dev->dev_private;
2492 unsigned long irqflags; 2467 unsigned long irqflags;
2493 2468
2494 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2469 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
@@ -2530,29 +2505,43 @@ static struct intel_ring_buffer *
2530semaphore_waits_for(struct intel_ring_buffer *ring, u32 *seqno) 2505semaphore_waits_for(struct intel_ring_buffer *ring, u32 *seqno)
2531{ 2506{
2532 struct drm_i915_private *dev_priv = ring->dev->dev_private; 2507 struct drm_i915_private *dev_priv = ring->dev->dev_private;
2533 u32 cmd, ipehr, acthd, acthd_min; 2508 u32 cmd, ipehr, head;
2509 int i;
2534 2510
2535 ipehr = I915_READ(RING_IPEHR(ring->mmio_base)); 2511 ipehr = I915_READ(RING_IPEHR(ring->mmio_base));
2536 if ((ipehr & ~(0x3 << 16)) != 2512 if ((ipehr & ~(0x3 << 16)) !=
2537 (MI_SEMAPHORE_MBOX | MI_SEMAPHORE_COMPARE | MI_SEMAPHORE_REGISTER)) 2513 (MI_SEMAPHORE_MBOX | MI_SEMAPHORE_COMPARE | MI_SEMAPHORE_REGISTER))
2538 return NULL; 2514 return NULL;
2539 2515
2540 /* ACTHD is likely pointing to the dword after the actual command, 2516 /*
2541 * so scan backwards until we find the MBOX. 2517 * HEAD is likely pointing to the dword after the actual command,
2518 * so scan backwards until we find the MBOX. But limit it to just 3
2519 * dwords. Note that we don't care about ACTHD here since that might
2520 * point at at batch, and semaphores are always emitted into the
2521 * ringbuffer itself.
2542 */ 2522 */
2543 acthd = intel_ring_get_active_head(ring) & HEAD_ADDR; 2523 head = I915_READ_HEAD(ring) & HEAD_ADDR;
2544 acthd_min = max((int)acthd - 3 * 4, 0); 2524
2545 do { 2525 for (i = 4; i; --i) {
2546 cmd = ioread32(ring->virtual_start + acthd); 2526 /*
2527 * Be paranoid and presume the hw has gone off into the wild -
2528 * our ring is smaller than what the hardware (and hence
2529 * HEAD_ADDR) allows. Also handles wrap-around.
2530 */
2531 head &= ring->size - 1;
2532
2533 /* This here seems to blow up */
2534 cmd = ioread32(ring->virtual_start + head);
2547 if (cmd == ipehr) 2535 if (cmd == ipehr)
2548 break; 2536 break;
2549 2537
2550 acthd -= 4; 2538 head -= 4;
2551 if (acthd < acthd_min) 2539 }
2552 return NULL;
2553 } while (1);
2554 2540
2555 *seqno = ioread32(ring->virtual_start+acthd+4)+1; 2541 if (!i)
2542 return NULL;
2543
2544 *seqno = ioread32(ring->virtual_start + head + 4) + 1;
2556 return &dev_priv->ring[(ring->id + (((ipehr >> 17) & 1) + 1)) % 3]; 2545 return &dev_priv->ring[(ring->id + (((ipehr >> 17) & 1) + 1)) % 3];
2557} 2546}
2558 2547
@@ -2586,7 +2575,7 @@ static void semaphore_clear_deadlocks(struct drm_i915_private *dev_priv)
2586} 2575}
2587 2576
2588static enum intel_ring_hangcheck_action 2577static enum intel_ring_hangcheck_action
2589ring_stuck(struct intel_ring_buffer *ring, u32 acthd) 2578ring_stuck(struct intel_ring_buffer *ring, u64 acthd)
2590{ 2579{
2591 struct drm_device *dev = ring->dev; 2580 struct drm_device *dev = ring->dev;
2592 struct drm_i915_private *dev_priv = dev->dev_private; 2581 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -2641,7 +2630,7 @@ ring_stuck(struct intel_ring_buffer *ring, u32 acthd)
2641static void i915_hangcheck_elapsed(unsigned long data) 2630static void i915_hangcheck_elapsed(unsigned long data)
2642{ 2631{
2643 struct drm_device *dev = (struct drm_device *)data; 2632 struct drm_device *dev = (struct drm_device *)data;
2644 drm_i915_private_t *dev_priv = dev->dev_private; 2633 struct drm_i915_private *dev_priv = dev->dev_private;
2645 struct intel_ring_buffer *ring; 2634 struct intel_ring_buffer *ring;
2646 int i; 2635 int i;
2647 int busy_count = 0, rings_hung = 0; 2636 int busy_count = 0, rings_hung = 0;
@@ -2654,7 +2643,8 @@ static void i915_hangcheck_elapsed(unsigned long data)
2654 return; 2643 return;
2655 2644
2656 for_each_ring(ring, dev_priv, i) { 2645 for_each_ring(ring, dev_priv, i) {
2657 u32 seqno, acthd; 2646 u64 acthd;
2647 u32 seqno;
2658 bool busy = true; 2648 bool busy = true;
2659 2649
2660 semaphore_clear_deadlocks(dev_priv); 2650 semaphore_clear_deadlocks(dev_priv);
@@ -2799,7 +2789,7 @@ static void gen5_gt_irq_preinstall(struct drm_device *dev)
2799*/ 2789*/
2800static void ironlake_irq_preinstall(struct drm_device *dev) 2790static void ironlake_irq_preinstall(struct drm_device *dev)
2801{ 2791{
2802 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2792 struct drm_i915_private *dev_priv = dev->dev_private;
2803 2793
2804 I915_WRITE(HWSTAM, 0xeffe); 2794 I915_WRITE(HWSTAM, 0xeffe);
2805 2795
@@ -2814,7 +2804,7 @@ static void ironlake_irq_preinstall(struct drm_device *dev)
2814 2804
2815static void valleyview_irq_preinstall(struct drm_device *dev) 2805static void valleyview_irq_preinstall(struct drm_device *dev)
2816{ 2806{
2817 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2807 struct drm_i915_private *dev_priv = dev->dev_private;
2818 int pipe; 2808 int pipe;
2819 2809
2820 /* VLV magic */ 2810 /* VLV magic */
@@ -2890,7 +2880,7 @@ static void gen8_irq_preinstall(struct drm_device *dev)
2890 2880
2891static void ibx_hpd_irq_setup(struct drm_device *dev) 2881static void ibx_hpd_irq_setup(struct drm_device *dev)
2892{ 2882{
2893 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2883 struct drm_i915_private *dev_priv = dev->dev_private;
2894 struct drm_mode_config *mode_config = &dev->mode_config; 2884 struct drm_mode_config *mode_config = &dev->mode_config;
2895 struct intel_encoder *intel_encoder; 2885 struct intel_encoder *intel_encoder;
2896 u32 hotplug_irqs, hotplug, enabled_irqs = 0; 2886 u32 hotplug_irqs, hotplug, enabled_irqs = 0;
@@ -2925,17 +2915,16 @@ static void ibx_hpd_irq_setup(struct drm_device *dev)
2925 2915
2926static void ibx_irq_postinstall(struct drm_device *dev) 2916static void ibx_irq_postinstall(struct drm_device *dev)
2927{ 2917{
2928 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2918 struct drm_i915_private *dev_priv = dev->dev_private;
2929 u32 mask; 2919 u32 mask;
2930 2920
2931 if (HAS_PCH_NOP(dev)) 2921 if (HAS_PCH_NOP(dev))
2932 return; 2922 return;
2933 2923
2934 if (HAS_PCH_IBX(dev)) { 2924 if (HAS_PCH_IBX(dev)) {
2935 mask = SDE_GMBUS | SDE_AUX_MASK | SDE_TRANSB_FIFO_UNDER | 2925 mask = SDE_GMBUS | SDE_AUX_MASK | SDE_POISON;
2936 SDE_TRANSA_FIFO_UNDER | SDE_POISON;
2937 } else { 2926 } else {
2938 mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT | SDE_ERROR_CPT; 2927 mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT;
2939 2928
2940 I915_WRITE(SERR_INT, I915_READ(SERR_INT)); 2929 I915_WRITE(SERR_INT, I915_READ(SERR_INT));
2941 } 2930 }
@@ -2972,7 +2961,7 @@ static void gen5_gt_irq_postinstall(struct drm_device *dev)
2972 POSTING_READ(GTIER); 2961 POSTING_READ(GTIER);
2973 2962
2974 if (INTEL_INFO(dev)->gen >= 6) { 2963 if (INTEL_INFO(dev)->gen >= 6) {
2975 pm_irqs |= GEN6_PM_RPS_EVENTS; 2964 pm_irqs |= dev_priv->pm_rps_events;
2976 2965
2977 if (HAS_VEBOX(dev)) 2966 if (HAS_VEBOX(dev))
2978 pm_irqs |= PM_VEBOX_USER_INTERRUPT; 2967 pm_irqs |= PM_VEBOX_USER_INTERRUPT;
@@ -2988,27 +2977,26 @@ static void gen5_gt_irq_postinstall(struct drm_device *dev)
2988static int ironlake_irq_postinstall(struct drm_device *dev) 2977static int ironlake_irq_postinstall(struct drm_device *dev)
2989{ 2978{
2990 unsigned long irqflags; 2979 unsigned long irqflags;
2991 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2980 struct drm_i915_private *dev_priv = dev->dev_private;
2992 u32 display_mask, extra_mask; 2981 u32 display_mask, extra_mask;
2993 2982
2994 if (INTEL_INFO(dev)->gen >= 7) { 2983 if (INTEL_INFO(dev)->gen >= 7) {
2995 display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB | 2984 display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB |
2996 DE_PCH_EVENT_IVB | DE_PLANEC_FLIP_DONE_IVB | 2985 DE_PCH_EVENT_IVB | DE_PLANEC_FLIP_DONE_IVB |
2997 DE_PLANEB_FLIP_DONE_IVB | 2986 DE_PLANEB_FLIP_DONE_IVB |
2998 DE_PLANEA_FLIP_DONE_IVB | DE_AUX_CHANNEL_A_IVB | 2987 DE_PLANEA_FLIP_DONE_IVB | DE_AUX_CHANNEL_A_IVB);
2999 DE_ERR_INT_IVB);
3000 extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB | 2988 extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB |
3001 DE_PIPEA_VBLANK_IVB); 2989 DE_PIPEA_VBLANK_IVB | DE_ERR_INT_IVB);
3002 2990
3003 I915_WRITE(GEN7_ERR_INT, I915_READ(GEN7_ERR_INT)); 2991 I915_WRITE(GEN7_ERR_INT, I915_READ(GEN7_ERR_INT));
3004 } else { 2992 } else {
3005 display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT | 2993 display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
3006 DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE | 2994 DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE |
3007 DE_AUX_CHANNEL_A | 2995 DE_AUX_CHANNEL_A |
3008 DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN |
3009 DE_PIPEB_CRC_DONE | DE_PIPEA_CRC_DONE | 2996 DE_PIPEB_CRC_DONE | DE_PIPEA_CRC_DONE |
3010 DE_POISON); 2997 DE_POISON);
3011 extra_mask = DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT; 2998 extra_mask = DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT |
2999 DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN;
3012 } 3000 }
3013 3001
3014 dev_priv->irq_mask = ~display_mask; 3002 dev_priv->irq_mask = ~display_mask;
@@ -3126,7 +3114,7 @@ void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv)
3126 3114
3127static int valleyview_irq_postinstall(struct drm_device *dev) 3115static int valleyview_irq_postinstall(struct drm_device *dev)
3128{ 3116{
3129 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 3117 struct drm_i915_private *dev_priv = dev->dev_private;
3130 unsigned long irqflags; 3118 unsigned long irqflags;
3131 3119
3132 dev_priv->irq_mask = ~0; 3120 dev_priv->irq_mask = ~0;
@@ -3193,9 +3181,9 @@ static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
3193 struct drm_device *dev = dev_priv->dev; 3181 struct drm_device *dev = dev_priv->dev;
3194 uint32_t de_pipe_masked = GEN8_PIPE_FLIP_DONE | 3182 uint32_t de_pipe_masked = GEN8_PIPE_FLIP_DONE |
3195 GEN8_PIPE_CDCLK_CRC_DONE | 3183 GEN8_PIPE_CDCLK_CRC_DONE |
3196 GEN8_PIPE_FIFO_UNDERRUN |
3197 GEN8_DE_PIPE_IRQ_FAULT_ERRORS; 3184 GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
3198 uint32_t de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK; 3185 uint32_t de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK |
3186 GEN8_PIPE_FIFO_UNDERRUN;
3199 int pipe; 3187 int pipe;
3200 dev_priv->de_irq_mask[PIPE_A] = ~de_pipe_masked; 3188 dev_priv->de_irq_mask[PIPE_A] = ~de_pipe_masked;
3201 dev_priv->de_irq_mask[PIPE_B] = ~de_pipe_masked; 3189 dev_priv->de_irq_mask[PIPE_B] = ~de_pipe_masked;
@@ -3273,7 +3261,7 @@ static void gen8_irq_uninstall(struct drm_device *dev)
3273 3261
3274static void valleyview_irq_uninstall(struct drm_device *dev) 3262static void valleyview_irq_uninstall(struct drm_device *dev)
3275{ 3263{
3276 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 3264 struct drm_i915_private *dev_priv = dev->dev_private;
3277 unsigned long irqflags; 3265 unsigned long irqflags;
3278 int pipe; 3266 int pipe;
3279 3267
@@ -3304,7 +3292,7 @@ static void valleyview_irq_uninstall(struct drm_device *dev)
3304 3292
3305static void ironlake_irq_uninstall(struct drm_device *dev) 3293static void ironlake_irq_uninstall(struct drm_device *dev)
3306{ 3294{
3307 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 3295 struct drm_i915_private *dev_priv = dev->dev_private;
3308 3296
3309 if (!dev_priv) 3297 if (!dev_priv)
3310 return; 3298 return;
@@ -3335,7 +3323,7 @@ static void ironlake_irq_uninstall(struct drm_device *dev)
3335 3323
3336static void i8xx_irq_preinstall(struct drm_device * dev) 3324static void i8xx_irq_preinstall(struct drm_device * dev)
3337{ 3325{
3338 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 3326 struct drm_i915_private *dev_priv = dev->dev_private;
3339 int pipe; 3327 int pipe;
3340 3328
3341 for_each_pipe(pipe) 3329 for_each_pipe(pipe)
@@ -3347,7 +3335,7 @@ static void i8xx_irq_preinstall(struct drm_device * dev)
3347 3335
3348static int i8xx_irq_postinstall(struct drm_device *dev) 3336static int i8xx_irq_postinstall(struct drm_device *dev)
3349{ 3337{
3350 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 3338 struct drm_i915_private *dev_priv = dev->dev_private;
3351 unsigned long irqflags; 3339 unsigned long irqflags;
3352 3340
3353 I915_WRITE16(EMR, 3341 I915_WRITE16(EMR,
@@ -3385,7 +3373,7 @@ static int i8xx_irq_postinstall(struct drm_device *dev)
3385static bool i8xx_handle_vblank(struct drm_device *dev, 3373static bool i8xx_handle_vblank(struct drm_device *dev,
3386 int plane, int pipe, u32 iir) 3374 int plane, int pipe, u32 iir)
3387{ 3375{
3388 drm_i915_private_t *dev_priv = dev->dev_private; 3376 struct drm_i915_private *dev_priv = dev->dev_private;
3389 u16 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane); 3377 u16 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);
3390 3378
3391 if (!drm_handle_vblank(dev, pipe)) 3379 if (!drm_handle_vblank(dev, pipe))
@@ -3413,7 +3401,7 @@ static bool i8xx_handle_vblank(struct drm_device *dev,
3413static irqreturn_t i8xx_irq_handler(int irq, void *arg) 3401static irqreturn_t i8xx_irq_handler(int irq, void *arg)
3414{ 3402{
3415 struct drm_device *dev = (struct drm_device *) arg; 3403 struct drm_device *dev = (struct drm_device *) arg;
3416 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 3404 struct drm_i915_private *dev_priv = dev->dev_private;
3417 u16 iir, new_iir; 3405 u16 iir, new_iir;
3418 u32 pipe_stats[2]; 3406 u32 pipe_stats[2];
3419 unsigned long irqflags; 3407 unsigned long irqflags;
@@ -3483,7 +3471,7 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg)
3483 3471
3484static void i8xx_irq_uninstall(struct drm_device * dev) 3472static void i8xx_irq_uninstall(struct drm_device * dev)
3485{ 3473{
3486 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 3474 struct drm_i915_private *dev_priv = dev->dev_private;
3487 int pipe; 3475 int pipe;
3488 3476
3489 for_each_pipe(pipe) { 3477 for_each_pipe(pipe) {
@@ -3498,7 +3486,7 @@ static void i8xx_irq_uninstall(struct drm_device * dev)
3498 3486
3499static void i915_irq_preinstall(struct drm_device * dev) 3487static void i915_irq_preinstall(struct drm_device * dev)
3500{ 3488{
3501 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 3489 struct drm_i915_private *dev_priv = dev->dev_private;
3502 int pipe; 3490 int pipe;
3503 3491
3504 if (I915_HAS_HOTPLUG(dev)) { 3492 if (I915_HAS_HOTPLUG(dev)) {
@@ -3516,7 +3504,7 @@ static void i915_irq_preinstall(struct drm_device * dev)
3516 3504
3517static int i915_irq_postinstall(struct drm_device *dev) 3505static int i915_irq_postinstall(struct drm_device *dev)
3518{ 3506{
3519 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 3507 struct drm_i915_private *dev_priv = dev->dev_private;
3520 u32 enable_mask; 3508 u32 enable_mask;
3521 unsigned long irqflags; 3509 unsigned long irqflags;
3522 3510
@@ -3570,7 +3558,7 @@ static int i915_irq_postinstall(struct drm_device *dev)
3570static bool i915_handle_vblank(struct drm_device *dev, 3558static bool i915_handle_vblank(struct drm_device *dev,
3571 int plane, int pipe, u32 iir) 3559 int plane, int pipe, u32 iir)
3572{ 3560{
3573 drm_i915_private_t *dev_priv = dev->dev_private; 3561 struct drm_i915_private *dev_priv = dev->dev_private;
3574 u32 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane); 3562 u32 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);
3575 3563
3576 if (!drm_handle_vblank(dev, pipe)) 3564 if (!drm_handle_vblank(dev, pipe))
@@ -3598,7 +3586,7 @@ static bool i915_handle_vblank(struct drm_device *dev,
3598static irqreturn_t i915_irq_handler(int irq, void *arg) 3586static irqreturn_t i915_irq_handler(int irq, void *arg)
3599{ 3587{
3600 struct drm_device *dev = (struct drm_device *) arg; 3588 struct drm_device *dev = (struct drm_device *) arg;
3601 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 3589 struct drm_i915_private *dev_priv = dev->dev_private;
3602 u32 iir, new_iir, pipe_stats[I915_MAX_PIPES]; 3590 u32 iir, new_iir, pipe_stats[I915_MAX_PIPES];
3603 unsigned long irqflags; 3591 unsigned long irqflags;
3604 u32 flip_mask = 3592 u32 flip_mask =
@@ -3704,7 +3692,7 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
3704 3692
3705static void i915_irq_uninstall(struct drm_device * dev) 3693static void i915_irq_uninstall(struct drm_device * dev)
3706{ 3694{
3707 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 3695 struct drm_i915_private *dev_priv = dev->dev_private;
3708 int pipe; 3696 int pipe;
3709 3697
3710 intel_hpd_irq_uninstall(dev_priv); 3698 intel_hpd_irq_uninstall(dev_priv);
@@ -3728,7 +3716,7 @@ static void i915_irq_uninstall(struct drm_device * dev)
3728 3716
3729static void i965_irq_preinstall(struct drm_device * dev) 3717static void i965_irq_preinstall(struct drm_device * dev)
3730{ 3718{
3731 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 3719 struct drm_i915_private *dev_priv = dev->dev_private;
3732 int pipe; 3720 int pipe;
3733 3721
3734 I915_WRITE(PORT_HOTPLUG_EN, 0); 3722 I915_WRITE(PORT_HOTPLUG_EN, 0);
@@ -3744,7 +3732,7 @@ static void i965_irq_preinstall(struct drm_device * dev)
3744 3732
3745static int i965_irq_postinstall(struct drm_device *dev) 3733static int i965_irq_postinstall(struct drm_device *dev)
3746{ 3734{
3747 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 3735 struct drm_i915_private *dev_priv = dev->dev_private;
3748 u32 enable_mask; 3736 u32 enable_mask;
3749 u32 error_mask; 3737 u32 error_mask;
3750 unsigned long irqflags; 3738 unsigned long irqflags;
@@ -3803,7 +3791,7 @@ static int i965_irq_postinstall(struct drm_device *dev)
3803 3791
3804static void i915_hpd_irq_setup(struct drm_device *dev) 3792static void i915_hpd_irq_setup(struct drm_device *dev)
3805{ 3793{
3806 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 3794 struct drm_i915_private *dev_priv = dev->dev_private;
3807 struct drm_mode_config *mode_config = &dev->mode_config; 3795 struct drm_mode_config *mode_config = &dev->mode_config;
3808 struct intel_encoder *intel_encoder; 3796 struct intel_encoder *intel_encoder;
3809 u32 hotplug_en; 3797 u32 hotplug_en;
@@ -3835,7 +3823,7 @@ static void i915_hpd_irq_setup(struct drm_device *dev)
3835static irqreturn_t i965_irq_handler(int irq, void *arg) 3823static irqreturn_t i965_irq_handler(int irq, void *arg)
3836{ 3824{
3837 struct drm_device *dev = (struct drm_device *) arg; 3825 struct drm_device *dev = (struct drm_device *) arg;
3838 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 3826 struct drm_i915_private *dev_priv = dev->dev_private;
3839 u32 iir, new_iir; 3827 u32 iir, new_iir;
3840 u32 pipe_stats[I915_MAX_PIPES]; 3828 u32 pipe_stats[I915_MAX_PIPES];
3841 unsigned long irqflags; 3829 unsigned long irqflags;
@@ -3953,7 +3941,7 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
3953 3941
3954static void i965_irq_uninstall(struct drm_device * dev) 3942static void i965_irq_uninstall(struct drm_device * dev)
3955{ 3943{
3956 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 3944 struct drm_i915_private *dev_priv = dev->dev_private;
3957 int pipe; 3945 int pipe;
3958 3946
3959 if (!dev_priv) 3947 if (!dev_priv)
@@ -3978,7 +3966,7 @@ static void i965_irq_uninstall(struct drm_device * dev)
3978 3966
3979static void intel_hpd_irq_reenable(unsigned long data) 3967static void intel_hpd_irq_reenable(unsigned long data)
3980{ 3968{
3981 drm_i915_private_t *dev_priv = (drm_i915_private_t *)data; 3969 struct drm_i915_private *dev_priv = (struct drm_i915_private *)data;
3982 struct drm_device *dev = dev_priv->dev; 3970 struct drm_device *dev = dev_priv->dev;
3983 struct drm_mode_config *mode_config = &dev->mode_config; 3971 struct drm_mode_config *mode_config = &dev->mode_config;
3984 unsigned long irqflags; 3972 unsigned long irqflags;
@@ -4020,6 +4008,9 @@ void intel_irq_init(struct drm_device *dev)
4020 INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work); 4008 INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work);
4021 INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work); 4009 INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);
4022 4010
4011 /* Let's track the enabled rps events */
4012 dev_priv->pm_rps_events = GEN6_PM_RPS_EVENTS;
4013
4023 setup_timer(&dev_priv->gpu_error.hangcheck_timer, 4014 setup_timer(&dev_priv->gpu_error.hangcheck_timer,
4024 i915_hangcheck_elapsed, 4015 i915_hangcheck_elapsed,
4025 (unsigned long) dev); 4016 (unsigned long) dev);
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 74f7d853eb58..9f5b18d9d885 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -706,6 +706,7 @@ enum punit_power_well {
706#define BLT_HWS_PGA_GEN7 (0x04280) 706#define BLT_HWS_PGA_GEN7 (0x04280)
707#define VEBOX_HWS_PGA_GEN7 (0x04380) 707#define VEBOX_HWS_PGA_GEN7 (0x04380)
708#define RING_ACTHD(base) ((base)+0x74) 708#define RING_ACTHD(base) ((base)+0x74)
709#define RING_ACTHD_UDW(base) ((base)+0x5c)
709#define RING_NOPID(base) ((base)+0x94) 710#define RING_NOPID(base) ((base)+0x94)
710#define RING_IMR(base) ((base)+0xa8) 711#define RING_IMR(base) ((base)+0xa8)
711#define RING_TIMESTAMP(base) ((base)+0x358) 712#define RING_TIMESTAMP(base) ((base)+0x358)
@@ -973,7 +974,8 @@ enum punit_power_well {
973#define CACHE_MODE_0_GEN7 0x7000 /* IVB+ */ 974#define CACHE_MODE_0_GEN7 0x7000 /* IVB+ */
974#define HIZ_RAW_STALL_OPT_DISABLE (1<<2) 975#define HIZ_RAW_STALL_OPT_DISABLE (1<<2)
975#define CACHE_MODE_1 0x7004 /* IVB+ */ 976#define CACHE_MODE_1 0x7004 /* IVB+ */
976#define PIXEL_SUBSPAN_COLLECT_OPT_DISABLE (1<<6) 977#define PIXEL_SUBSPAN_COLLECT_OPT_DISABLE (1<<6)
978#define GEN8_4x4_STC_OPTIMIZATION_DISABLE (1<<6)
977 979
978#define GEN6_BLITTER_ECOSKPD 0x221d0 980#define GEN6_BLITTER_ECOSKPD 0x221d0
979#define GEN6_BLITTER_LOCK_SHIFT 16 981#define GEN6_BLITTER_LOCK_SHIFT 16
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index 4b4e8f0f9621..aa5a3dc43342 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -804,6 +804,14 @@ static const struct dmi_system_id intel_no_crt[] = {
804 DMI_MATCH(DMI_PRODUCT_NAME, "ZGB"), 804 DMI_MATCH(DMI_PRODUCT_NAME, "ZGB"),
805 }, 805 },
806 }, 806 },
807 {
808 .callback = intel_no_crt_dmi_callback,
809 .ident = "DELL XPS 8700",
810 .matches = {
811 DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
812 DMI_MATCH(DMI_PRODUCT_NAME, "XPS 8700"),
813 },
814 },
807 { } 815 { }
808}; 816};
809 817
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index 070bf2e78d61..0ad4e9600063 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -1108,8 +1108,13 @@ bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector)
1108 enum port port = intel_ddi_get_encoder_port(intel_encoder); 1108 enum port port = intel_ddi_get_encoder_port(intel_encoder);
1109 enum pipe pipe = 0; 1109 enum pipe pipe = 0;
1110 enum transcoder cpu_transcoder; 1110 enum transcoder cpu_transcoder;
1111 enum intel_display_power_domain power_domain;
1111 uint32_t tmp; 1112 uint32_t tmp;
1112 1113
1114 power_domain = intel_display_port_power_domain(intel_encoder);
1115 if (!intel_display_power_enabled(dev_priv, power_domain))
1116 return false;
1117
1113 if (!intel_encoder->get_hw_state(intel_encoder, &pipe)) 1118 if (!intel_encoder->get_hw_state(intel_encoder, &pipe))
1114 return false; 1119 return false;
1115 1120
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 6332383abae9..dae976f51d83 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -5375,21 +5375,26 @@ static void intel_set_pipe_timings(struct intel_crtc *intel_crtc)
5375 enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder; 5375 enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder;
5376 struct drm_display_mode *adjusted_mode = 5376 struct drm_display_mode *adjusted_mode =
5377 &intel_crtc->config.adjusted_mode; 5377 &intel_crtc->config.adjusted_mode;
5378 uint32_t vsyncshift, crtc_vtotal, crtc_vblank_end; 5378 uint32_t crtc_vtotal, crtc_vblank_end;
5379 int vsyncshift = 0;
5379 5380
5380 /* We need to be careful not to changed the adjusted mode, for otherwise 5381 /* We need to be careful not to changed the adjusted mode, for otherwise
5381 * the hw state checker will get angry at the mismatch. */ 5382 * the hw state checker will get angry at the mismatch. */
5382 crtc_vtotal = adjusted_mode->crtc_vtotal; 5383 crtc_vtotal = adjusted_mode->crtc_vtotal;
5383 crtc_vblank_end = adjusted_mode->crtc_vblank_end; 5384 crtc_vblank_end = adjusted_mode->crtc_vblank_end;
5384 5385
5385 if (!IS_GEN2(dev) && adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) { 5386 if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
5386 /* the chip adds 2 halflines automatically */ 5387 /* the chip adds 2 halflines automatically */
5387 crtc_vtotal -= 1; 5388 crtc_vtotal -= 1;
5388 crtc_vblank_end -= 1; 5389 crtc_vblank_end -= 1;
5389 vsyncshift = adjusted_mode->crtc_hsync_start 5390
5390 - adjusted_mode->crtc_htotal / 2; 5391 if (intel_pipe_has_type(&intel_crtc->base, INTEL_OUTPUT_SDVO))
5391 } else { 5392 vsyncshift = (adjusted_mode->crtc_htotal - 1) / 2;
5392 vsyncshift = 0; 5393 else
5394 vsyncshift = adjusted_mode->crtc_hsync_start -
5395 adjusted_mode->crtc_htotal / 2;
5396 if (vsyncshift < 0)
5397 vsyncshift += adjusted_mode->crtc_htotal;
5393 } 5398 }
5394 5399
5395 if (INTEL_INFO(dev)->gen > 3) 5400 if (INTEL_INFO(dev)->gen > 3)
@@ -5539,10 +5544,13 @@ static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc)
5539 } 5544 }
5540 } 5545 }
5541 5546
5542 if (!IS_GEN2(dev) && 5547 if (intel_crtc->config.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
5543 intel_crtc->config.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) 5548 if (INTEL_INFO(dev)->gen < 4 ||
5544 pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION; 5549 intel_pipe_has_type(&intel_crtc->base, INTEL_OUTPUT_SDVO))
5545 else 5550 pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
5551 else
5552 pipeconf |= PIPECONF_INTERLACE_W_SYNC_SHIFT;
5553 } else
5546 pipeconf |= PIPECONF_PROGRESSIVE; 5554 pipeconf |= PIPECONF_PROGRESSIVE;
5547 5555
5548 if (IS_VALLEYVIEW(dev) && intel_crtc->config.limited_color_range) 5556 if (IS_VALLEYVIEW(dev) && intel_crtc->config.limited_color_range)
@@ -7751,6 +7759,7 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
7751 struct drm_i915_private *dev_priv = dev->dev_private; 7759 struct drm_i915_private *dev_priv = dev->dev_private;
7752 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 7760 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
7753 struct drm_i915_gem_object *obj; 7761 struct drm_i915_gem_object *obj;
7762 unsigned old_width;
7754 uint32_t addr; 7763 uint32_t addr;
7755 int ret; 7764 int ret;
7756 7765
@@ -7841,13 +7850,18 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
7841 7850
7842 mutex_unlock(&dev->struct_mutex); 7851 mutex_unlock(&dev->struct_mutex);
7843 7852
7853 old_width = intel_crtc->cursor_width;
7854
7844 intel_crtc->cursor_addr = addr; 7855 intel_crtc->cursor_addr = addr;
7845 intel_crtc->cursor_bo = obj; 7856 intel_crtc->cursor_bo = obj;
7846 intel_crtc->cursor_width = width; 7857 intel_crtc->cursor_width = width;
7847 intel_crtc->cursor_height = height; 7858 intel_crtc->cursor_height = height;
7848 7859
7849 if (intel_crtc->active) 7860 if (intel_crtc->active) {
7861 if (old_width != width)
7862 intel_update_watermarks(crtc);
7850 intel_crtc_update_cursor(crtc, intel_crtc->cursor_bo != NULL); 7863 intel_crtc_update_cursor(crtc, intel_crtc->cursor_bo != NULL);
7864 }
7851 7865
7852 return 0; 7866 return 0;
7853fail_unpin: 7867fail_unpin:
@@ -8351,7 +8365,7 @@ struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
8351static void intel_increase_pllclock(struct drm_crtc *crtc) 8365static void intel_increase_pllclock(struct drm_crtc *crtc)
8352{ 8366{
8353 struct drm_device *dev = crtc->dev; 8367 struct drm_device *dev = crtc->dev;
8354 drm_i915_private_t *dev_priv = dev->dev_private; 8368 struct drm_i915_private *dev_priv = dev->dev_private;
8355 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 8369 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
8356 int pipe = intel_crtc->pipe; 8370 int pipe = intel_crtc->pipe;
8357 int dpll_reg = DPLL(pipe); 8371 int dpll_reg = DPLL(pipe);
@@ -8382,7 +8396,7 @@ static void intel_increase_pllclock(struct drm_crtc *crtc)
8382static void intel_decrease_pllclock(struct drm_crtc *crtc) 8396static void intel_decrease_pllclock(struct drm_crtc *crtc)
8383{ 8397{
8384 struct drm_device *dev = crtc->dev; 8398 struct drm_device *dev = crtc->dev;
8385 drm_i915_private_t *dev_priv = dev->dev_private; 8399 struct drm_i915_private *dev_priv = dev->dev_private;
8386 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 8400 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
8387 8401
8388 if (HAS_PCH_SPLIT(dev)) 8402 if (HAS_PCH_SPLIT(dev))
@@ -8523,7 +8537,7 @@ static void intel_unpin_work_fn(struct work_struct *__work)
8523static void do_intel_finish_page_flip(struct drm_device *dev, 8537static void do_intel_finish_page_flip(struct drm_device *dev,
8524 struct drm_crtc *crtc) 8538 struct drm_crtc *crtc)
8525{ 8539{
8526 drm_i915_private_t *dev_priv = dev->dev_private; 8540 struct drm_i915_private *dev_priv = dev->dev_private;
8527 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 8541 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
8528 struct intel_unpin_work *work; 8542 struct intel_unpin_work *work;
8529 unsigned long flags; 8543 unsigned long flags;
@@ -8564,7 +8578,7 @@ static void do_intel_finish_page_flip(struct drm_device *dev,
8564 8578
8565void intel_finish_page_flip(struct drm_device *dev, int pipe) 8579void intel_finish_page_flip(struct drm_device *dev, int pipe)
8566{ 8580{
8567 drm_i915_private_t *dev_priv = dev->dev_private; 8581 struct drm_i915_private *dev_priv = dev->dev_private;
8568 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe]; 8582 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
8569 8583
8570 do_intel_finish_page_flip(dev, crtc); 8584 do_intel_finish_page_flip(dev, crtc);
@@ -8572,7 +8586,7 @@ void intel_finish_page_flip(struct drm_device *dev, int pipe)
8572 8586
8573void intel_finish_page_flip_plane(struct drm_device *dev, int plane) 8587void intel_finish_page_flip_plane(struct drm_device *dev, int plane)
8574{ 8588{
8575 drm_i915_private_t *dev_priv = dev->dev_private; 8589 struct drm_i915_private *dev_priv = dev->dev_private;
8576 struct drm_crtc *crtc = dev_priv->plane_to_crtc_mapping[plane]; 8590 struct drm_crtc *crtc = dev_priv->plane_to_crtc_mapping[plane];
8577 8591
8578 do_intel_finish_page_flip(dev, crtc); 8592 do_intel_finish_page_flip(dev, crtc);
@@ -8580,7 +8594,7 @@ void intel_finish_page_flip_plane(struct drm_device *dev, int plane)
8580 8594
8581void intel_prepare_page_flip(struct drm_device *dev, int plane) 8595void intel_prepare_page_flip(struct drm_device *dev, int plane)
8582{ 8596{
8583 drm_i915_private_t *dev_priv = dev->dev_private; 8597 struct drm_i915_private *dev_priv = dev->dev_private;
8584 struct intel_crtc *intel_crtc = 8598 struct intel_crtc *intel_crtc =
8585 to_intel_crtc(dev_priv->plane_to_crtc_mapping[plane]); 8599 to_intel_crtc(dev_priv->plane_to_crtc_mapping[plane]);
8586 unsigned long flags; 8600 unsigned long flags;
@@ -9755,7 +9769,7 @@ check_encoder_state(struct drm_device *dev)
9755static void 9769static void
9756check_crtc_state(struct drm_device *dev) 9770check_crtc_state(struct drm_device *dev)
9757{ 9771{
9758 drm_i915_private_t *dev_priv = dev->dev_private; 9772 struct drm_i915_private *dev_priv = dev->dev_private;
9759 struct intel_crtc *crtc; 9773 struct intel_crtc *crtc;
9760 struct intel_encoder *encoder; 9774 struct intel_encoder *encoder;
9761 struct intel_crtc_config pipe_config; 9775 struct intel_crtc_config pipe_config;
@@ -9823,7 +9837,7 @@ check_crtc_state(struct drm_device *dev)
9823static void 9837static void
9824check_shared_dpll_state(struct drm_device *dev) 9838check_shared_dpll_state(struct drm_device *dev)
9825{ 9839{
9826 drm_i915_private_t *dev_priv = dev->dev_private; 9840 struct drm_i915_private *dev_priv = dev->dev_private;
9827 struct intel_crtc *crtc; 9841 struct intel_crtc *crtc;
9828 struct intel_dpll_hw_state dpll_hw_state; 9842 struct intel_dpll_hw_state dpll_hw_state;
9829 int i; 9843 int i;
@@ -9896,7 +9910,7 @@ static int __intel_set_mode(struct drm_crtc *crtc,
9896 int x, int y, struct drm_framebuffer *fb) 9910 int x, int y, struct drm_framebuffer *fb)
9897{ 9911{
9898 struct drm_device *dev = crtc->dev; 9912 struct drm_device *dev = crtc->dev;
9899 drm_i915_private_t *dev_priv = dev->dev_private; 9913 struct drm_i915_private *dev_priv = dev->dev_private;
9900 struct drm_display_mode *saved_mode; 9914 struct drm_display_mode *saved_mode;
9901 struct intel_crtc_config *pipe_config = NULL; 9915 struct intel_crtc_config *pipe_config = NULL;
9902 struct intel_crtc *intel_crtc; 9916 struct intel_crtc *intel_crtc;
@@ -10543,7 +10557,7 @@ static void intel_shared_dpll_init(struct drm_device *dev)
10543 10557
10544static void intel_crtc_init(struct drm_device *dev, int pipe) 10558static void intel_crtc_init(struct drm_device *dev, int pipe)
10545{ 10559{
10546 drm_i915_private_t *dev_priv = dev->dev_private; 10560 struct drm_i915_private *dev_priv = dev->dev_private;
10547 struct intel_crtc *intel_crtc; 10561 struct intel_crtc *intel_crtc;
10548 int i; 10562 int i;
10549 10563
@@ -11505,6 +11519,17 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc)
11505 encoder->base.crtc = NULL; 11519 encoder->base.crtc = NULL;
11506 } 11520 }
11507 } 11521 }
11522 if (crtc->active) {
11523 /*
11524 * We start out with underrun reporting disabled to avoid races.
11525 * For correct bookkeeping mark this on active crtcs.
11526 *
11527 * No protection against concurrent access is required - at
11528 * worst a fifo underrun happens which also sets this to false.
11529 */
11530 crtc->cpu_fifo_underrun_disabled = true;
11531 crtc->pch_fifo_underrun_disabled = true;
11532 }
11508} 11533}
11509 11534
11510static void intel_sanitize_encoder(struct intel_encoder *encoder) 11535static void intel_sanitize_encoder(struct intel_encoder *encoder)
@@ -11740,6 +11765,10 @@ void intel_modeset_gem_init(struct drm_device *dev)
11740 struct drm_crtc *c; 11765 struct drm_crtc *c;
11741 struct intel_framebuffer *fb; 11766 struct intel_framebuffer *fb;
11742 11767
11768 mutex_lock(&dev->struct_mutex);
11769 intel_init_gt_powersave(dev);
11770 mutex_unlock(&dev->struct_mutex);
11771
11743 intel_modeset_init_hw(dev); 11772 intel_modeset_init_hw(dev);
11744 11773
11745 intel_setup_overlay(dev); 11774 intel_setup_overlay(dev);
@@ -11826,6 +11855,10 @@ void intel_modeset_cleanup(struct drm_device *dev)
11826 drm_mode_config_cleanup(dev); 11855 drm_mode_config_cleanup(dev);
11827 11856
11828 intel_cleanup_overlay(dev); 11857 intel_cleanup_overlay(dev);
11858
11859 mutex_lock(&dev->struct_mutex);
11860 intel_cleanup_gt_powersave(dev);
11861 mutex_unlock(&dev->struct_mutex);
11829} 11862}
11830 11863
11831/* 11864/*
@@ -11920,7 +11953,7 @@ struct intel_display_error_state {
11920struct intel_display_error_state * 11953struct intel_display_error_state *
11921intel_display_capture_error_state(struct drm_device *dev) 11954intel_display_capture_error_state(struct drm_device *dev)
11922{ 11955{
11923 drm_i915_private_t *dev_priv = dev->dev_private; 11956 struct drm_i915_private *dev_priv = dev->dev_private;
11924 struct intel_display_error_state *error; 11957 struct intel_display_error_state *error;
11925 int transcoders[] = { 11958 int transcoders[] = {
11926 TRANSCODER_A, 11959 TRANSCODER_A,
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 5ce5e5be7e88..a0dad1a2f819 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -314,7 +314,8 @@ static bool edp_have_panel_vdd(struct intel_dp *intel_dp)
314 struct drm_device *dev = intel_dp_to_dev(intel_dp); 314 struct drm_device *dev = intel_dp_to_dev(intel_dp);
315 struct drm_i915_private *dev_priv = dev->dev_private; 315 struct drm_i915_private *dev_priv = dev->dev_private;
316 316
317 return (I915_READ(_pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD) != 0; 317 return !dev_priv->pm.suspended &&
318 (I915_READ(_pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD) != 0;
318} 319}
319 320
320static void 321static void
@@ -1621,7 +1622,7 @@ static void intel_edp_psr_enable_source(struct intel_dp *intel_dp)
1621 val |= EDP_PSR_LINK_DISABLE; 1622 val |= EDP_PSR_LINK_DISABLE;
1622 1623
1623 I915_WRITE(EDP_PSR_CTL(dev), val | 1624 I915_WRITE(EDP_PSR_CTL(dev), val |
1624 IS_BROADWELL(dev) ? 0 : link_entry_time | 1625 (IS_BROADWELL(dev) ? 0 : link_entry_time) |
1625 max_sleep_time << EDP_PSR_MAX_SLEEP_TIME_SHIFT | 1626 max_sleep_time << EDP_PSR_MAX_SLEEP_TIME_SHIFT |
1626 idle_frames << EDP_PSR_IDLE_FRAME_SHIFT | 1627 idle_frames << EDP_PSR_IDLE_FRAME_SHIFT |
1627 EDP_PSR_ENABLE); 1628 EDP_PSR_ENABLE);
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index fa9910481ab0..0542de982260 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -897,6 +897,8 @@ void intel_display_power_get(struct drm_i915_private *dev_priv,
897void intel_display_power_put(struct drm_i915_private *dev_priv, 897void intel_display_power_put(struct drm_i915_private *dev_priv,
898 enum intel_display_power_domain domain); 898 enum intel_display_power_domain domain);
899void intel_power_domains_init_hw(struct drm_i915_private *dev_priv); 899void intel_power_domains_init_hw(struct drm_i915_private *dev_priv);
900void intel_init_gt_powersave(struct drm_device *dev);
901void intel_cleanup_gt_powersave(struct drm_device *dev);
900void intel_enable_gt_powersave(struct drm_device *dev); 902void intel_enable_gt_powersave(struct drm_device *dev);
901void intel_disable_gt_powersave(struct drm_device *dev); 903void intel_disable_gt_powersave(struct drm_device *dev);
902void ironlake_teardown_rc6(struct drm_device *dev); 904void ironlake_teardown_rc6(struct drm_device *dev);
diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c
index 2b1d42dbfe13..b4d44e62f0c7 100644
--- a/drivers/gpu/drm/i915/intel_fbdev.c
+++ b/drivers/gpu/drm/i915/intel_fbdev.c
@@ -518,16 +518,29 @@ static bool intel_fbdev_init_bios(struct drm_device *dev,
518 518
519 /* 519 /*
520 * See if the plane fb we found above will fit on this 520 * See if the plane fb we found above will fit on this
521 * pipe. Note we need to use the selected fb's bpp rather 521 * pipe. Note we need to use the selected fb's pitch and bpp
522 * than the current pipe's, since they could be different. 522 * rather than the current pipe's, since they differ.
523 */ 523 */
524 cur_size = intel_crtc->config.adjusted_mode.crtc_hdisplay * 524 cur_size = intel_crtc->config.adjusted_mode.crtc_hdisplay;
525 intel_crtc->config.adjusted_mode.crtc_vdisplay; 525 cur_size = cur_size * fb->base.bits_per_pixel / 8;
526 DRM_DEBUG_KMS("pipe %c area: %d\n", pipe_name(intel_crtc->pipe), 526 if (fb->base.pitches[0] < cur_size) {
527 DRM_DEBUG_KMS("fb not wide enough for plane %c (%d vs %d)\n",
528 pipe_name(intel_crtc->pipe),
529 cur_size, fb->base.pitches[0]);
530 plane_config = NULL;
531 fb = NULL;
532 break;
533 }
534
535 cur_size = intel_crtc->config.adjusted_mode.crtc_vdisplay;
536 cur_size = ALIGN(cur_size, plane_config->tiled ? (IS_GEN2(dev) ? 16 : 8) : 1);
537 cur_size *= fb->base.pitches[0];
538 DRM_DEBUG_KMS("pipe %c area: %dx%d, bpp: %d, size: %d\n",
539 pipe_name(intel_crtc->pipe),
540 intel_crtc->config.adjusted_mode.crtc_hdisplay,
541 intel_crtc->config.adjusted_mode.crtc_vdisplay,
542 fb->base.bits_per_pixel,
527 cur_size); 543 cur_size);
528 cur_size *= fb->base.bits_per_pixel / 8;
529 DRM_DEBUG_KMS("total size %d (bpp %d)\n", cur_size,
530 fb->base.bits_per_pixel / 8);
531 544
532 if (cur_size > max_size) { 545 if (cur_size > max_size) {
533 DRM_DEBUG_KMS("fb not big enough for plane %c (%d vs %d)\n", 546 DRM_DEBUG_KMS("fb not big enough for plane %c (%d vs %d)\n",
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index 623cd328b196..d8adc9104dca 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -189,7 +189,7 @@ struct intel_overlay {
189static struct overlay_registers __iomem * 189static struct overlay_registers __iomem *
190intel_overlay_map_regs(struct intel_overlay *overlay) 190intel_overlay_map_regs(struct intel_overlay *overlay)
191{ 191{
192 drm_i915_private_t *dev_priv = overlay->dev->dev_private; 192 struct drm_i915_private *dev_priv = overlay->dev->dev_private;
193 struct overlay_registers __iomem *regs; 193 struct overlay_registers __iomem *regs;
194 194
195 if (OVERLAY_NEEDS_PHYSICAL(overlay->dev)) 195 if (OVERLAY_NEEDS_PHYSICAL(overlay->dev))
@@ -212,7 +212,7 @@ static int intel_overlay_do_wait_request(struct intel_overlay *overlay,
212 void (*tail)(struct intel_overlay *)) 212 void (*tail)(struct intel_overlay *))
213{ 213{
214 struct drm_device *dev = overlay->dev; 214 struct drm_device *dev = overlay->dev;
215 drm_i915_private_t *dev_priv = dev->dev_private; 215 struct drm_i915_private *dev_priv = dev->dev_private;
216 struct intel_ring_buffer *ring = &dev_priv->ring[RCS]; 216 struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
217 int ret; 217 int ret;
218 218
@@ -262,7 +262,7 @@ static int intel_overlay_continue(struct intel_overlay *overlay,
262 bool load_polyphase_filter) 262 bool load_polyphase_filter)
263{ 263{
264 struct drm_device *dev = overlay->dev; 264 struct drm_device *dev = overlay->dev;
265 drm_i915_private_t *dev_priv = dev->dev_private; 265 struct drm_i915_private *dev_priv = dev->dev_private;
266 struct intel_ring_buffer *ring = &dev_priv->ring[RCS]; 266 struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
267 u32 flip_addr = overlay->flip_addr; 267 u32 flip_addr = overlay->flip_addr;
268 u32 tmp; 268 u32 tmp;
@@ -362,7 +362,7 @@ static int intel_overlay_off(struct intel_overlay *overlay)
362static int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay) 362static int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay)
363{ 363{
364 struct drm_device *dev = overlay->dev; 364 struct drm_device *dev = overlay->dev;
365 drm_i915_private_t *dev_priv = dev->dev_private; 365 struct drm_i915_private *dev_priv = dev->dev_private;
366 struct intel_ring_buffer *ring = &dev_priv->ring[RCS]; 366 struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
367 int ret; 367 int ret;
368 368
@@ -388,7 +388,7 @@ static int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay)
388static int intel_overlay_release_old_vid(struct intel_overlay *overlay) 388static int intel_overlay_release_old_vid(struct intel_overlay *overlay)
389{ 389{
390 struct drm_device *dev = overlay->dev; 390 struct drm_device *dev = overlay->dev;
391 drm_i915_private_t *dev_priv = dev->dev_private; 391 struct drm_i915_private *dev_priv = dev->dev_private;
392 struct intel_ring_buffer *ring = &dev_priv->ring[RCS]; 392 struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
393 int ret; 393 int ret;
394 394
@@ -834,7 +834,7 @@ static int check_overlay_possible_on_crtc(struct intel_overlay *overlay,
834static void update_pfit_vscale_ratio(struct intel_overlay *overlay) 834static void update_pfit_vscale_ratio(struct intel_overlay *overlay)
835{ 835{
836 struct drm_device *dev = overlay->dev; 836 struct drm_device *dev = overlay->dev;
837 drm_i915_private_t *dev_priv = dev->dev_private; 837 struct drm_i915_private *dev_priv = dev->dev_private;
838 u32 pfit_control = I915_READ(PFIT_CONTROL); 838 u32 pfit_control = I915_READ(PFIT_CONTROL);
839 u32 ratio; 839 u32 ratio;
840 840
@@ -1026,7 +1026,7 @@ int intel_overlay_put_image(struct drm_device *dev, void *data,
1026 struct drm_file *file_priv) 1026 struct drm_file *file_priv)
1027{ 1027{
1028 struct drm_intel_overlay_put_image *put_image_rec = data; 1028 struct drm_intel_overlay_put_image *put_image_rec = data;
1029 drm_i915_private_t *dev_priv = dev->dev_private; 1029 struct drm_i915_private *dev_priv = dev->dev_private;
1030 struct intel_overlay *overlay; 1030 struct intel_overlay *overlay;
1031 struct drm_mode_object *drmmode_obj; 1031 struct drm_mode_object *drmmode_obj;
1032 struct intel_crtc *crtc; 1032 struct intel_crtc *crtc;
@@ -1226,7 +1226,7 @@ int intel_overlay_attrs(struct drm_device *dev, void *data,
1226 struct drm_file *file_priv) 1226 struct drm_file *file_priv)
1227{ 1227{
1228 struct drm_intel_overlay_attrs *attrs = data; 1228 struct drm_intel_overlay_attrs *attrs = data;
1229 drm_i915_private_t *dev_priv = dev->dev_private; 1229 struct drm_i915_private *dev_priv = dev->dev_private;
1230 struct intel_overlay *overlay; 1230 struct intel_overlay *overlay;
1231 struct overlay_registers __iomem *regs; 1231 struct overlay_registers __iomem *regs;
1232 int ret; 1232 int ret;
@@ -1311,7 +1311,7 @@ out_unlock:
1311 1311
1312void intel_setup_overlay(struct drm_device *dev) 1312void intel_setup_overlay(struct drm_device *dev)
1313{ 1313{
1314 drm_i915_private_t *dev_priv = dev->dev_private; 1314 struct drm_i915_private *dev_priv = dev->dev_private;
1315 struct intel_overlay *overlay; 1315 struct intel_overlay *overlay;
1316 struct drm_i915_gem_object *reg_bo; 1316 struct drm_i915_gem_object *reg_bo;
1317 struct overlay_registers __iomem *regs; 1317 struct overlay_registers __iomem *regs;
@@ -1397,7 +1397,7 @@ out_free:
1397 1397
1398void intel_cleanup_overlay(struct drm_device *dev) 1398void intel_cleanup_overlay(struct drm_device *dev)
1399{ 1399{
1400 drm_i915_private_t *dev_priv = dev->dev_private; 1400 struct drm_i915_private *dev_priv = dev->dev_private;
1401 1401
1402 if (!dev_priv->overlay) 1402 if (!dev_priv->overlay)
1403 return; 1403 return;
@@ -1421,7 +1421,7 @@ struct intel_overlay_error_state {
1421static struct overlay_registers __iomem * 1421static struct overlay_registers __iomem *
1422intel_overlay_map_regs_atomic(struct intel_overlay *overlay) 1422intel_overlay_map_regs_atomic(struct intel_overlay *overlay)
1423{ 1423{
1424 drm_i915_private_t *dev_priv = overlay->dev->dev_private; 1424 struct drm_i915_private *dev_priv = overlay->dev->dev_private;
1425 struct overlay_registers __iomem *regs; 1425 struct overlay_registers __iomem *regs;
1426 1426
1427 if (OVERLAY_NEEDS_PHYSICAL(overlay->dev)) 1427 if (OVERLAY_NEEDS_PHYSICAL(overlay->dev))
@@ -1447,7 +1447,7 @@ static void intel_overlay_unmap_regs_atomic(struct intel_overlay *overlay,
1447struct intel_overlay_error_state * 1447struct intel_overlay_error_state *
1448intel_overlay_capture_error_state(struct drm_device *dev) 1448intel_overlay_capture_error_state(struct drm_device *dev)
1449{ 1449{
1450 drm_i915_private_t *dev_priv = dev->dev_private; 1450 struct drm_i915_private *dev_priv = dev->dev_private;
1451 struct intel_overlay *overlay = dev_priv->overlay; 1451 struct intel_overlay *overlay = dev_priv->overlay;
1452 struct intel_overlay_error_state *error; 1452 struct intel_overlay_error_state *error;
1453 struct overlay_registers __iomem *regs; 1453 struct overlay_registers __iomem *regs;
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 6e73125fc782..5874716774a7 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -623,7 +623,7 @@ out_disable:
623 623
624static void i915_pineview_get_mem_freq(struct drm_device *dev) 624static void i915_pineview_get_mem_freq(struct drm_device *dev)
625{ 625{
626 drm_i915_private_t *dev_priv = dev->dev_private; 626 struct drm_i915_private *dev_priv = dev->dev_private;
627 u32 tmp; 627 u32 tmp;
628 628
629 tmp = I915_READ(CLKCFG); 629 tmp = I915_READ(CLKCFG);
@@ -662,7 +662,7 @@ static void i915_pineview_get_mem_freq(struct drm_device *dev)
662 662
663static void i915_ironlake_get_mem_freq(struct drm_device *dev) 663static void i915_ironlake_get_mem_freq(struct drm_device *dev)
664{ 664{
665 drm_i915_private_t *dev_priv = dev->dev_private; 665 struct drm_i915_private *dev_priv = dev->dev_private;
666 u16 ddrpll, csipll; 666 u16 ddrpll, csipll;
667 667
668 ddrpll = I915_READ16(DDRMPLL1); 668 ddrpll = I915_READ16(DDRMPLL1);
@@ -1136,7 +1136,7 @@ static bool g4x_compute_wm0(struct drm_device *dev,
1136 /* Use the large buffer method to calculate cursor watermark */ 1136 /* Use the large buffer method to calculate cursor watermark */
1137 line_time_us = max(htotal * 1000 / clock, 1); 1137 line_time_us = max(htotal * 1000 / clock, 1);
1138 line_count = (cursor_latency_ns / line_time_us + 1000) / 1000; 1138 line_count = (cursor_latency_ns / line_time_us + 1000) / 1000;
1139 entries = line_count * 64 * pixel_size; 1139 entries = line_count * to_intel_crtc(crtc)->cursor_width * pixel_size;
1140 tlb_miss = cursor->fifo_size*cursor->cacheline_size - hdisplay * 8; 1140 tlb_miss = cursor->fifo_size*cursor->cacheline_size - hdisplay * 8;
1141 if (tlb_miss > 0) 1141 if (tlb_miss > 0)
1142 entries += tlb_miss; 1142 entries += tlb_miss;
@@ -1222,7 +1222,7 @@ static bool g4x_compute_srwm(struct drm_device *dev,
1222 *display_wm = entries + display->guard_size; 1222 *display_wm = entries + display->guard_size;
1223 1223
1224 /* calculate the self-refresh watermark for display cursor */ 1224 /* calculate the self-refresh watermark for display cursor */
1225 entries = line_count * pixel_size * 64; 1225 entries = line_count * pixel_size * to_intel_crtc(crtc)->cursor_width;
1226 entries = DIV_ROUND_UP(entries, cursor->cacheline_size); 1226 entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
1227 *cursor_wm = entries + cursor->guard_size; 1227 *cursor_wm = entries + cursor->guard_size;
1228 1228
@@ -1457,7 +1457,7 @@ static void i965_update_wm(struct drm_crtc *unused_crtc)
1457 entries, srwm); 1457 entries, srwm);
1458 1458
1459 entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) * 1459 entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
1460 pixel_size * 64; 1460 pixel_size * to_intel_crtc(crtc)->cursor_width;
1461 entries = DIV_ROUND_UP(entries, 1461 entries = DIV_ROUND_UP(entries,
1462 i965_cursor_wm_info.cacheline_size); 1462 i965_cursor_wm_info.cacheline_size);
1463 cursor_sr = i965_cursor_wm_info.fifo_size - 1463 cursor_sr = i965_cursor_wm_info.fifo_size -
@@ -2120,7 +2120,7 @@ static void ilk_compute_wm_parameters(struct drm_crtc *crtc,
2120 p->pri.bytes_per_pixel = crtc->primary->fb->bits_per_pixel / 8; 2120 p->pri.bytes_per_pixel = crtc->primary->fb->bits_per_pixel / 8;
2121 p->cur.bytes_per_pixel = 4; 2121 p->cur.bytes_per_pixel = 4;
2122 p->pri.horiz_pixels = intel_crtc->config.pipe_src_w; 2122 p->pri.horiz_pixels = intel_crtc->config.pipe_src_w;
2123 p->cur.horiz_pixels = 64; 2123 p->cur.horiz_pixels = intel_crtc->cursor_width;
2124 /* TODO: for now, assume primary and cursor planes are always enabled. */ 2124 /* TODO: for now, assume primary and cursor planes are always enabled. */
2125 p->pri.enabled = true; 2125 p->pri.enabled = true;
2126 p->cur.enabled = true; 2126 p->cur.enabled = true;
@@ -3006,6 +3006,24 @@ static void gen6_set_rps_thresholds(struct drm_i915_private *dev_priv, u8 val)
3006 dev_priv->rps.last_adj = 0; 3006 dev_priv->rps.last_adj = 0;
3007} 3007}
3008 3008
3009static u32 gen6_rps_pm_mask(struct drm_i915_private *dev_priv, u8 val)
3010{
3011 u32 mask = 0;
3012
3013 if (val > dev_priv->rps.min_freq_softlimit)
3014 mask |= GEN6_PM_RP_DOWN_THRESHOLD | GEN6_PM_RP_DOWN_TIMEOUT;
3015 if (val < dev_priv->rps.max_freq_softlimit)
3016 mask |= GEN6_PM_RP_UP_THRESHOLD;
3017
3018 /* IVB and SNB hard hangs on looping batchbuffer
3019 * if GEN6_PM_UP_EI_EXPIRED is masked.
3020 */
3021 if (INTEL_INFO(dev_priv->dev)->gen <= 7 && !IS_HASWELL(dev_priv->dev))
3022 mask |= GEN6_PM_RP_UP_EI_EXPIRED;
3023
3024 return ~mask;
3025}
3026
3009/* gen6_set_rps is called to update the frequency request, but should also be 3027/* gen6_set_rps is called to update the frequency request, but should also be
3010 * called when the range (min_delay and max_delay) is modified so that we can 3028 * called when the range (min_delay and max_delay) is modified so that we can
3011 * update the GEN6_RP_INTERRUPT_LIMITS register accordingly. */ 3029 * update the GEN6_RP_INTERRUPT_LIMITS register accordingly. */
@@ -3017,36 +3035,31 @@ void gen6_set_rps(struct drm_device *dev, u8 val)
3017 WARN_ON(val > dev_priv->rps.max_freq_softlimit); 3035 WARN_ON(val > dev_priv->rps.max_freq_softlimit);
3018 WARN_ON(val < dev_priv->rps.min_freq_softlimit); 3036 WARN_ON(val < dev_priv->rps.min_freq_softlimit);
3019 3037
3020 if (val == dev_priv->rps.cur_freq) { 3038 /* min/max delay may still have been modified so be sure to
3021 /* min/max delay may still have been modified so be sure to 3039 * write the limits value.
3022 * write the limits value */ 3040 */
3023 I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, 3041 if (val != dev_priv->rps.cur_freq) {
3024 gen6_rps_limits(dev_priv, val)); 3042 gen6_set_rps_thresholds(dev_priv, val);
3025 3043
3026 return; 3044 if (IS_HASWELL(dev))
3045 I915_WRITE(GEN6_RPNSWREQ,
3046 HSW_FREQUENCY(val));
3047 else
3048 I915_WRITE(GEN6_RPNSWREQ,
3049 GEN6_FREQUENCY(val) |
3050 GEN6_OFFSET(0) |
3051 GEN6_AGGRESSIVE_TURBO);
3027 } 3052 }
3028 3053
3029 gen6_set_rps_thresholds(dev_priv, val);
3030
3031 if (IS_HASWELL(dev))
3032 I915_WRITE(GEN6_RPNSWREQ,
3033 HSW_FREQUENCY(val));
3034 else
3035 I915_WRITE(GEN6_RPNSWREQ,
3036 GEN6_FREQUENCY(val) |
3037 GEN6_OFFSET(0) |
3038 GEN6_AGGRESSIVE_TURBO);
3039
3040 /* Make sure we continue to get interrupts 3054 /* Make sure we continue to get interrupts
3041 * until we hit the minimum or maximum frequencies. 3055 * until we hit the minimum or maximum frequencies.
3042 */ 3056 */
3043 I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, 3057 I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, gen6_rps_limits(dev_priv, val));
3044 gen6_rps_limits(dev_priv, val)); 3058 I915_WRITE(GEN6_PMINTRMSK, gen6_rps_pm_mask(dev_priv, val));
3045 3059
3046 POSTING_READ(GEN6_RPNSWREQ); 3060 POSTING_READ(GEN6_RPNSWREQ);
3047 3061
3048 dev_priv->rps.cur_freq = val; 3062 dev_priv->rps.cur_freq = val;
3049
3050 trace_intel_gpu_freq_change(val * 50); 3063 trace_intel_gpu_freq_change(val * 50);
3051} 3064}
3052 3065
@@ -3096,10 +3109,8 @@ static void vlv_set_rps_idle(struct drm_i915_private *dev_priv)
3096 I915_READ(VLV_GTLC_SURVIVABILITY_REG) & 3109 I915_READ(VLV_GTLC_SURVIVABILITY_REG) &
3097 ~VLV_GFX_CLK_FORCE_ON_BIT); 3110 ~VLV_GFX_CLK_FORCE_ON_BIT);
3098 3111
3099 /* Unmask Up interrupts */ 3112 I915_WRITE(GEN6_PMINTRMSK,
3100 dev_priv->rps.rp_up_masked = true; 3113 gen6_rps_pm_mask(dev_priv, dev_priv->rps.cur_freq));
3101 gen6_set_pm_mask(dev_priv, GEN6_PM_RP_DOWN_THRESHOLD,
3102 dev_priv->rps.min_freq_softlimit);
3103} 3114}
3104 3115
3105void gen6_rps_idle(struct drm_i915_private *dev_priv) 3116void gen6_rps_idle(struct drm_i915_private *dev_priv)
@@ -3145,13 +3156,12 @@ void valleyview_set_rps(struct drm_device *dev, u8 val)
3145 dev_priv->rps.cur_freq, 3156 dev_priv->rps.cur_freq,
3146 vlv_gpu_freq(dev_priv, val), val); 3157 vlv_gpu_freq(dev_priv, val), val);
3147 3158
3148 if (val == dev_priv->rps.cur_freq) 3159 if (val != dev_priv->rps.cur_freq)
3149 return; 3160 vlv_punit_write(dev_priv, PUNIT_REG_GPU_FREQ_REQ, val);
3150 3161
3151 vlv_punit_write(dev_priv, PUNIT_REG_GPU_FREQ_REQ, val); 3162 I915_WRITE(GEN6_PMINTRMSK, gen6_rps_pm_mask(dev_priv, val));
3152 3163
3153 dev_priv->rps.cur_freq = val; 3164 dev_priv->rps.cur_freq = val;
3154
3155 trace_intel_gpu_freq_change(vlv_gpu_freq(dev_priv, val)); 3165 trace_intel_gpu_freq_change(vlv_gpu_freq(dev_priv, val));
3156} 3166}
3157 3167
@@ -3160,7 +3170,8 @@ static void gen6_disable_rps_interrupts(struct drm_device *dev)
3160 struct drm_i915_private *dev_priv = dev->dev_private; 3170 struct drm_i915_private *dev_priv = dev->dev_private;
3161 3171
3162 I915_WRITE(GEN6_PMINTRMSK, 0xffffffff); 3172 I915_WRITE(GEN6_PMINTRMSK, 0xffffffff);
3163 I915_WRITE(GEN6_PMIER, I915_READ(GEN6_PMIER) & ~GEN6_PM_RPS_EVENTS); 3173 I915_WRITE(GEN6_PMIER, I915_READ(GEN6_PMIER) &
3174 ~dev_priv->pm_rps_events);
3164 /* Complete PM interrupt masking here doesn't race with the rps work 3175 /* Complete PM interrupt masking here doesn't race with the rps work
3165 * item again unmasking PM interrupts because that is using a different 3176 * item again unmasking PM interrupts because that is using a different
3166 * register (PMIMR) to mask PM interrupts. The only risk is in leaving 3177 * register (PMIMR) to mask PM interrupts. The only risk is in leaving
@@ -3170,7 +3181,7 @@ static void gen6_disable_rps_interrupts(struct drm_device *dev)
3170 dev_priv->rps.pm_iir = 0; 3181 dev_priv->rps.pm_iir = 0;
3171 spin_unlock_irq(&dev_priv->irq_lock); 3182 spin_unlock_irq(&dev_priv->irq_lock);
3172 3183
3173 I915_WRITE(GEN6_PMIIR, GEN6_PM_RPS_EVENTS); 3184 I915_WRITE(GEN6_PMIIR, dev_priv->pm_rps_events);
3174} 3185}
3175 3186
3176static void gen6_disable_rps(struct drm_device *dev) 3187static void gen6_disable_rps(struct drm_device *dev)
@@ -3190,11 +3201,6 @@ static void valleyview_disable_rps(struct drm_device *dev)
3190 I915_WRITE(GEN6_RC_CONTROL, 0); 3201 I915_WRITE(GEN6_RC_CONTROL, 0);
3191 3202
3192 gen6_disable_rps_interrupts(dev); 3203 gen6_disable_rps_interrupts(dev);
3193
3194 if (dev_priv->vlv_pctx) {
3195 drm_gem_object_unreference(&dev_priv->vlv_pctx->base);
3196 dev_priv->vlv_pctx = NULL;
3197 }
3198} 3204}
3199 3205
3200static void intel_print_rc6_info(struct drm_device *dev, u32 mode) 3206static void intel_print_rc6_info(struct drm_device *dev, u32 mode)
@@ -3228,24 +3234,12 @@ int intel_enable_rc6(const struct drm_device *dev)
3228static void gen6_enable_rps_interrupts(struct drm_device *dev) 3234static void gen6_enable_rps_interrupts(struct drm_device *dev)
3229{ 3235{
3230 struct drm_i915_private *dev_priv = dev->dev_private; 3236 struct drm_i915_private *dev_priv = dev->dev_private;
3231 u32 enabled_intrs;
3232 3237
3233 spin_lock_irq(&dev_priv->irq_lock); 3238 spin_lock_irq(&dev_priv->irq_lock);
3234 WARN_ON(dev_priv->rps.pm_iir); 3239 WARN_ON(dev_priv->rps.pm_iir);
3235 snb_enable_pm_irq(dev_priv, GEN6_PM_RPS_EVENTS); 3240 snb_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
3236 I915_WRITE(GEN6_PMIIR, GEN6_PM_RPS_EVENTS); 3241 I915_WRITE(GEN6_PMIIR, dev_priv->pm_rps_events);
3237 spin_unlock_irq(&dev_priv->irq_lock); 3242 spin_unlock_irq(&dev_priv->irq_lock);
3238
3239 /* only unmask PM interrupts we need. Mask all others. */
3240 enabled_intrs = GEN6_PM_RPS_EVENTS;
3241
3242 /* IVB and SNB hard hangs on looping batchbuffer
3243 * if GEN6_PM_UP_EI_EXPIRED is masked.
3244 */
3245 if (INTEL_INFO(dev)->gen <= 7 && !IS_HASWELL(dev))
3246 enabled_intrs |= GEN6_PM_RP_UP_EI_EXPIRED;
3247
3248 I915_WRITE(GEN6_PMINTRMSK, ~enabled_intrs);
3249} 3243}
3250 3244
3251static void gen8_enable_rps(struct drm_device *dev) 3245static void gen8_enable_rps(struct drm_device *dev)
@@ -3550,6 +3544,15 @@ int valleyview_rps_min_freq(struct drm_i915_private *dev_priv)
3550 return vlv_punit_read(dev_priv, PUNIT_REG_GPU_LFM) & 0xff; 3544 return vlv_punit_read(dev_priv, PUNIT_REG_GPU_LFM) & 0xff;
3551} 3545}
3552 3546
3547/* Check that the pctx buffer wasn't move under us. */
3548static void valleyview_check_pctx(struct drm_i915_private *dev_priv)
3549{
3550 unsigned long pctx_addr = I915_READ(VLV_PCBR) & ~4095;
3551
3552 WARN_ON(pctx_addr != dev_priv->mm.stolen_base +
3553 dev_priv->vlv_pctx->stolen->start);
3554}
3555
3553static void valleyview_setup_pctx(struct drm_device *dev) 3556static void valleyview_setup_pctx(struct drm_device *dev)
3554{ 3557{
3555 struct drm_i915_private *dev_priv = dev->dev_private; 3558 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -3594,6 +3597,17 @@ out:
3594 dev_priv->vlv_pctx = pctx; 3597 dev_priv->vlv_pctx = pctx;
3595} 3598}
3596 3599
3600static void valleyview_cleanup_pctx(struct drm_device *dev)
3601{
3602 struct drm_i915_private *dev_priv = dev->dev_private;
3603
3604 if (WARN_ON(!dev_priv->vlv_pctx))
3605 return;
3606
3607 drm_gem_object_unreference(&dev_priv->vlv_pctx->base);
3608 dev_priv->vlv_pctx = NULL;
3609}
3610
3597static void valleyview_enable_rps(struct drm_device *dev) 3611static void valleyview_enable_rps(struct drm_device *dev)
3598{ 3612{
3599 struct drm_i915_private *dev_priv = dev->dev_private; 3613 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -3603,6 +3617,8 @@ static void valleyview_enable_rps(struct drm_device *dev)
3603 3617
3604 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock)); 3618 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
3605 3619
3620 valleyview_check_pctx(dev_priv);
3621
3606 if ((gtfifodbg = I915_READ(GTFIFODBG))) { 3622 if ((gtfifodbg = I915_READ(GTFIFODBG))) {
3607 DRM_DEBUG_DRIVER("GT fifo had a previous error %x\n", 3623 DRM_DEBUG_DRIVER("GT fifo had a previous error %x\n",
3608 gtfifodbg); 3624 gtfifodbg);
@@ -3687,9 +3703,6 @@ static void valleyview_enable_rps(struct drm_device *dev)
3687 3703
3688 valleyview_set_rps(dev_priv->dev, dev_priv->rps.efficient_freq); 3704 valleyview_set_rps(dev_priv->dev, dev_priv->rps.efficient_freq);
3689 3705
3690 dev_priv->rps.rp_up_masked = false;
3691 dev_priv->rps.rp_down_masked = false;
3692
3693 gen6_enable_rps_interrupts(dev); 3706 gen6_enable_rps_interrupts(dev);
3694 3707
3695 gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL); 3708 gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
@@ -4422,6 +4435,18 @@ static void intel_init_emon(struct drm_device *dev)
4422 dev_priv->ips.corr = (lcfuse & LCFUSE_HIV_MASK); 4435 dev_priv->ips.corr = (lcfuse & LCFUSE_HIV_MASK);
4423} 4436}
4424 4437
4438void intel_init_gt_powersave(struct drm_device *dev)
4439{
4440 if (IS_VALLEYVIEW(dev))
4441 valleyview_setup_pctx(dev);
4442}
4443
4444void intel_cleanup_gt_powersave(struct drm_device *dev)
4445{
4446 if (IS_VALLEYVIEW(dev))
4447 valleyview_cleanup_pctx(dev);
4448}
4449
4425void intel_disable_gt_powersave(struct drm_device *dev) 4450void intel_disable_gt_powersave(struct drm_device *dev)
4426{ 4451{
4427 struct drm_i915_private *dev_priv = dev->dev_private; 4452 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -4476,8 +4501,6 @@ void intel_enable_gt_powersave(struct drm_device *dev)
4476 ironlake_enable_rc6(dev); 4501 ironlake_enable_rc6(dev);
4477 intel_init_emon(dev); 4502 intel_init_emon(dev);
4478 } else if (IS_GEN6(dev) || IS_GEN7(dev)) { 4503 } else if (IS_GEN6(dev) || IS_GEN7(dev)) {
4479 if (IS_VALLEYVIEW(dev))
4480 valleyview_setup_pctx(dev);
4481 /* 4504 /*
4482 * PCU communication is slow and this doesn't need to be 4505 * PCU communication is slow and this doesn't need to be
4483 * done at any specific time, so do this out of our fast path 4506 * done at any specific time, so do this out of our fast path
@@ -4881,6 +4904,10 @@ static void gen8_init_clock_gating(struct drm_device *dev)
4881 /* WaDisableSDEUnitClockGating:bdw */ 4904 /* WaDisableSDEUnitClockGating:bdw */
4882 I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) | 4905 I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
4883 GEN8_SDEUNIT_CLOCK_GATE_DISABLE); 4906 GEN8_SDEUNIT_CLOCK_GATE_DISABLE);
4907
4908 /* Wa4x4STCOptimizationDisable:bdw */
4909 I915_WRITE(CACHE_MODE_1,
4910 _MASKED_BIT_ENABLE(GEN8_4x4_STC_OPTIMIZATION_DISABLE));
4884} 4911}
4885 4912
4886static void haswell_init_clock_gating(struct drm_device *dev) 4913static void haswell_init_clock_gating(struct drm_device *dev)
@@ -5037,13 +5064,11 @@ static void valleyview_init_clock_gating(struct drm_device *dev)
5037 mutex_unlock(&dev_priv->rps.hw_lock); 5064 mutex_unlock(&dev_priv->rps.hw_lock);
5038 switch ((val >> 6) & 3) { 5065 switch ((val >> 6) & 3) {
5039 case 0: 5066 case 0:
5040 dev_priv->mem_freq = 800;
5041 break;
5042 case 1: 5067 case 1:
5043 dev_priv->mem_freq = 1066; 5068 dev_priv->mem_freq = 800;
5044 break; 5069 break;
5045 case 2: 5070 case 2:
5046 dev_priv->mem_freq = 1333; 5071 dev_priv->mem_freq = 1066;
5047 break; 5072 break;
5048 case 3: 5073 case 3:
5049 dev_priv->mem_freq = 1333; 5074 dev_priv->mem_freq = 1333;
@@ -5253,6 +5278,9 @@ bool intel_display_power_enabled(struct drm_i915_private *dev_priv,
5253 bool is_enabled; 5278 bool is_enabled;
5254 int i; 5279 int i;
5255 5280
5281 if (dev_priv->pm.suspended)
5282 return false;
5283
5256 power_domains = &dev_priv->power_domains; 5284 power_domains = &dev_priv->power_domains;
5257 5285
5258 is_enabled = true; 5286 is_enabled = true;
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 4eb3e062b4e3..6bc68bdcf433 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -406,17 +406,24 @@ gen8_render_ring_flush(struct intel_ring_buffer *ring,
406static void ring_write_tail(struct intel_ring_buffer *ring, 406static void ring_write_tail(struct intel_ring_buffer *ring,
407 u32 value) 407 u32 value)
408{ 408{
409 drm_i915_private_t *dev_priv = ring->dev->dev_private; 409 struct drm_i915_private *dev_priv = ring->dev->dev_private;
410 I915_WRITE_TAIL(ring, value); 410 I915_WRITE_TAIL(ring, value);
411} 411}
412 412
413u32 intel_ring_get_active_head(struct intel_ring_buffer *ring) 413u64 intel_ring_get_active_head(struct intel_ring_buffer *ring)
414{ 414{
415 drm_i915_private_t *dev_priv = ring->dev->dev_private; 415 struct drm_i915_private *dev_priv = ring->dev->dev_private;
416 u32 acthd_reg = INTEL_INFO(ring->dev)->gen >= 4 ? 416 u64 acthd;
417 RING_ACTHD(ring->mmio_base) : ACTHD; 417
418 if (INTEL_INFO(ring->dev)->gen >= 8)
419 acthd = I915_READ64_2x32(RING_ACTHD(ring->mmio_base),
420 RING_ACTHD_UDW(ring->mmio_base));
421 else if (INTEL_INFO(ring->dev)->gen >= 4)
422 acthd = I915_READ(RING_ACTHD(ring->mmio_base));
423 else
424 acthd = I915_READ(ACTHD);
418 425
419 return I915_READ(acthd_reg); 426 return acthd;
420} 427}
421 428
422static void ring_setup_phys_status_page(struct intel_ring_buffer *ring) 429static void ring_setup_phys_status_page(struct intel_ring_buffer *ring)
@@ -433,7 +440,7 @@ static void ring_setup_phys_status_page(struct intel_ring_buffer *ring)
433static int init_ring_common(struct intel_ring_buffer *ring) 440static int init_ring_common(struct intel_ring_buffer *ring)
434{ 441{
435 struct drm_device *dev = ring->dev; 442 struct drm_device *dev = ring->dev;
436 drm_i915_private_t *dev_priv = dev->dev_private; 443 struct drm_i915_private *dev_priv = dev->dev_private;
437 struct drm_i915_gem_object *obj = ring->obj; 444 struct drm_i915_gem_object *obj = ring->obj;
438 int ret = 0; 445 int ret = 0;
439 u32 head; 446 u32 head;
@@ -566,7 +573,8 @@ static int init_render_ring(struct intel_ring_buffer *ring)
566 struct drm_i915_private *dev_priv = dev->dev_private; 573 struct drm_i915_private *dev_priv = dev->dev_private;
567 int ret = init_ring_common(ring); 574 int ret = init_ring_common(ring);
568 575
569 if (INTEL_INFO(dev)->gen > 3) 576 /* WaTimedSingleVertexDispatch:cl,bw,ctg,elk,ilk,snb */
577 if (INTEL_INFO(dev)->gen >= 4 && INTEL_INFO(dev)->gen < 7)
570 I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(VS_TIMER_DISPATCH)); 578 I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(VS_TIMER_DISPATCH));
571 579
572 /* We need to disable the AsyncFlip performance optimisations in order 580 /* We need to disable the AsyncFlip performance optimisations in order
@@ -813,8 +821,11 @@ gen6_ring_get_seqno(struct intel_ring_buffer *ring, bool lazy_coherency)
813 /* Workaround to force correct ordering between irq and seqno writes on 821 /* Workaround to force correct ordering between irq and seqno writes on
814 * ivb (and maybe also on snb) by reading from a CS register (like 822 * ivb (and maybe also on snb) by reading from a CS register (like
815 * ACTHD) before reading the status page. */ 823 * ACTHD) before reading the status page. */
816 if (!lazy_coherency) 824 if (!lazy_coherency) {
817 intel_ring_get_active_head(ring); 825 struct drm_i915_private *dev_priv = ring->dev->dev_private;
826 POSTING_READ(RING_ACTHD(ring->mmio_base));
827 }
828
818 return intel_read_status_page(ring, I915_GEM_HWS_INDEX); 829 return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
819} 830}
820 831
@@ -846,7 +857,7 @@ static bool
846gen5_ring_get_irq(struct intel_ring_buffer *ring) 857gen5_ring_get_irq(struct intel_ring_buffer *ring)
847{ 858{
848 struct drm_device *dev = ring->dev; 859 struct drm_device *dev = ring->dev;
849 drm_i915_private_t *dev_priv = dev->dev_private; 860 struct drm_i915_private *dev_priv = dev->dev_private;
850 unsigned long flags; 861 unsigned long flags;
851 862
852 if (!dev->irq_enabled) 863 if (!dev->irq_enabled)
@@ -864,7 +875,7 @@ static void
864gen5_ring_put_irq(struct intel_ring_buffer *ring) 875gen5_ring_put_irq(struct intel_ring_buffer *ring)
865{ 876{
866 struct drm_device *dev = ring->dev; 877 struct drm_device *dev = ring->dev;
867 drm_i915_private_t *dev_priv = dev->dev_private; 878 struct drm_i915_private *dev_priv = dev->dev_private;
868 unsigned long flags; 879 unsigned long flags;
869 880
870 spin_lock_irqsave(&dev_priv->irq_lock, flags); 881 spin_lock_irqsave(&dev_priv->irq_lock, flags);
@@ -877,7 +888,7 @@ static bool
877i9xx_ring_get_irq(struct intel_ring_buffer *ring) 888i9xx_ring_get_irq(struct intel_ring_buffer *ring)
878{ 889{
879 struct drm_device *dev = ring->dev; 890 struct drm_device *dev = ring->dev;
880 drm_i915_private_t *dev_priv = dev->dev_private; 891 struct drm_i915_private *dev_priv = dev->dev_private;
881 unsigned long flags; 892 unsigned long flags;
882 893
883 if (!dev->irq_enabled) 894 if (!dev->irq_enabled)
@@ -898,7 +909,7 @@ static void
898i9xx_ring_put_irq(struct intel_ring_buffer *ring) 909i9xx_ring_put_irq(struct intel_ring_buffer *ring)
899{ 910{
900 struct drm_device *dev = ring->dev; 911 struct drm_device *dev = ring->dev;
901 drm_i915_private_t *dev_priv = dev->dev_private; 912 struct drm_i915_private *dev_priv = dev->dev_private;
902 unsigned long flags; 913 unsigned long flags;
903 914
904 spin_lock_irqsave(&dev_priv->irq_lock, flags); 915 spin_lock_irqsave(&dev_priv->irq_lock, flags);
@@ -914,7 +925,7 @@ static bool
914i8xx_ring_get_irq(struct intel_ring_buffer *ring) 925i8xx_ring_get_irq(struct intel_ring_buffer *ring)
915{ 926{
916 struct drm_device *dev = ring->dev; 927 struct drm_device *dev = ring->dev;
917 drm_i915_private_t *dev_priv = dev->dev_private; 928 struct drm_i915_private *dev_priv = dev->dev_private;
918 unsigned long flags; 929 unsigned long flags;
919 930
920 if (!dev->irq_enabled) 931 if (!dev->irq_enabled)
@@ -935,7 +946,7 @@ static void
935i8xx_ring_put_irq(struct intel_ring_buffer *ring) 946i8xx_ring_put_irq(struct intel_ring_buffer *ring)
936{ 947{
937 struct drm_device *dev = ring->dev; 948 struct drm_device *dev = ring->dev;
938 drm_i915_private_t *dev_priv = dev->dev_private; 949 struct drm_i915_private *dev_priv = dev->dev_private;
939 unsigned long flags; 950 unsigned long flags;
940 951
941 spin_lock_irqsave(&dev_priv->irq_lock, flags); 952 spin_lock_irqsave(&dev_priv->irq_lock, flags);
@@ -950,7 +961,7 @@ i8xx_ring_put_irq(struct intel_ring_buffer *ring)
950void intel_ring_setup_status_page(struct intel_ring_buffer *ring) 961void intel_ring_setup_status_page(struct intel_ring_buffer *ring)
951{ 962{
952 struct drm_device *dev = ring->dev; 963 struct drm_device *dev = ring->dev;
953 drm_i915_private_t *dev_priv = ring->dev->dev_private; 964 struct drm_i915_private *dev_priv = ring->dev->dev_private;
954 u32 mmio = 0; 965 u32 mmio = 0;
955 966
956 /* The ring status page addresses are no longer next to the rest of 967 /* The ring status page addresses are no longer next to the rest of
@@ -1043,7 +1054,7 @@ static bool
1043gen6_ring_get_irq(struct intel_ring_buffer *ring) 1054gen6_ring_get_irq(struct intel_ring_buffer *ring)
1044{ 1055{
1045 struct drm_device *dev = ring->dev; 1056 struct drm_device *dev = ring->dev;
1046 drm_i915_private_t *dev_priv = dev->dev_private; 1057 struct drm_i915_private *dev_priv = dev->dev_private;
1047 unsigned long flags; 1058 unsigned long flags;
1048 1059
1049 if (!dev->irq_enabled) 1060 if (!dev->irq_enabled)
@@ -1068,7 +1079,7 @@ static void
1068gen6_ring_put_irq(struct intel_ring_buffer *ring) 1079gen6_ring_put_irq(struct intel_ring_buffer *ring)
1069{ 1080{
1070 struct drm_device *dev = ring->dev; 1081 struct drm_device *dev = ring->dev;
1071 drm_i915_private_t *dev_priv = dev->dev_private; 1082 struct drm_i915_private *dev_priv = dev->dev_private;
1072 unsigned long flags; 1083 unsigned long flags;
1073 1084
1074 spin_lock_irqsave(&dev_priv->irq_lock, flags); 1085 spin_lock_irqsave(&dev_priv->irq_lock, flags);
@@ -1635,7 +1646,7 @@ static int __intel_ring_prepare(struct intel_ring_buffer *ring,
1635int intel_ring_begin(struct intel_ring_buffer *ring, 1646int intel_ring_begin(struct intel_ring_buffer *ring,
1636 int num_dwords) 1647 int num_dwords)
1637{ 1648{
1638 drm_i915_private_t *dev_priv = ring->dev->dev_private; 1649 struct drm_i915_private *dev_priv = ring->dev->dev_private;
1639 int ret; 1650 int ret;
1640 1651
1641 ret = i915_gem_check_wedge(&dev_priv->gpu_error, 1652 ret = i915_gem_check_wedge(&dev_priv->gpu_error,
@@ -1697,7 +1708,7 @@ void intel_ring_init_seqno(struct intel_ring_buffer *ring, u32 seqno)
1697static void gen6_bsd_ring_write_tail(struct intel_ring_buffer *ring, 1708static void gen6_bsd_ring_write_tail(struct intel_ring_buffer *ring,
1698 u32 value) 1709 u32 value)
1699{ 1710{
1700 drm_i915_private_t *dev_priv = ring->dev->dev_private; 1711 struct drm_i915_private *dev_priv = ring->dev->dev_private;
1701 1712
1702 /* Every tail move must follow the sequence below */ 1713 /* Every tail move must follow the sequence below */
1703 1714
@@ -1872,7 +1883,7 @@ static int gen6_ring_flush(struct intel_ring_buffer *ring,
1872 1883
1873int intel_init_render_ring_buffer(struct drm_device *dev) 1884int intel_init_render_ring_buffer(struct drm_device *dev)
1874{ 1885{
1875 drm_i915_private_t *dev_priv = dev->dev_private; 1886 struct drm_i915_private *dev_priv = dev->dev_private;
1876 struct intel_ring_buffer *ring = &dev_priv->ring[RCS]; 1887 struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
1877 1888
1878 ring->name = "render ring"; 1889 ring->name = "render ring";
@@ -1973,7 +1984,7 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
1973 1984
1974int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size) 1985int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size)
1975{ 1986{
1976 drm_i915_private_t *dev_priv = dev->dev_private; 1987 struct drm_i915_private *dev_priv = dev->dev_private;
1977 struct intel_ring_buffer *ring = &dev_priv->ring[RCS]; 1988 struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
1978 int ret; 1989 int ret;
1979 1990
@@ -2041,7 +2052,7 @@ int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size)
2041 2052
2042int intel_init_bsd_ring_buffer(struct drm_device *dev) 2053int intel_init_bsd_ring_buffer(struct drm_device *dev)
2043{ 2054{
2044 drm_i915_private_t *dev_priv = dev->dev_private; 2055 struct drm_i915_private *dev_priv = dev->dev_private;
2045 struct intel_ring_buffer *ring = &dev_priv->ring[VCS]; 2056 struct intel_ring_buffer *ring = &dev_priv->ring[VCS];
2046 2057
2047 ring->name = "bsd ring"; 2058 ring->name = "bsd ring";
@@ -2104,7 +2115,7 @@ int intel_init_bsd_ring_buffer(struct drm_device *dev)
2104 2115
2105int intel_init_blt_ring_buffer(struct drm_device *dev) 2116int intel_init_blt_ring_buffer(struct drm_device *dev)
2106{ 2117{
2107 drm_i915_private_t *dev_priv = dev->dev_private; 2118 struct drm_i915_private *dev_priv = dev->dev_private;
2108 struct intel_ring_buffer *ring = &dev_priv->ring[BCS]; 2119 struct intel_ring_buffer *ring = &dev_priv->ring[BCS];
2109 2120
2110 ring->name = "blitter ring"; 2121 ring->name = "blitter ring";
@@ -2144,7 +2155,7 @@ int intel_init_blt_ring_buffer(struct drm_device *dev)
2144 2155
2145int intel_init_vebox_ring_buffer(struct drm_device *dev) 2156int intel_init_vebox_ring_buffer(struct drm_device *dev)
2146{ 2157{
2147 drm_i915_private_t *dev_priv = dev->dev_private; 2158 struct drm_i915_private *dev_priv = dev->dev_private;
2148 struct intel_ring_buffer *ring = &dev_priv->ring[VECS]; 2159 struct intel_ring_buffer *ring = &dev_priv->ring[VECS];
2149 2160
2150 ring->name = "video enhancement ring"; 2161 ring->name = "video enhancement ring";
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index f11ceb230db4..270a6a973438 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -46,11 +46,11 @@ enum intel_ring_hangcheck_action {
46#define HANGCHECK_SCORE_RING_HUNG 31 46#define HANGCHECK_SCORE_RING_HUNG 31
47 47
48struct intel_ring_hangcheck { 48struct intel_ring_hangcheck {
49 bool deadlock; 49 u64 acthd;
50 u32 seqno; 50 u32 seqno;
51 u32 acthd;
52 int score; 51 int score;
53 enum intel_ring_hangcheck_action action; 52 enum intel_ring_hangcheck_action action;
53 bool deadlock;
54}; 54};
55 55
56struct intel_ring_buffer { 56struct intel_ring_buffer {
@@ -292,7 +292,7 @@ int intel_init_bsd_ring_buffer(struct drm_device *dev);
292int intel_init_blt_ring_buffer(struct drm_device *dev); 292int intel_init_blt_ring_buffer(struct drm_device *dev);
293int intel_init_vebox_ring_buffer(struct drm_device *dev); 293int intel_init_vebox_ring_buffer(struct drm_device *dev);
294 294
295u32 intel_ring_get_active_head(struct intel_ring_buffer *ring); 295u64 intel_ring_get_active_head(struct intel_ring_buffer *ring);
296void intel_ring_setup_status_page(struct intel_ring_buffer *ring); 296void intel_ring_setup_status_page(struct intel_ring_buffer *ring);
297 297
298static inline u32 intel_ring_get_tail(struct intel_ring_buffer *ring) 298static inline u32 intel_ring_get_tail(struct intel_ring_buffer *ring)
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index 5be4ab218054..bafe92e317d5 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -1536,9 +1536,14 @@ static int tv_is_present_in_vbt(struct drm_device *dev)
1536 /* 1536 /*
1537 * If the device type is not TV, continue. 1537 * If the device type is not TV, continue.
1538 */ 1538 */
1539 if (p_child->old.device_type != DEVICE_TYPE_INT_TV && 1539 switch (p_child->old.device_type) {
1540 p_child->old.device_type != DEVICE_TYPE_TV) 1540 case DEVICE_TYPE_INT_TV:
1541 case DEVICE_TYPE_TV:
1542 case DEVICE_TYPE_TV_SVIDEO_COMPOSITE:
1543 break;
1544 default:
1541 continue; 1545 continue;
1546 }
1542 /* Only when the addin_offset is non-zero, it is regarded 1547 /* Only when the addin_offset is non-zero, it is regarded
1543 * as present. 1548 * as present.
1544 */ 1549 */
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index c3832d9270a6..f729dc71d5be 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -550,11 +550,12 @@ gen6_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
550 NEEDS_FORCE_WAKE((dev_priv), (reg))) { \ 550 NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
551 dev_priv->uncore.funcs.force_wake_get(dev_priv, \ 551 dev_priv->uncore.funcs.force_wake_get(dev_priv, \
552 FORCEWAKE_ALL); \ 552 FORCEWAKE_ALL); \
553 dev_priv->uncore.forcewake_count++; \ 553 val = __raw_i915_read##x(dev_priv, reg); \
554 mod_timer_pinned(&dev_priv->uncore.force_wake_timer, \ 554 dev_priv->uncore.funcs.force_wake_put(dev_priv, \
555 jiffies + 1); \ 555 FORCEWAKE_ALL); \
556 } else { \
557 val = __raw_i915_read##x(dev_priv, reg); \
556 } \ 558 } \
557 val = __raw_i915_read##x(dev_priv, reg); \
558 REG_READ_FOOTER; \ 559 REG_READ_FOOTER; \
559} 560}
560 561
@@ -865,7 +866,7 @@ int i915_reg_read_ioctl(struct drm_device *dev,
865 struct drm_i915_private *dev_priv = dev->dev_private; 866 struct drm_i915_private *dev_priv = dev->dev_private;
866 struct drm_i915_reg_read *reg = data; 867 struct drm_i915_reg_read *reg = data;
867 struct register_whitelist const *entry = whitelist; 868 struct register_whitelist const *entry = whitelist;
868 int i; 869 int i, ret = 0;
869 870
870 for (i = 0; i < ARRAY_SIZE(whitelist); i++, entry++) { 871 for (i = 0; i < ARRAY_SIZE(whitelist); i++, entry++) {
871 if (entry->offset == reg->offset && 872 if (entry->offset == reg->offset &&
@@ -876,6 +877,8 @@ int i915_reg_read_ioctl(struct drm_device *dev,
876 if (i == ARRAY_SIZE(whitelist)) 877 if (i == ARRAY_SIZE(whitelist))
877 return -EINVAL; 878 return -EINVAL;
878 879
880 intel_runtime_pm_get(dev_priv);
881
879 switch (entry->size) { 882 switch (entry->size) {
880 case 8: 883 case 8:
881 reg->val = I915_READ64(reg->offset); 884 reg->val = I915_READ64(reg->offset);
@@ -891,10 +894,13 @@ int i915_reg_read_ioctl(struct drm_device *dev,
891 break; 894 break;
892 default: 895 default:
893 WARN_ON(1); 896 WARN_ON(1);
894 return -EINVAL; 897 ret = -EINVAL;
898 goto out;
895 } 899 }
896 900
897 return 0; 901out:
902 intel_runtime_pm_put(dev_priv);
903 return ret;
898} 904}
899 905
900int i915_get_reset_stats_ioctl(struct drm_device *dev, 906int i915_get_reset_stats_ioctl(struct drm_device *dev,
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index 7147b87c5e5d..ddd83756b9a2 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -891,13 +891,16 @@ static int nouveau_pmops_runtime_suspend(struct device *dev)
891 struct drm_device *drm_dev = pci_get_drvdata(pdev); 891 struct drm_device *drm_dev = pci_get_drvdata(pdev);
892 int ret; 892 int ret;
893 893
894 if (nouveau_runtime_pm == 0) 894 if (nouveau_runtime_pm == 0) {
895 return -EINVAL; 895 pm_runtime_forbid(dev);
896 return -EBUSY;
897 }
896 898
897 /* are we optimus enabled? */ 899 /* are we optimus enabled? */
898 if (nouveau_runtime_pm == -1 && !nouveau_is_optimus() && !nouveau_is_v1_dsm()) { 900 if (nouveau_runtime_pm == -1 && !nouveau_is_optimus() && !nouveau_is_v1_dsm()) {
899 DRM_DEBUG_DRIVER("failing to power off - not optimus\n"); 901 DRM_DEBUG_DRIVER("failing to power off - not optimus\n");
900 return -EINVAL; 902 pm_runtime_forbid(dev);
903 return -EBUSY;
901 } 904 }
902 905
903 nv_debug_level(SILENT); 906 nv_debug_level(SILENT);
@@ -946,12 +949,15 @@ static int nouveau_pmops_runtime_idle(struct device *dev)
946 struct nouveau_drm *drm = nouveau_drm(drm_dev); 949 struct nouveau_drm *drm = nouveau_drm(drm_dev);
947 struct drm_crtc *crtc; 950 struct drm_crtc *crtc;
948 951
949 if (nouveau_runtime_pm == 0) 952 if (nouveau_runtime_pm == 0) {
953 pm_runtime_forbid(dev);
950 return -EBUSY; 954 return -EBUSY;
955 }
951 956
952 /* are we optimus enabled? */ 957 /* are we optimus enabled? */
953 if (nouveau_runtime_pm == -1 && !nouveau_is_optimus() && !nouveau_is_v1_dsm()) { 958 if (nouveau_runtime_pm == -1 && !nouveau_is_optimus() && !nouveau_is_v1_dsm()) {
954 DRM_DEBUG_DRIVER("failing to power off - not optimus\n"); 959 DRM_DEBUG_DRIVER("failing to power off - not optimus\n");
960 pm_runtime_forbid(dev);
955 return -EBUSY; 961 return -EBUSY;
956 } 962 }
957 963
diff --git a/drivers/gpu/drm/udl/udl_gem.c b/drivers/gpu/drm/udl/udl_gem.c
index be4fcd0f0e0f..c041cd73f399 100644
--- a/drivers/gpu/drm/udl/udl_gem.c
+++ b/drivers/gpu/drm/udl/udl_gem.c
@@ -177,8 +177,10 @@ void udl_gem_free_object(struct drm_gem_object *gem_obj)
177 if (obj->vmapping) 177 if (obj->vmapping)
178 udl_gem_vunmap(obj); 178 udl_gem_vunmap(obj);
179 179
180 if (gem_obj->import_attach) 180 if (gem_obj->import_attach) {
181 drm_prime_gem_destroy(gem_obj, obj->sg); 181 drm_prime_gem_destroy(gem_obj, obj->sg);
182 put_device(gem_obj->dev->dev);
183 }
182 184
183 if (obj->pages) 185 if (obj->pages)
184 udl_gem_put_pages(obj); 186 udl_gem_put_pages(obj);
@@ -256,9 +258,12 @@ struct drm_gem_object *udl_gem_prime_import(struct drm_device *dev,
256 int ret; 258 int ret;
257 259
258 /* need to attach */ 260 /* need to attach */
261 get_device(dev->dev);
259 attach = dma_buf_attach(dma_buf, dev->dev); 262 attach = dma_buf_attach(dma_buf, dev->dev);
260 if (IS_ERR(attach)) 263 if (IS_ERR(attach)) {
264 put_device(dev->dev);
261 return ERR_CAST(attach); 265 return ERR_CAST(attach);
266 }
262 267
263 get_dma_buf(dma_buf); 268 get_dma_buf(dma_buf);
264 269
@@ -282,6 +287,6 @@ fail_unmap:
282fail_detach: 287fail_detach:
283 dma_buf_detach(dma_buf, attach); 288 dma_buf_detach(dma_buf, attach);
284 dma_buf_put(dma_buf); 289 dma_buf_put(dma_buf);
285 290 put_device(dev->dev);
286 return ERR_PTR(ret); 291 return ERR_PTR(ret);
287} 292}
diff --git a/drivers/hid/hid-lg4ff.c b/drivers/hid/hid-lg4ff.c
index befe0e336471..24883b4d1a49 100644
--- a/drivers/hid/hid-lg4ff.c
+++ b/drivers/hid/hid-lg4ff.c
@@ -43,6 +43,7 @@
43#define G25_REV_MIN 0x22 43#define G25_REV_MIN 0x22
44#define G27_REV_MAJ 0x12 44#define G27_REV_MAJ 0x12
45#define G27_REV_MIN 0x38 45#define G27_REV_MIN 0x38
46#define G27_2_REV_MIN 0x39
46 47
47#define to_hid_device(pdev) container_of(pdev, struct hid_device, dev) 48#define to_hid_device(pdev) container_of(pdev, struct hid_device, dev)
48 49
@@ -130,6 +131,7 @@ static const struct lg4ff_usb_revision lg4ff_revs[] = {
130 {DFP_REV_MAJ, DFP_REV_MIN, &native_dfp}, /* Driving Force Pro */ 131 {DFP_REV_MAJ, DFP_REV_MIN, &native_dfp}, /* Driving Force Pro */
131 {G25_REV_MAJ, G25_REV_MIN, &native_g25}, /* G25 */ 132 {G25_REV_MAJ, G25_REV_MIN, &native_g25}, /* G25 */
132 {G27_REV_MAJ, G27_REV_MIN, &native_g27}, /* G27 */ 133 {G27_REV_MAJ, G27_REV_MIN, &native_g27}, /* G27 */
134 {G27_REV_MAJ, G27_2_REV_MIN, &native_g27}, /* G27 v2 */
133}; 135};
134 136
135/* Recalculates X axis value accordingly to currently selected range */ 137/* Recalculates X axis value accordingly to currently selected range */
diff --git a/drivers/hid/hid-sony.c b/drivers/hid/hid-sony.c
index 12354055d474..2f19b15f47f2 100644
--- a/drivers/hid/hid-sony.c
+++ b/drivers/hid/hid-sony.c
@@ -42,6 +42,7 @@
42#define DUALSHOCK4_CONTROLLER_BT BIT(6) 42#define DUALSHOCK4_CONTROLLER_BT BIT(6)
43 43
44#define SONY_LED_SUPPORT (SIXAXIS_CONTROLLER_USB | BUZZ_CONTROLLER | DUALSHOCK4_CONTROLLER_USB) 44#define SONY_LED_SUPPORT (SIXAXIS_CONTROLLER_USB | BUZZ_CONTROLLER | DUALSHOCK4_CONTROLLER_USB)
45#define SONY_FF_SUPPORT (SIXAXIS_CONTROLLER_USB | DUALSHOCK4_CONTROLLER_USB)
45 46
46#define MAX_LEDS 4 47#define MAX_LEDS 4
47 48
@@ -499,6 +500,7 @@ struct sony_sc {
499 __u8 right; 500 __u8 right;
500#endif 501#endif
501 502
503 __u8 worker_initialized;
502 __u8 led_state[MAX_LEDS]; 504 __u8 led_state[MAX_LEDS];
503 __u8 led_count; 505 __u8 led_count;
504}; 506};
@@ -993,22 +995,11 @@ static int sony_init_ff(struct hid_device *hdev)
993 return input_ff_create_memless(input_dev, NULL, sony_play_effect); 995 return input_ff_create_memless(input_dev, NULL, sony_play_effect);
994} 996}
995 997
996static void sony_destroy_ff(struct hid_device *hdev)
997{
998 struct sony_sc *sc = hid_get_drvdata(hdev);
999
1000 cancel_work_sync(&sc->state_worker);
1001}
1002
1003#else 998#else
1004static int sony_init_ff(struct hid_device *hdev) 999static int sony_init_ff(struct hid_device *hdev)
1005{ 1000{
1006 return 0; 1001 return 0;
1007} 1002}
1008
1009static void sony_destroy_ff(struct hid_device *hdev)
1010{
1011}
1012#endif 1003#endif
1013 1004
1014static int sony_set_output_report(struct sony_sc *sc, int req_id, int req_size) 1005static int sony_set_output_report(struct sony_sc *sc, int req_id, int req_size)
@@ -1077,6 +1068,8 @@ static int sony_probe(struct hid_device *hdev, const struct hid_device_id *id)
1077 if (sc->quirks & SIXAXIS_CONTROLLER_USB) { 1068 if (sc->quirks & SIXAXIS_CONTROLLER_USB) {
1078 hdev->hid_output_raw_report = sixaxis_usb_output_raw_report; 1069 hdev->hid_output_raw_report = sixaxis_usb_output_raw_report;
1079 ret = sixaxis_set_operational_usb(hdev); 1070 ret = sixaxis_set_operational_usb(hdev);
1071
1072 sc->worker_initialized = 1;
1080 INIT_WORK(&sc->state_worker, sixaxis_state_worker); 1073 INIT_WORK(&sc->state_worker, sixaxis_state_worker);
1081 } 1074 }
1082 else if (sc->quirks & SIXAXIS_CONTROLLER_BT) 1075 else if (sc->quirks & SIXAXIS_CONTROLLER_BT)
@@ -1087,6 +1080,7 @@ static int sony_probe(struct hid_device *hdev, const struct hid_device_id *id)
1087 if (ret < 0) 1080 if (ret < 0)
1088 goto err_stop; 1081 goto err_stop;
1089 1082
1083 sc->worker_initialized = 1;
1090 INIT_WORK(&sc->state_worker, dualshock4_state_worker); 1084 INIT_WORK(&sc->state_worker, dualshock4_state_worker);
1091 } else { 1085 } else {
1092 ret = 0; 1086 ret = 0;
@@ -1101,9 +1095,11 @@ static int sony_probe(struct hid_device *hdev, const struct hid_device_id *id)
1101 goto err_stop; 1095 goto err_stop;
1102 } 1096 }
1103 1097
1104 ret = sony_init_ff(hdev); 1098 if (sc->quirks & SONY_FF_SUPPORT) {
1105 if (ret < 0) 1099 ret = sony_init_ff(hdev);
1106 goto err_stop; 1100 if (ret < 0)
1101 goto err_stop;
1102 }
1107 1103
1108 return 0; 1104 return 0;
1109err_stop: 1105err_stop:
@@ -1120,7 +1116,8 @@ static void sony_remove(struct hid_device *hdev)
1120 if (sc->quirks & SONY_LED_SUPPORT) 1116 if (sc->quirks & SONY_LED_SUPPORT)
1121 sony_leds_remove(hdev); 1117 sony_leds_remove(hdev);
1122 1118
1123 sony_destroy_ff(hdev); 1119 if (sc->worker_initialized)
1120 cancel_work_sync(&sc->state_worker);
1124 1121
1125 hid_hw_stop(hdev); 1122 hid_hw_stop(hdev);
1126} 1123}
diff --git a/drivers/hid/hidraw.c b/drivers/hid/hidraw.c
index cb0137b3718d..ab24ce2eb28f 100644
--- a/drivers/hid/hidraw.c
+++ b/drivers/hid/hidraw.c
@@ -320,13 +320,13 @@ static void drop_ref(struct hidraw *hidraw, int exists_bit)
320 hid_hw_close(hidraw->hid); 320 hid_hw_close(hidraw->hid);
321 wake_up_interruptible(&hidraw->wait); 321 wake_up_interruptible(&hidraw->wait);
322 } 322 }
323 device_destroy(hidraw_class,
324 MKDEV(hidraw_major, hidraw->minor));
323 } else { 325 } else {
324 --hidraw->open; 326 --hidraw->open;
325 } 327 }
326 if (!hidraw->open) { 328 if (!hidraw->open) {
327 if (!hidraw->exist) { 329 if (!hidraw->exist) {
328 device_destroy(hidraw_class,
329 MKDEV(hidraw_major, hidraw->minor));
330 hidraw_table[hidraw->minor] = NULL; 330 hidraw_table[hidraw->minor] = NULL;
331 kfree(hidraw); 331 kfree(hidraw);
332 } else { 332 } else {
diff --git a/drivers/i2c/busses/i2c-cpm.c b/drivers/i2c/busses/i2c-cpm.c
index be7f0a20d634..f3b89a4698b6 100644
--- a/drivers/i2c/busses/i2c-cpm.c
+++ b/drivers/i2c/busses/i2c-cpm.c
@@ -39,7 +39,9 @@
39#include <linux/i2c.h> 39#include <linux/i2c.h>
40#include <linux/io.h> 40#include <linux/io.h>
41#include <linux/dma-mapping.h> 41#include <linux/dma-mapping.h>
42#include <linux/of_address.h>
42#include <linux/of_device.h> 43#include <linux/of_device.h>
44#include <linux/of_irq.h>
43#include <linux/of_platform.h> 45#include <linux/of_platform.h>
44#include <sysdev/fsl_soc.h> 46#include <sysdev/fsl_soc.h>
45#include <asm/cpm.h> 47#include <asm/cpm.h>
diff --git a/drivers/input/evdev.c b/drivers/input/evdev.c
index a06e12552886..ce953d895f5b 100644
--- a/drivers/input/evdev.c
+++ b/drivers/input/evdev.c
@@ -954,11 +954,13 @@ static long evdev_do_ioctl(struct file *file, unsigned int cmd,
954 return -EFAULT; 954 return -EFAULT;
955 955
956 error = input_ff_upload(dev, &effect, file); 956 error = input_ff_upload(dev, &effect, file);
957 if (error)
958 return error;
957 959
958 if (put_user(effect.id, &(((struct ff_effect __user *)p)->id))) 960 if (put_user(effect.id, &(((struct ff_effect __user *)p)->id)))
959 return -EFAULT; 961 return -EFAULT;
960 962
961 return error; 963 return 0;
962 } 964 }
963 965
964 /* Multi-number variable-length handlers */ 966 /* Multi-number variable-length handlers */
diff --git a/drivers/input/keyboard/adp5588-keys.c b/drivers/input/keyboard/adp5588-keys.c
index bb3b57bea8ba..5ef7fcf0e250 100644
--- a/drivers/input/keyboard/adp5588-keys.c
+++ b/drivers/input/keyboard/adp5588-keys.c
@@ -76,8 +76,18 @@ static int adp5588_gpio_get_value(struct gpio_chip *chip, unsigned off)
76 struct adp5588_kpad *kpad = container_of(chip, struct adp5588_kpad, gc); 76 struct adp5588_kpad *kpad = container_of(chip, struct adp5588_kpad, gc);
77 unsigned int bank = ADP5588_BANK(kpad->gpiomap[off]); 77 unsigned int bank = ADP5588_BANK(kpad->gpiomap[off]);
78 unsigned int bit = ADP5588_BIT(kpad->gpiomap[off]); 78 unsigned int bit = ADP5588_BIT(kpad->gpiomap[off]);
79 int val;
79 80
80 return !!(adp5588_read(kpad->client, GPIO_DAT_STAT1 + bank) & bit); 81 mutex_lock(&kpad->gpio_lock);
82
83 if (kpad->dir[bank] & bit)
84 val = kpad->dat_out[bank];
85 else
86 val = adp5588_read(kpad->client, GPIO_DAT_STAT1 + bank);
87
88 mutex_unlock(&kpad->gpio_lock);
89
90 return !!(val & bit);
81} 91}
82 92
83static void adp5588_gpio_set_value(struct gpio_chip *chip, 93static void adp5588_gpio_set_value(struct gpio_chip *chip,
diff --git a/drivers/input/misc/da9052_onkey.c b/drivers/input/misc/da9052_onkey.c
index 1f695f229ea8..184c8f21ab59 100644
--- a/drivers/input/misc/da9052_onkey.c
+++ b/drivers/input/misc/da9052_onkey.c
@@ -27,29 +27,32 @@ struct da9052_onkey {
27 27
28static void da9052_onkey_query(struct da9052_onkey *onkey) 28static void da9052_onkey_query(struct da9052_onkey *onkey)
29{ 29{
30 int key_stat; 30 int ret;
31 31
32 key_stat = da9052_reg_read(onkey->da9052, DA9052_EVENT_B_REG); 32 ret = da9052_reg_read(onkey->da9052, DA9052_STATUS_A_REG);
33 if (key_stat < 0) { 33 if (ret < 0) {
34 dev_err(onkey->da9052->dev, 34 dev_err(onkey->da9052->dev,
35 "Failed to read onkey event %d\n", key_stat); 35 "Failed to read onkey event err=%d\n", ret);
36 } else { 36 } else {
37 /* 37 /*
38 * Since interrupt for deassertion of ONKEY pin is not 38 * Since interrupt for deassertion of ONKEY pin is not
39 * generated, onkey event state determines the onkey 39 * generated, onkey event state determines the onkey
40 * button state. 40 * button state.
41 */ 41 */
42 key_stat &= DA9052_EVENTB_ENONKEY; 42 bool pressed = !(ret & DA9052_STATUSA_NONKEY);
43 input_report_key(onkey->input, KEY_POWER, key_stat); 43
44 input_report_key(onkey->input, KEY_POWER, pressed);
44 input_sync(onkey->input); 45 input_sync(onkey->input);
45 }
46 46
47 /* 47 /*
48 * Interrupt is generated only when the ONKEY pin is asserted. 48 * Interrupt is generated only when the ONKEY pin
49 * Hence the deassertion of the pin is simulated through work queue. 49 * is asserted. Hence the deassertion of the pin
50 */ 50 * is simulated through work queue.
51 if (key_stat) 51 */
52 schedule_delayed_work(&onkey->work, msecs_to_jiffies(50)); 52 if (pressed)
53 schedule_delayed_work(&onkey->work,
54 msecs_to_jiffies(50));
55 }
53} 56}
54 57
55static void da9052_onkey_work(struct work_struct *work) 58static void da9052_onkey_work(struct work_struct *work)
diff --git a/drivers/input/mouse/cypress_ps2.c b/drivers/input/mouse/cypress_ps2.c
index 87095e2f5153..8af34ffe208b 100644
--- a/drivers/input/mouse/cypress_ps2.c
+++ b/drivers/input/mouse/cypress_ps2.c
@@ -409,7 +409,6 @@ static int cypress_set_input_params(struct input_dev *input,
409 __clear_bit(REL_X, input->relbit); 409 __clear_bit(REL_X, input->relbit);
410 __clear_bit(REL_Y, input->relbit); 410 __clear_bit(REL_Y, input->relbit);
411 411
412 __set_bit(INPUT_PROP_BUTTONPAD, input->propbit);
413 __set_bit(EV_KEY, input->evbit); 412 __set_bit(EV_KEY, input->evbit);
414 __set_bit(BTN_LEFT, input->keybit); 413 __set_bit(BTN_LEFT, input->keybit);
415 __set_bit(BTN_RIGHT, input->keybit); 414 __set_bit(BTN_RIGHT, input->keybit);
diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
index 26386f9d2569..d8d49d10f9bb 100644
--- a/drivers/input/mouse/synaptics.c
+++ b/drivers/input/mouse/synaptics.c
@@ -265,11 +265,22 @@ static int synaptics_identify(struct psmouse *psmouse)
265 * Read touchpad resolution and maximum reported coordinates 265 * Read touchpad resolution and maximum reported coordinates
266 * Resolution is left zero if touchpad does not support the query 266 * Resolution is left zero if touchpad does not support the query
267 */ 267 */
268
269static const int *quirk_min_max;
270
268static int synaptics_resolution(struct psmouse *psmouse) 271static int synaptics_resolution(struct psmouse *psmouse)
269{ 272{
270 struct synaptics_data *priv = psmouse->private; 273 struct synaptics_data *priv = psmouse->private;
271 unsigned char resp[3]; 274 unsigned char resp[3];
272 275
276 if (quirk_min_max) {
277 priv->x_min = quirk_min_max[0];
278 priv->x_max = quirk_min_max[1];
279 priv->y_min = quirk_min_max[2];
280 priv->y_max = quirk_min_max[3];
281 return 0;
282 }
283
273 if (SYN_ID_MAJOR(priv->identity) < 4) 284 if (SYN_ID_MAJOR(priv->identity) < 4)
274 return 0; 285 return 0;
275 286
@@ -1485,10 +1496,54 @@ static const struct dmi_system_id olpc_dmi_table[] __initconst = {
1485 { } 1496 { }
1486}; 1497};
1487 1498
1499static const struct dmi_system_id min_max_dmi_table[] __initconst = {
1500#if defined(CONFIG_DMI)
1501 {
1502 /* Lenovo ThinkPad Helix */
1503 .matches = {
1504 DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
1505 DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad Helix"),
1506 },
1507 .driver_data = (int []){1024, 5052, 2258, 4832},
1508 },
1509 {
1510 /* Lenovo ThinkPad X240 */
1511 .matches = {
1512 DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
1513 DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad X240"),
1514 },
1515 .driver_data = (int []){1232, 5710, 1156, 4696},
1516 },
1517 {
1518 /* Lenovo ThinkPad T440s */
1519 .matches = {
1520 DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
1521 DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T440"),
1522 },
1523 .driver_data = (int []){1024, 5112, 2024, 4832},
1524 },
1525 {
1526 /* Lenovo ThinkPad T540p */
1527 .matches = {
1528 DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
1529 DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T540"),
1530 },
1531 .driver_data = (int []){1024, 5056, 2058, 4832},
1532 },
1533#endif
1534 { }
1535};
1536
1488void __init synaptics_module_init(void) 1537void __init synaptics_module_init(void)
1489{ 1538{
1539 const struct dmi_system_id *min_max_dmi;
1540
1490 impaired_toshiba_kbc = dmi_check_system(toshiba_dmi_table); 1541 impaired_toshiba_kbc = dmi_check_system(toshiba_dmi_table);
1491 broken_olpc_ec = dmi_check_system(olpc_dmi_table); 1542 broken_olpc_ec = dmi_check_system(olpc_dmi_table);
1543
1544 min_max_dmi = dmi_first_match(min_max_dmi_table);
1545 if (min_max_dmi)
1546 quirk_min_max = min_max_dmi->driver_data;
1492} 1547}
1493 1548
1494static int __synaptics_init(struct psmouse *psmouse, bool absolute_mode) 1549static int __synaptics_init(struct psmouse *psmouse, bool absolute_mode)
diff --git a/drivers/input/mousedev.c b/drivers/input/mousedev.c
index 4c842c320c2e..b604564dec5c 100644
--- a/drivers/input/mousedev.c
+++ b/drivers/input/mousedev.c
@@ -67,7 +67,6 @@ struct mousedev {
67 struct device dev; 67 struct device dev;
68 struct cdev cdev; 68 struct cdev cdev;
69 bool exist; 69 bool exist;
70 bool is_mixdev;
71 70
72 struct list_head mixdev_node; 71 struct list_head mixdev_node;
73 bool opened_by_mixdev; 72 bool opened_by_mixdev;
@@ -77,6 +76,9 @@ struct mousedev {
77 int old_x[4], old_y[4]; 76 int old_x[4], old_y[4];
78 int frac_dx, frac_dy; 77 int frac_dx, frac_dy;
79 unsigned long touch; 78 unsigned long touch;
79
80 int (*open_device)(struct mousedev *mousedev);
81 void (*close_device)(struct mousedev *mousedev);
80}; 82};
81 83
82enum mousedev_emul { 84enum mousedev_emul {
@@ -116,9 +118,6 @@ static unsigned char mousedev_imex_seq[] = { 0xf3, 200, 0xf3, 200, 0xf3, 80 };
116static struct mousedev *mousedev_mix; 118static struct mousedev *mousedev_mix;
117static LIST_HEAD(mousedev_mix_list); 119static LIST_HEAD(mousedev_mix_list);
118 120
119static void mixdev_open_devices(void);
120static void mixdev_close_devices(void);
121
122#define fx(i) (mousedev->old_x[(mousedev->pkt_count - (i)) & 03]) 121#define fx(i) (mousedev->old_x[(mousedev->pkt_count - (i)) & 03])
123#define fy(i) (mousedev->old_y[(mousedev->pkt_count - (i)) & 03]) 122#define fy(i) (mousedev->old_y[(mousedev->pkt_count - (i)) & 03])
124 123
@@ -428,9 +427,7 @@ static int mousedev_open_device(struct mousedev *mousedev)
428 if (retval) 427 if (retval)
429 return retval; 428 return retval;
430 429
431 if (mousedev->is_mixdev) 430 if (!mousedev->exist)
432 mixdev_open_devices();
433 else if (!mousedev->exist)
434 retval = -ENODEV; 431 retval = -ENODEV;
435 else if (!mousedev->open++) { 432 else if (!mousedev->open++) {
436 retval = input_open_device(&mousedev->handle); 433 retval = input_open_device(&mousedev->handle);
@@ -446,9 +443,7 @@ static void mousedev_close_device(struct mousedev *mousedev)
446{ 443{
447 mutex_lock(&mousedev->mutex); 444 mutex_lock(&mousedev->mutex);
448 445
449 if (mousedev->is_mixdev) 446 if (mousedev->exist && !--mousedev->open)
450 mixdev_close_devices();
451 else if (mousedev->exist && !--mousedev->open)
452 input_close_device(&mousedev->handle); 447 input_close_device(&mousedev->handle);
453 448
454 mutex_unlock(&mousedev->mutex); 449 mutex_unlock(&mousedev->mutex);
@@ -459,21 +454,29 @@ static void mousedev_close_device(struct mousedev *mousedev)
459 * stream. Note that this function is called with mousedev_mix->mutex 454 * stream. Note that this function is called with mousedev_mix->mutex
460 * held. 455 * held.
461 */ 456 */
462static void mixdev_open_devices(void) 457static int mixdev_open_devices(struct mousedev *mixdev)
463{ 458{
464 struct mousedev *mousedev; 459 int error;
460
461 error = mutex_lock_interruptible(&mixdev->mutex);
462 if (error)
463 return error;
465 464
466 if (mousedev_mix->open++) 465 if (!mixdev->open++) {
467 return; 466 struct mousedev *mousedev;
468 467
469 list_for_each_entry(mousedev, &mousedev_mix_list, mixdev_node) { 468 list_for_each_entry(mousedev, &mousedev_mix_list, mixdev_node) {
470 if (!mousedev->opened_by_mixdev) { 469 if (!mousedev->opened_by_mixdev) {
471 if (mousedev_open_device(mousedev)) 470 if (mousedev_open_device(mousedev))
472 continue; 471 continue;
473 472
474 mousedev->opened_by_mixdev = true; 473 mousedev->opened_by_mixdev = true;
474 }
475 } 475 }
476 } 476 }
477
478 mutex_unlock(&mixdev->mutex);
479 return 0;
477} 480}
478 481
479/* 482/*
@@ -481,19 +484,22 @@ static void mixdev_open_devices(void)
481 * device. Note that this function is called with mousedev_mix->mutex 484 * device. Note that this function is called with mousedev_mix->mutex
482 * held. 485 * held.
483 */ 486 */
484static void mixdev_close_devices(void) 487static void mixdev_close_devices(struct mousedev *mixdev)
485{ 488{
486 struct mousedev *mousedev; 489 mutex_lock(&mixdev->mutex);
487 490
488 if (--mousedev_mix->open) 491 if (!--mixdev->open) {
489 return; 492 struct mousedev *mousedev;
490 493
491 list_for_each_entry(mousedev, &mousedev_mix_list, mixdev_node) { 494 list_for_each_entry(mousedev, &mousedev_mix_list, mixdev_node) {
492 if (mousedev->opened_by_mixdev) { 495 if (mousedev->opened_by_mixdev) {
493 mousedev->opened_by_mixdev = false; 496 mousedev->opened_by_mixdev = false;
494 mousedev_close_device(mousedev); 497 mousedev_close_device(mousedev);
498 }
495 } 499 }
496 } 500 }
501
502 mutex_unlock(&mixdev->mutex);
497} 503}
498 504
499 505
@@ -522,7 +528,7 @@ static int mousedev_release(struct inode *inode, struct file *file)
522 mousedev_detach_client(mousedev, client); 528 mousedev_detach_client(mousedev, client);
523 kfree(client); 529 kfree(client);
524 530
525 mousedev_close_device(mousedev); 531 mousedev->close_device(mousedev);
526 532
527 return 0; 533 return 0;
528} 534}
@@ -550,7 +556,7 @@ static int mousedev_open(struct inode *inode, struct file *file)
550 client->mousedev = mousedev; 556 client->mousedev = mousedev;
551 mousedev_attach_client(mousedev, client); 557 mousedev_attach_client(mousedev, client);
552 558
553 error = mousedev_open_device(mousedev); 559 error = mousedev->open_device(mousedev);
554 if (error) 560 if (error)
555 goto err_free_client; 561 goto err_free_client;
556 562
@@ -861,16 +867,21 @@ static struct mousedev *mousedev_create(struct input_dev *dev,
861 867
862 if (mixdev) { 868 if (mixdev) {
863 dev_set_name(&mousedev->dev, "mice"); 869 dev_set_name(&mousedev->dev, "mice");
870
871 mousedev->open_device = mixdev_open_devices;
872 mousedev->close_device = mixdev_close_devices;
864 } else { 873 } else {
865 int dev_no = minor; 874 int dev_no = minor;
866 /* Normalize device number if it falls into legacy range */ 875 /* Normalize device number if it falls into legacy range */
867 if (dev_no < MOUSEDEV_MINOR_BASE + MOUSEDEV_MINORS) 876 if (dev_no < MOUSEDEV_MINOR_BASE + MOUSEDEV_MINORS)
868 dev_no -= MOUSEDEV_MINOR_BASE; 877 dev_no -= MOUSEDEV_MINOR_BASE;
869 dev_set_name(&mousedev->dev, "mouse%d", dev_no); 878 dev_set_name(&mousedev->dev, "mouse%d", dev_no);
879
880 mousedev->open_device = mousedev_open_device;
881 mousedev->close_device = mousedev_close_device;
870 } 882 }
871 883
872 mousedev->exist = true; 884 mousedev->exist = true;
873 mousedev->is_mixdev = mixdev;
874 mousedev->handle.dev = input_get_device(dev); 885 mousedev->handle.dev = input_get_device(dev);
875 mousedev->handle.name = dev_name(&mousedev->dev); 886 mousedev->handle.name = dev_name(&mousedev->dev);
876 mousedev->handle.handler = handler; 887 mousedev->handle.handler = handler;
@@ -919,7 +930,7 @@ static void mousedev_destroy(struct mousedev *mousedev)
919 device_del(&mousedev->dev); 930 device_del(&mousedev->dev);
920 mousedev_cleanup(mousedev); 931 mousedev_cleanup(mousedev);
921 input_free_minor(MINOR(mousedev->dev.devt)); 932 input_free_minor(MINOR(mousedev->dev.devt));
922 if (!mousedev->is_mixdev) 933 if (mousedev != mousedev_mix)
923 input_unregister_handle(&mousedev->handle); 934 input_unregister_handle(&mousedev->handle);
924 put_device(&mousedev->dev); 935 put_device(&mousedev->dev);
925} 936}
diff --git a/drivers/isdn/capi/Kconfig b/drivers/isdn/capi/Kconfig
index f04686580040..9816c51eb5c2 100644
--- a/drivers/isdn/capi/Kconfig
+++ b/drivers/isdn/capi/Kconfig
@@ -16,9 +16,17 @@ config CAPI_TRACE
16 This will increase the size of the kernelcapi module by 20 KB. 16 This will increase the size of the kernelcapi module by 20 KB.
17 If unsure, say Y. 17 If unsure, say Y.
18 18
19config ISDN_CAPI_CAPI20
20 tristate "CAPI2.0 /dev/capi support"
21 help
22 This option will provide the CAPI 2.0 interface to userspace
23 applications via /dev/capi20. Applications should use the
24 standardized libcapi20 to access this functionality. You should say
25 Y/M here.
26
19config ISDN_CAPI_MIDDLEWARE 27config ISDN_CAPI_MIDDLEWARE
20 bool "CAPI2.0 Middleware support" 28 bool "CAPI2.0 Middleware support"
21 depends on TTY 29 depends on ISDN_CAPI_CAPI20 && TTY
22 help 30 help
23 This option will enhance the capabilities of the /dev/capi20 31 This option will enhance the capabilities of the /dev/capi20
24 interface. It will provide a means of moving a data connection, 32 interface. It will provide a means of moving a data connection,
@@ -26,14 +34,6 @@ config ISDN_CAPI_MIDDLEWARE
26 device. If you want to use pppd with pppdcapiplugin to dial up to 34 device. If you want to use pppd with pppdcapiplugin to dial up to
27 your ISP, say Y here. 35 your ISP, say Y here.
28 36
29config ISDN_CAPI_CAPI20
30 tristate "CAPI2.0 /dev/capi support"
31 help
32 This option will provide the CAPI 2.0 interface to userspace
33 applications via /dev/capi20. Applications should use the
34 standardized libcapi20 to access this functionality. You should say
35 Y/M here.
36
37config ISDN_CAPI_CAPIDRV 37config ISDN_CAPI_CAPIDRV
38 tristate "CAPI2.0 capidrv interface support" 38 tristate "CAPI2.0 capidrv interface support"
39 depends on ISDN_I4L 39 depends on ISDN_I4L
diff --git a/drivers/net/ethernet/atheros/alx/main.c b/drivers/net/ethernet/atheros/alx/main.c
index 2e45f6ec1bf0..380d24922049 100644
--- a/drivers/net/ethernet/atheros/alx/main.c
+++ b/drivers/net/ethernet/atheros/alx/main.c
@@ -1248,19 +1248,13 @@ static int alx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1248 * shared register for the high 32 bits, so only a single, aligned, 1248 * shared register for the high 32 bits, so only a single, aligned,
1249 * 4 GB physical address range can be used for descriptors. 1249 * 4 GB physical address range can be used for descriptors.
1250 */ 1250 */
1251 if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) && 1251 if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
1252 !dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) {
1253 dev_dbg(&pdev->dev, "DMA to 64-BIT addresses\n"); 1252 dev_dbg(&pdev->dev, "DMA to 64-BIT addresses\n");
1254 } else { 1253 } else {
1255 err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)); 1254 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
1256 if (err) { 1255 if (err) {
1257 err = dma_set_coherent_mask(&pdev->dev, 1256 dev_err(&pdev->dev, "No usable DMA config, aborting\n");
1258 DMA_BIT_MASK(32)); 1257 goto out_pci_disable;
1259 if (err) {
1260 dev_err(&pdev->dev,
1261 "No usable DMA config, aborting\n");
1262 goto out_pci_disable;
1263 }
1264 } 1258 }
1265 } 1259 }
1266 1260
diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
index d5c2d3e912e5..422aab27ea1b 100644
--- a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
+++ b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
@@ -2436,7 +2436,7 @@ err_reset:
2436err_register: 2436err_register:
2437err_sw_init: 2437err_sw_init:
2438err_eeprom: 2438err_eeprom:
2439 iounmap(adapter->hw.hw_addr); 2439 pci_iounmap(pdev, adapter->hw.hw_addr);
2440err_init_netdev: 2440err_init_netdev:
2441err_ioremap: 2441err_ioremap:
2442 free_netdev(netdev); 2442 free_netdev(netdev);
@@ -2474,7 +2474,7 @@ static void atl1e_remove(struct pci_dev *pdev)
2474 unregister_netdev(netdev); 2474 unregister_netdev(netdev);
2475 atl1e_free_ring_resources(adapter); 2475 atl1e_free_ring_resources(adapter);
2476 atl1e_force_ps(&adapter->hw); 2476 atl1e_force_ps(&adapter->hw);
2477 iounmap(adapter->hw.hw_addr); 2477 pci_iounmap(pdev, adapter->hw.hw_addr);
2478 pci_release_regions(pdev); 2478 pci_release_regions(pdev);
2479 free_netdev(netdev); 2479 free_netdev(netdev);
2480 pci_disable_device(pdev); 2480 pci_disable_device(pdev);
diff --git a/drivers/net/ethernet/broadcom/cnic.c b/drivers/net/ethernet/broadcom/cnic.c
index fcf9105a5476..09f3fefcbf9c 100644
--- a/drivers/net/ethernet/broadcom/cnic.c
+++ b/drivers/net/ethernet/broadcom/cnic.c
@@ -1,6 +1,6 @@
1/* cnic.c: Broadcom CNIC core network driver. 1/* cnic.c: Broadcom CNIC core network driver.
2 * 2 *
3 * Copyright (c) 2006-2013 Broadcom Corporation 3 * Copyright (c) 2006-2014 Broadcom Corporation
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify 5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by 6 * it under the terms of the GNU General Public License as published by
@@ -342,7 +342,7 @@ static int cnic_send_nlmsg(struct cnic_local *cp, u32 type,
342 while (retry < 3) { 342 while (retry < 3) {
343 rc = 0; 343 rc = 0;
344 rcu_read_lock(); 344 rcu_read_lock();
345 ulp_ops = rcu_dereference(cnic_ulp_tbl[CNIC_ULP_ISCSI]); 345 ulp_ops = rcu_dereference(cp->ulp_ops[CNIC_ULP_ISCSI]);
346 if (ulp_ops) 346 if (ulp_ops)
347 rc = ulp_ops->iscsi_nl_send_msg( 347 rc = ulp_ops->iscsi_nl_send_msg(
348 cp->ulp_handle[CNIC_ULP_ISCSI], 348 cp->ulp_handle[CNIC_ULP_ISCSI],
@@ -726,7 +726,7 @@ static void cnic_free_dma(struct cnic_dev *dev, struct cnic_dma *dma)
726 726
727 for (i = 0; i < dma->num_pages; i++) { 727 for (i = 0; i < dma->num_pages; i++) {
728 if (dma->pg_arr[i]) { 728 if (dma->pg_arr[i]) {
729 dma_free_coherent(&dev->pcidev->dev, BNX2_PAGE_SIZE, 729 dma_free_coherent(&dev->pcidev->dev, CNIC_PAGE_SIZE,
730 dma->pg_arr[i], dma->pg_map_arr[i]); 730 dma->pg_arr[i], dma->pg_map_arr[i]);
731 dma->pg_arr[i] = NULL; 731 dma->pg_arr[i] = NULL;
732 } 732 }
@@ -785,7 +785,7 @@ static int cnic_alloc_dma(struct cnic_dev *dev, struct cnic_dma *dma,
785 785
786 for (i = 0; i < pages; i++) { 786 for (i = 0; i < pages; i++) {
787 dma->pg_arr[i] = dma_alloc_coherent(&dev->pcidev->dev, 787 dma->pg_arr[i] = dma_alloc_coherent(&dev->pcidev->dev,
788 BNX2_PAGE_SIZE, 788 CNIC_PAGE_SIZE,
789 &dma->pg_map_arr[i], 789 &dma->pg_map_arr[i],
790 GFP_ATOMIC); 790 GFP_ATOMIC);
791 if (dma->pg_arr[i] == NULL) 791 if (dma->pg_arr[i] == NULL)
@@ -794,8 +794,8 @@ static int cnic_alloc_dma(struct cnic_dev *dev, struct cnic_dma *dma,
794 if (!use_pg_tbl) 794 if (!use_pg_tbl)
795 return 0; 795 return 0;
796 796
797 dma->pgtbl_size = ((pages * 8) + BNX2_PAGE_SIZE - 1) & 797 dma->pgtbl_size = ((pages * 8) + CNIC_PAGE_SIZE - 1) &
798 ~(BNX2_PAGE_SIZE - 1); 798 ~(CNIC_PAGE_SIZE - 1);
799 dma->pgtbl = dma_alloc_coherent(&dev->pcidev->dev, dma->pgtbl_size, 799 dma->pgtbl = dma_alloc_coherent(&dev->pcidev->dev, dma->pgtbl_size,
800 &dma->pgtbl_map, GFP_ATOMIC); 800 &dma->pgtbl_map, GFP_ATOMIC);
801 if (dma->pgtbl == NULL) 801 if (dma->pgtbl == NULL)
@@ -900,8 +900,8 @@ static int cnic_alloc_context(struct cnic_dev *dev)
900 if (BNX2_CHIP(cp) == BNX2_CHIP_5709) { 900 if (BNX2_CHIP(cp) == BNX2_CHIP_5709) {
901 int i, k, arr_size; 901 int i, k, arr_size;
902 902
903 cp->ctx_blk_size = BNX2_PAGE_SIZE; 903 cp->ctx_blk_size = CNIC_PAGE_SIZE;
904 cp->cids_per_blk = BNX2_PAGE_SIZE / 128; 904 cp->cids_per_blk = CNIC_PAGE_SIZE / 128;
905 arr_size = BNX2_MAX_CID / cp->cids_per_blk * 905 arr_size = BNX2_MAX_CID / cp->cids_per_blk *
906 sizeof(struct cnic_ctx); 906 sizeof(struct cnic_ctx);
907 cp->ctx_arr = kzalloc(arr_size, GFP_KERNEL); 907 cp->ctx_arr = kzalloc(arr_size, GFP_KERNEL);
@@ -933,7 +933,7 @@ static int cnic_alloc_context(struct cnic_dev *dev)
933 for (i = 0; i < cp->ctx_blks; i++) { 933 for (i = 0; i < cp->ctx_blks; i++) {
934 cp->ctx_arr[i].ctx = 934 cp->ctx_arr[i].ctx =
935 dma_alloc_coherent(&dev->pcidev->dev, 935 dma_alloc_coherent(&dev->pcidev->dev,
936 BNX2_PAGE_SIZE, 936 CNIC_PAGE_SIZE,
937 &cp->ctx_arr[i].mapping, 937 &cp->ctx_arr[i].mapping,
938 GFP_KERNEL); 938 GFP_KERNEL);
939 if (cp->ctx_arr[i].ctx == NULL) 939 if (cp->ctx_arr[i].ctx == NULL)
@@ -1013,7 +1013,7 @@ static int __cnic_alloc_uio_rings(struct cnic_uio_dev *udev, int pages)
1013 if (udev->l2_ring) 1013 if (udev->l2_ring)
1014 return 0; 1014 return 0;
1015 1015
1016 udev->l2_ring_size = pages * BNX2_PAGE_SIZE; 1016 udev->l2_ring_size = pages * CNIC_PAGE_SIZE;
1017 udev->l2_ring = dma_alloc_coherent(&udev->pdev->dev, udev->l2_ring_size, 1017 udev->l2_ring = dma_alloc_coherent(&udev->pdev->dev, udev->l2_ring_size,
1018 &udev->l2_ring_map, 1018 &udev->l2_ring_map,
1019 GFP_KERNEL | __GFP_COMP); 1019 GFP_KERNEL | __GFP_COMP);
@@ -1021,7 +1021,7 @@ static int __cnic_alloc_uio_rings(struct cnic_uio_dev *udev, int pages)
1021 return -ENOMEM; 1021 return -ENOMEM;
1022 1022
1023 udev->l2_buf_size = (cp->l2_rx_ring_size + 1) * cp->l2_single_buf_size; 1023 udev->l2_buf_size = (cp->l2_rx_ring_size + 1) * cp->l2_single_buf_size;
1024 udev->l2_buf_size = PAGE_ALIGN(udev->l2_buf_size); 1024 udev->l2_buf_size = CNIC_PAGE_ALIGN(udev->l2_buf_size);
1025 udev->l2_buf = dma_alloc_coherent(&udev->pdev->dev, udev->l2_buf_size, 1025 udev->l2_buf = dma_alloc_coherent(&udev->pdev->dev, udev->l2_buf_size,
1026 &udev->l2_buf_map, 1026 &udev->l2_buf_map,
1027 GFP_KERNEL | __GFP_COMP); 1027 GFP_KERNEL | __GFP_COMP);
@@ -1102,7 +1102,7 @@ static int cnic_init_uio(struct cnic_dev *dev)
1102 uinfo->mem[0].size = MB_GET_CID_ADDR(TX_TSS_CID + 1102 uinfo->mem[0].size = MB_GET_CID_ADDR(TX_TSS_CID +
1103 TX_MAX_TSS_RINGS + 1); 1103 TX_MAX_TSS_RINGS + 1);
1104 uinfo->mem[1].addr = (unsigned long) cp->status_blk.gen & 1104 uinfo->mem[1].addr = (unsigned long) cp->status_blk.gen &
1105 PAGE_MASK; 1105 CNIC_PAGE_MASK;
1106 if (cp->ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) 1106 if (cp->ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX)
1107 uinfo->mem[1].size = BNX2_SBLK_MSIX_ALIGN_SIZE * 9; 1107 uinfo->mem[1].size = BNX2_SBLK_MSIX_ALIGN_SIZE * 9;
1108 else 1108 else
@@ -1113,7 +1113,7 @@ static int cnic_init_uio(struct cnic_dev *dev)
1113 uinfo->mem[0].size = pci_resource_len(dev->pcidev, 0); 1113 uinfo->mem[0].size = pci_resource_len(dev->pcidev, 0);
1114 1114
1115 uinfo->mem[1].addr = (unsigned long) cp->bnx2x_def_status_blk & 1115 uinfo->mem[1].addr = (unsigned long) cp->bnx2x_def_status_blk &
1116 PAGE_MASK; 1116 CNIC_PAGE_MASK;
1117 uinfo->mem[1].size = sizeof(*cp->bnx2x_def_status_blk); 1117 uinfo->mem[1].size = sizeof(*cp->bnx2x_def_status_blk);
1118 1118
1119 uinfo->name = "bnx2x_cnic"; 1119 uinfo->name = "bnx2x_cnic";
@@ -1267,14 +1267,14 @@ static int cnic_alloc_bnx2x_resc(struct cnic_dev *dev)
1267 for (i = MAX_ISCSI_TBL_SZ; i < cp->max_cid_space; i++) 1267 for (i = MAX_ISCSI_TBL_SZ; i < cp->max_cid_space; i++)
1268 cp->ctx_tbl[i].ulp_proto_id = CNIC_ULP_FCOE; 1268 cp->ctx_tbl[i].ulp_proto_id = CNIC_ULP_FCOE;
1269 1269
1270 pages = PAGE_ALIGN(cp->max_cid_space * CNIC_KWQ16_DATA_SIZE) / 1270 pages = CNIC_PAGE_ALIGN(cp->max_cid_space * CNIC_KWQ16_DATA_SIZE) /
1271 PAGE_SIZE; 1271 CNIC_PAGE_SIZE;
1272 1272
1273 ret = cnic_alloc_dma(dev, kwq_16_dma, pages, 0); 1273 ret = cnic_alloc_dma(dev, kwq_16_dma, pages, 0);
1274 if (ret) 1274 if (ret)
1275 return -ENOMEM; 1275 return -ENOMEM;
1276 1276
1277 n = PAGE_SIZE / CNIC_KWQ16_DATA_SIZE; 1277 n = CNIC_PAGE_SIZE / CNIC_KWQ16_DATA_SIZE;
1278 for (i = 0, j = 0; i < cp->max_cid_space; i++) { 1278 for (i = 0, j = 0; i < cp->max_cid_space; i++) {
1279 long off = CNIC_KWQ16_DATA_SIZE * (i % n); 1279 long off = CNIC_KWQ16_DATA_SIZE * (i % n);
1280 1280
@@ -1296,7 +1296,7 @@ static int cnic_alloc_bnx2x_resc(struct cnic_dev *dev)
1296 goto error; 1296 goto error;
1297 } 1297 }
1298 1298
1299 pages = PAGE_ALIGN(BNX2X_ISCSI_GLB_BUF_SIZE) / PAGE_SIZE; 1299 pages = CNIC_PAGE_ALIGN(BNX2X_ISCSI_GLB_BUF_SIZE) / CNIC_PAGE_SIZE;
1300 ret = cnic_alloc_dma(dev, &cp->gbl_buf_info, pages, 0); 1300 ret = cnic_alloc_dma(dev, &cp->gbl_buf_info, pages, 0);
1301 if (ret) 1301 if (ret)
1302 goto error; 1302 goto error;
@@ -1466,8 +1466,8 @@ static int cnic_bnx2x_iscsi_init1(struct cnic_dev *dev, struct kwqe *kwqe)
1466 cp->r2tq_size = cp->num_iscsi_tasks * BNX2X_ISCSI_MAX_PENDING_R2TS * 1466 cp->r2tq_size = cp->num_iscsi_tasks * BNX2X_ISCSI_MAX_PENDING_R2TS *
1467 BNX2X_ISCSI_R2TQE_SIZE; 1467 BNX2X_ISCSI_R2TQE_SIZE;
1468 cp->hq_size = cp->num_ccells * BNX2X_ISCSI_HQ_BD_SIZE; 1468 cp->hq_size = cp->num_ccells * BNX2X_ISCSI_HQ_BD_SIZE;
1469 pages = PAGE_ALIGN(cp->hq_size) / PAGE_SIZE; 1469 pages = CNIC_PAGE_ALIGN(cp->hq_size) / CNIC_PAGE_SIZE;
1470 hq_bds = pages * (PAGE_SIZE / BNX2X_ISCSI_HQ_BD_SIZE); 1470 hq_bds = pages * (CNIC_PAGE_SIZE / BNX2X_ISCSI_HQ_BD_SIZE);
1471 cp->num_cqs = req1->num_cqs; 1471 cp->num_cqs = req1->num_cqs;
1472 1472
1473 if (!dev->max_iscsi_conn) 1473 if (!dev->max_iscsi_conn)
@@ -1477,9 +1477,9 @@ static int cnic_bnx2x_iscsi_init1(struct cnic_dev *dev, struct kwqe *kwqe)
1477 CNIC_WR16(dev, BAR_TSTRORM_INTMEM + TSTORM_ISCSI_RQ_SIZE_OFFSET(pfid), 1477 CNIC_WR16(dev, BAR_TSTRORM_INTMEM + TSTORM_ISCSI_RQ_SIZE_OFFSET(pfid),
1478 req1->rq_num_wqes); 1478 req1->rq_num_wqes);
1479 CNIC_WR16(dev, BAR_TSTRORM_INTMEM + TSTORM_ISCSI_PAGE_SIZE_OFFSET(pfid), 1479 CNIC_WR16(dev, BAR_TSTRORM_INTMEM + TSTORM_ISCSI_PAGE_SIZE_OFFSET(pfid),
1480 PAGE_SIZE); 1480 CNIC_PAGE_SIZE);
1481 CNIC_WR8(dev, BAR_TSTRORM_INTMEM + 1481 CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
1482 TSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), PAGE_SHIFT); 1482 TSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), CNIC_PAGE_BITS);
1483 CNIC_WR16(dev, BAR_TSTRORM_INTMEM + 1483 CNIC_WR16(dev, BAR_TSTRORM_INTMEM +
1484 TSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid), 1484 TSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid),
1485 req1->num_tasks_per_conn); 1485 req1->num_tasks_per_conn);
@@ -1489,9 +1489,9 @@ static int cnic_bnx2x_iscsi_init1(struct cnic_dev *dev, struct kwqe *kwqe)
1489 USTORM_ISCSI_RQ_BUFFER_SIZE_OFFSET(pfid), 1489 USTORM_ISCSI_RQ_BUFFER_SIZE_OFFSET(pfid),
1490 req1->rq_buffer_size); 1490 req1->rq_buffer_size);
1491 CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_PAGE_SIZE_OFFSET(pfid), 1491 CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_PAGE_SIZE_OFFSET(pfid),
1492 PAGE_SIZE); 1492 CNIC_PAGE_SIZE);
1493 CNIC_WR8(dev, BAR_USTRORM_INTMEM + 1493 CNIC_WR8(dev, BAR_USTRORM_INTMEM +
1494 USTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), PAGE_SHIFT); 1494 USTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), CNIC_PAGE_BITS);
1495 CNIC_WR16(dev, BAR_USTRORM_INTMEM + 1495 CNIC_WR16(dev, BAR_USTRORM_INTMEM +
1496 USTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid), 1496 USTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid),
1497 req1->num_tasks_per_conn); 1497 req1->num_tasks_per_conn);
@@ -1504,9 +1504,9 @@ static int cnic_bnx2x_iscsi_init1(struct cnic_dev *dev, struct kwqe *kwqe)
1504 1504
1505 /* init Xstorm RAM */ 1505 /* init Xstorm RAM */
1506 CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_PAGE_SIZE_OFFSET(pfid), 1506 CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_PAGE_SIZE_OFFSET(pfid),
1507 PAGE_SIZE); 1507 CNIC_PAGE_SIZE);
1508 CNIC_WR8(dev, BAR_XSTRORM_INTMEM + 1508 CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
1509 XSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), PAGE_SHIFT); 1509 XSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), CNIC_PAGE_BITS);
1510 CNIC_WR16(dev, BAR_XSTRORM_INTMEM + 1510 CNIC_WR16(dev, BAR_XSTRORM_INTMEM +
1511 XSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid), 1511 XSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid),
1512 req1->num_tasks_per_conn); 1512 req1->num_tasks_per_conn);
@@ -1519,9 +1519,9 @@ static int cnic_bnx2x_iscsi_init1(struct cnic_dev *dev, struct kwqe *kwqe)
1519 1519
1520 /* init Cstorm RAM */ 1520 /* init Cstorm RAM */
1521 CNIC_WR16(dev, BAR_CSTRORM_INTMEM + CSTORM_ISCSI_PAGE_SIZE_OFFSET(pfid), 1521 CNIC_WR16(dev, BAR_CSTRORM_INTMEM + CSTORM_ISCSI_PAGE_SIZE_OFFSET(pfid),
1522 PAGE_SIZE); 1522 CNIC_PAGE_SIZE);
1523 CNIC_WR8(dev, BAR_CSTRORM_INTMEM + 1523 CNIC_WR8(dev, BAR_CSTRORM_INTMEM +
1524 CSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), PAGE_SHIFT); 1524 CSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), CNIC_PAGE_BITS);
1525 CNIC_WR16(dev, BAR_CSTRORM_INTMEM + 1525 CNIC_WR16(dev, BAR_CSTRORM_INTMEM +
1526 CSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid), 1526 CSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid),
1527 req1->num_tasks_per_conn); 1527 req1->num_tasks_per_conn);
@@ -1623,18 +1623,18 @@ static int cnic_alloc_bnx2x_conn_resc(struct cnic_dev *dev, u32 l5_cid)
1623 } 1623 }
1624 1624
1625 ctx->cid = cid; 1625 ctx->cid = cid;
1626 pages = PAGE_ALIGN(cp->task_array_size) / PAGE_SIZE; 1626 pages = CNIC_PAGE_ALIGN(cp->task_array_size) / CNIC_PAGE_SIZE;
1627 1627
1628 ret = cnic_alloc_dma(dev, &iscsi->task_array_info, pages, 1); 1628 ret = cnic_alloc_dma(dev, &iscsi->task_array_info, pages, 1);
1629 if (ret) 1629 if (ret)
1630 goto error; 1630 goto error;
1631 1631
1632 pages = PAGE_ALIGN(cp->r2tq_size) / PAGE_SIZE; 1632 pages = CNIC_PAGE_ALIGN(cp->r2tq_size) / CNIC_PAGE_SIZE;
1633 ret = cnic_alloc_dma(dev, &iscsi->r2tq_info, pages, 1); 1633 ret = cnic_alloc_dma(dev, &iscsi->r2tq_info, pages, 1);
1634 if (ret) 1634 if (ret)
1635 goto error; 1635 goto error;
1636 1636
1637 pages = PAGE_ALIGN(cp->hq_size) / PAGE_SIZE; 1637 pages = CNIC_PAGE_ALIGN(cp->hq_size) / CNIC_PAGE_SIZE;
1638 ret = cnic_alloc_dma(dev, &iscsi->hq_info, pages, 1); 1638 ret = cnic_alloc_dma(dev, &iscsi->hq_info, pages, 1);
1639 if (ret) 1639 if (ret)
1640 goto error; 1640 goto error;
@@ -1760,7 +1760,7 @@ static int cnic_setup_bnx2x_ctx(struct cnic_dev *dev, struct kwqe *wqes[],
1760 ictx->tstorm_st_context.iscsi.hdr_bytes_2_fetch = ISCSI_HEADER_SIZE; 1760 ictx->tstorm_st_context.iscsi.hdr_bytes_2_fetch = ISCSI_HEADER_SIZE;
1761 /* TSTORM requires the base address of RQ DB & not PTE */ 1761 /* TSTORM requires the base address of RQ DB & not PTE */
1762 ictx->tstorm_st_context.iscsi.rq_db_phy_addr.lo = 1762 ictx->tstorm_st_context.iscsi.rq_db_phy_addr.lo =
1763 req2->rq_page_table_addr_lo & PAGE_MASK; 1763 req2->rq_page_table_addr_lo & CNIC_PAGE_MASK;
1764 ictx->tstorm_st_context.iscsi.rq_db_phy_addr.hi = 1764 ictx->tstorm_st_context.iscsi.rq_db_phy_addr.hi =
1765 req2->rq_page_table_addr_hi; 1765 req2->rq_page_table_addr_hi;
1766 ictx->tstorm_st_context.iscsi.iscsi_conn_id = req1->iscsi_conn_id; 1766 ictx->tstorm_st_context.iscsi.iscsi_conn_id = req1->iscsi_conn_id;
@@ -1842,7 +1842,7 @@ static int cnic_setup_bnx2x_ctx(struct cnic_dev *dev, struct kwqe *wqes[],
1842 /* CSTORM and USTORM initialization is different, CSTORM requires 1842 /* CSTORM and USTORM initialization is different, CSTORM requires
1843 * CQ DB base & not PTE addr */ 1843 * CQ DB base & not PTE addr */
1844 ictx->cstorm_st_context.cq_db_base.lo = 1844 ictx->cstorm_st_context.cq_db_base.lo =
1845 req1->cq_page_table_addr_lo & PAGE_MASK; 1845 req1->cq_page_table_addr_lo & CNIC_PAGE_MASK;
1846 ictx->cstorm_st_context.cq_db_base.hi = req1->cq_page_table_addr_hi; 1846 ictx->cstorm_st_context.cq_db_base.hi = req1->cq_page_table_addr_hi;
1847 ictx->cstorm_st_context.iscsi_conn_id = req1->iscsi_conn_id; 1847 ictx->cstorm_st_context.iscsi_conn_id = req1->iscsi_conn_id;
1848 ictx->cstorm_st_context.cq_proc_en_bit_map = (1 << cp->num_cqs) - 1; 1848 ictx->cstorm_st_context.cq_proc_en_bit_map = (1 << cp->num_cqs) - 1;
@@ -2911,7 +2911,7 @@ static int cnic_l2_completion(struct cnic_local *cp)
2911 u16 hw_cons, sw_cons; 2911 u16 hw_cons, sw_cons;
2912 struct cnic_uio_dev *udev = cp->udev; 2912 struct cnic_uio_dev *udev = cp->udev;
2913 union eth_rx_cqe *cqe, *cqe_ring = (union eth_rx_cqe *) 2913 union eth_rx_cqe *cqe, *cqe_ring = (union eth_rx_cqe *)
2914 (udev->l2_ring + (2 * BNX2_PAGE_SIZE)); 2914 (udev->l2_ring + (2 * CNIC_PAGE_SIZE));
2915 u32 cmd; 2915 u32 cmd;
2916 int comp = 0; 2916 int comp = 0;
2917 2917
@@ -3244,7 +3244,8 @@ static int cnic_copy_ulp_stats(struct cnic_dev *dev, int ulp_type)
3244 int rc; 3244 int rc;
3245 3245
3246 mutex_lock(&cnic_lock); 3246 mutex_lock(&cnic_lock);
3247 ulp_ops = cnic_ulp_tbl_prot(ulp_type); 3247 ulp_ops = rcu_dereference_protected(cp->ulp_ops[ulp_type],
3248 lockdep_is_held(&cnic_lock));
3248 if (ulp_ops && ulp_ops->cnic_get_stats) 3249 if (ulp_ops && ulp_ops->cnic_get_stats)
3249 rc = ulp_ops->cnic_get_stats(cp->ulp_handle[ulp_type]); 3250 rc = ulp_ops->cnic_get_stats(cp->ulp_handle[ulp_type]);
3250 else 3251 else
@@ -4384,7 +4385,7 @@ static int cnic_setup_5709_context(struct cnic_dev *dev, int valid)
4384 u32 idx = cp->ctx_arr[i].cid / cp->cids_per_blk; 4385 u32 idx = cp->ctx_arr[i].cid / cp->cids_per_blk;
4385 u32 val; 4386 u32 val;
4386 4387
4387 memset(cp->ctx_arr[i].ctx, 0, BNX2_PAGE_SIZE); 4388 memset(cp->ctx_arr[i].ctx, 0, CNIC_PAGE_SIZE);
4388 4389
4389 CNIC_WR(dev, BNX2_CTX_HOST_PAGE_TBL_DATA0, 4390 CNIC_WR(dev, BNX2_CTX_HOST_PAGE_TBL_DATA0,
4390 (cp->ctx_arr[i].mapping & 0xffffffff) | valid_bit); 4391 (cp->ctx_arr[i].mapping & 0xffffffff) | valid_bit);
@@ -4628,7 +4629,7 @@ static void cnic_init_bnx2_rx_ring(struct cnic_dev *dev)
4628 val = BNX2_L2CTX_L2_STATUSB_NUM(sb_id); 4629 val = BNX2_L2CTX_L2_STATUSB_NUM(sb_id);
4629 cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_HOST_BDIDX, val); 4630 cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_HOST_BDIDX, val);
4630 4631
4631 rxbd = udev->l2_ring + BNX2_PAGE_SIZE; 4632 rxbd = udev->l2_ring + CNIC_PAGE_SIZE;
4632 for (i = 0; i < BNX2_MAX_RX_DESC_CNT; i++, rxbd++) { 4633 for (i = 0; i < BNX2_MAX_RX_DESC_CNT; i++, rxbd++) {
4633 dma_addr_t buf_map; 4634 dma_addr_t buf_map;
4634 int n = (i % cp->l2_rx_ring_size) + 1; 4635 int n = (i % cp->l2_rx_ring_size) + 1;
@@ -4639,11 +4640,11 @@ static void cnic_init_bnx2_rx_ring(struct cnic_dev *dev)
4639 rxbd->rx_bd_haddr_hi = (u64) buf_map >> 32; 4640 rxbd->rx_bd_haddr_hi = (u64) buf_map >> 32;
4640 rxbd->rx_bd_haddr_lo = (u64) buf_map & 0xffffffff; 4641 rxbd->rx_bd_haddr_lo = (u64) buf_map & 0xffffffff;
4641 } 4642 }
4642 val = (u64) (ring_map + BNX2_PAGE_SIZE) >> 32; 4643 val = (u64) (ring_map + CNIC_PAGE_SIZE) >> 32;
4643 cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val); 4644 cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val);
4644 rxbd->rx_bd_haddr_hi = val; 4645 rxbd->rx_bd_haddr_hi = val;
4645 4646
4646 val = (u64) (ring_map + BNX2_PAGE_SIZE) & 0xffffffff; 4647 val = (u64) (ring_map + CNIC_PAGE_SIZE) & 0xffffffff;
4647 cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val); 4648 cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val);
4648 rxbd->rx_bd_haddr_lo = val; 4649 rxbd->rx_bd_haddr_lo = val;
4649 4650
@@ -4709,10 +4710,10 @@ static int cnic_start_bnx2_hw(struct cnic_dev *dev)
4709 4710
4710 val = CNIC_RD(dev, BNX2_MQ_CONFIG); 4711 val = CNIC_RD(dev, BNX2_MQ_CONFIG);
4711 val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE; 4712 val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
4712 if (BNX2_PAGE_BITS > 12) 4713 if (CNIC_PAGE_BITS > 12)
4713 val |= (12 - 8) << 4; 4714 val |= (12 - 8) << 4;
4714 else 4715 else
4715 val |= (BNX2_PAGE_BITS - 8) << 4; 4716 val |= (CNIC_PAGE_BITS - 8) << 4;
4716 4717
4717 CNIC_WR(dev, BNX2_MQ_CONFIG, val); 4718 CNIC_WR(dev, BNX2_MQ_CONFIG, val);
4718 4719
@@ -4742,13 +4743,13 @@ static int cnic_start_bnx2_hw(struct cnic_dev *dev)
4742 4743
4743 /* Initialize the kernel work queue context. */ 4744 /* Initialize the kernel work queue context. */
4744 val = KRNLQ_TYPE_TYPE_KRNLQ | KRNLQ_SIZE_TYPE_SIZE | 4745 val = KRNLQ_TYPE_TYPE_KRNLQ | KRNLQ_SIZE_TYPE_SIZE |
4745 (BNX2_PAGE_BITS - 8) | KRNLQ_FLAGS_QE_SELF_SEQ; 4746 (CNIC_PAGE_BITS - 8) | KRNLQ_FLAGS_QE_SELF_SEQ;
4746 cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_TYPE, val); 4747 cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_TYPE, val);
4747 4748
4748 val = (BNX2_PAGE_SIZE / sizeof(struct kwqe) - 1) << 16; 4749 val = (CNIC_PAGE_SIZE / sizeof(struct kwqe) - 1) << 16;
4749 cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_QE_SELF_SEQ_MAX, val); 4750 cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_QE_SELF_SEQ_MAX, val);
4750 4751
4751 val = ((BNX2_PAGE_SIZE / sizeof(struct kwqe)) << 16) | KWQ_PAGE_CNT; 4752 val = ((CNIC_PAGE_SIZE / sizeof(struct kwqe)) << 16) | KWQ_PAGE_CNT;
4752 cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_PGTBL_NPAGES, val); 4753 cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_PGTBL_NPAGES, val);
4753 4754
4754 val = (u32) ((u64) cp->kwq_info.pgtbl_map >> 32); 4755 val = (u32) ((u64) cp->kwq_info.pgtbl_map >> 32);
@@ -4768,13 +4769,13 @@ static int cnic_start_bnx2_hw(struct cnic_dev *dev)
4768 4769
4769 /* Initialize the kernel complete queue context. */ 4770 /* Initialize the kernel complete queue context. */
4770 val = KRNLQ_TYPE_TYPE_KRNLQ | KRNLQ_SIZE_TYPE_SIZE | 4771 val = KRNLQ_TYPE_TYPE_KRNLQ | KRNLQ_SIZE_TYPE_SIZE |
4771 (BNX2_PAGE_BITS - 8) | KRNLQ_FLAGS_QE_SELF_SEQ; 4772 (CNIC_PAGE_BITS - 8) | KRNLQ_FLAGS_QE_SELF_SEQ;
4772 cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_TYPE, val); 4773 cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_TYPE, val);
4773 4774
4774 val = (BNX2_PAGE_SIZE / sizeof(struct kcqe) - 1) << 16; 4775 val = (CNIC_PAGE_SIZE / sizeof(struct kcqe) - 1) << 16;
4775 cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_QE_SELF_SEQ_MAX, val); 4776 cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_QE_SELF_SEQ_MAX, val);
4776 4777
4777 val = ((BNX2_PAGE_SIZE / sizeof(struct kcqe)) << 16) | KCQ_PAGE_CNT; 4778 val = ((CNIC_PAGE_SIZE / sizeof(struct kcqe)) << 16) | KCQ_PAGE_CNT;
4778 cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_PGTBL_NPAGES, val); 4779 cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_PGTBL_NPAGES, val);
4779 4780
4780 val = (u32) ((u64) cp->kcq1.dma.pgtbl_map >> 32); 4781 val = (u32) ((u64) cp->kcq1.dma.pgtbl_map >> 32);
@@ -4918,7 +4919,7 @@ static void cnic_init_bnx2x_tx_ring(struct cnic_dev *dev,
4918 u32 cli = cp->ethdev->iscsi_l2_client_id; 4919 u32 cli = cp->ethdev->iscsi_l2_client_id;
4919 u32 val; 4920 u32 val;
4920 4921
4921 memset(txbd, 0, BNX2_PAGE_SIZE); 4922 memset(txbd, 0, CNIC_PAGE_SIZE);
4922 4923
4923 buf_map = udev->l2_buf_map; 4924 buf_map = udev->l2_buf_map;
4924 for (i = 0; i < BNX2_MAX_TX_DESC_CNT; i += 3, txbd += 3) { 4925 for (i = 0; i < BNX2_MAX_TX_DESC_CNT; i += 3, txbd += 3) {
@@ -4978,9 +4979,9 @@ static void cnic_init_bnx2x_rx_ring(struct cnic_dev *dev,
4978 struct bnx2x *bp = netdev_priv(dev->netdev); 4979 struct bnx2x *bp = netdev_priv(dev->netdev);
4979 struct cnic_uio_dev *udev = cp->udev; 4980 struct cnic_uio_dev *udev = cp->udev;
4980 struct eth_rx_bd *rxbd = (struct eth_rx_bd *) (udev->l2_ring + 4981 struct eth_rx_bd *rxbd = (struct eth_rx_bd *) (udev->l2_ring +
4981 BNX2_PAGE_SIZE); 4982 CNIC_PAGE_SIZE);
4982 struct eth_rx_cqe_next_page *rxcqe = (struct eth_rx_cqe_next_page *) 4983 struct eth_rx_cqe_next_page *rxcqe = (struct eth_rx_cqe_next_page *)
4983 (udev->l2_ring + (2 * BNX2_PAGE_SIZE)); 4984 (udev->l2_ring + (2 * CNIC_PAGE_SIZE));
4984 struct host_sp_status_block *sb = cp->bnx2x_def_status_blk; 4985 struct host_sp_status_block *sb = cp->bnx2x_def_status_blk;
4985 int i; 4986 int i;
4986 u32 cli = cp->ethdev->iscsi_l2_client_id; 4987 u32 cli = cp->ethdev->iscsi_l2_client_id;
@@ -5004,20 +5005,20 @@ static void cnic_init_bnx2x_rx_ring(struct cnic_dev *dev,
5004 rxbd->addr_lo = cpu_to_le32(buf_map & 0xffffffff); 5005 rxbd->addr_lo = cpu_to_le32(buf_map & 0xffffffff);
5005 } 5006 }
5006 5007
5007 val = (u64) (ring_map + BNX2_PAGE_SIZE) >> 32; 5008 val = (u64) (ring_map + CNIC_PAGE_SIZE) >> 32;
5008 rxbd->addr_hi = cpu_to_le32(val); 5009 rxbd->addr_hi = cpu_to_le32(val);
5009 data->rx.bd_page_base.hi = cpu_to_le32(val); 5010 data->rx.bd_page_base.hi = cpu_to_le32(val);
5010 5011
5011 val = (u64) (ring_map + BNX2_PAGE_SIZE) & 0xffffffff; 5012 val = (u64) (ring_map + CNIC_PAGE_SIZE) & 0xffffffff;
5012 rxbd->addr_lo = cpu_to_le32(val); 5013 rxbd->addr_lo = cpu_to_le32(val);
5013 data->rx.bd_page_base.lo = cpu_to_le32(val); 5014 data->rx.bd_page_base.lo = cpu_to_le32(val);
5014 5015
5015 rxcqe += BNX2X_MAX_RCQ_DESC_CNT; 5016 rxcqe += BNX2X_MAX_RCQ_DESC_CNT;
5016 val = (u64) (ring_map + (2 * BNX2_PAGE_SIZE)) >> 32; 5017 val = (u64) (ring_map + (2 * CNIC_PAGE_SIZE)) >> 32;
5017 rxcqe->addr_hi = cpu_to_le32(val); 5018 rxcqe->addr_hi = cpu_to_le32(val);
5018 data->rx.cqe_page_base.hi = cpu_to_le32(val); 5019 data->rx.cqe_page_base.hi = cpu_to_le32(val);
5019 5020
5020 val = (u64) (ring_map + (2 * BNX2_PAGE_SIZE)) & 0xffffffff; 5021 val = (u64) (ring_map + (2 * CNIC_PAGE_SIZE)) & 0xffffffff;
5021 rxcqe->addr_lo = cpu_to_le32(val); 5022 rxcqe->addr_lo = cpu_to_le32(val);
5022 data->rx.cqe_page_base.lo = cpu_to_le32(val); 5023 data->rx.cqe_page_base.lo = cpu_to_le32(val);
5023 5024
@@ -5265,8 +5266,8 @@ static void cnic_shutdown_rings(struct cnic_dev *dev)
5265 msleep(10); 5266 msleep(10);
5266 } 5267 }
5267 clear_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags); 5268 clear_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags);
5268 rx_ring = udev->l2_ring + BNX2_PAGE_SIZE; 5269 rx_ring = udev->l2_ring + CNIC_PAGE_SIZE;
5269 memset(rx_ring, 0, BNX2_PAGE_SIZE); 5270 memset(rx_ring, 0, CNIC_PAGE_SIZE);
5270} 5271}
5271 5272
5272static int cnic_register_netdev(struct cnic_dev *dev) 5273static int cnic_register_netdev(struct cnic_dev *dev)
diff --git a/drivers/net/ethernet/broadcom/cnic.h b/drivers/net/ethernet/broadcom/cnic.h
index 0d6b13f854d9..d535ae4228b4 100644
--- a/drivers/net/ethernet/broadcom/cnic.h
+++ b/drivers/net/ethernet/broadcom/cnic.h
@@ -1,6 +1,6 @@
1/* cnic.h: Broadcom CNIC core network driver. 1/* cnic.h: Broadcom CNIC core network driver.
2 * 2 *
3 * Copyright (c) 2006-2013 Broadcom Corporation 3 * Copyright (c) 2006-2014 Broadcom Corporation
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify 5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by 6 * it under the terms of the GNU General Public License as published by
diff --git a/drivers/net/ethernet/broadcom/cnic_defs.h b/drivers/net/ethernet/broadcom/cnic_defs.h
index 95a8e4b11c9f..dcbca6997e8f 100644
--- a/drivers/net/ethernet/broadcom/cnic_defs.h
+++ b/drivers/net/ethernet/broadcom/cnic_defs.h
@@ -1,7 +1,7 @@
1 1
2/* cnic.c: Broadcom CNIC core network driver. 2/* cnic.c: Broadcom CNIC core network driver.
3 * 3 *
4 * Copyright (c) 2006-2013 Broadcom Corporation 4 * Copyright (c) 2006-2014 Broadcom Corporation
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify 6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by 7 * it under the terms of the GNU General Public License as published by
diff --git a/drivers/net/ethernet/broadcom/cnic_if.h b/drivers/net/ethernet/broadcom/cnic_if.h
index 8cf6b1926069..5f4d5573a73d 100644
--- a/drivers/net/ethernet/broadcom/cnic_if.h
+++ b/drivers/net/ethernet/broadcom/cnic_if.h
@@ -1,6 +1,6 @@
1/* cnic_if.h: Broadcom CNIC core network driver. 1/* cnic_if.h: Broadcom CNIC core network driver.
2 * 2 *
3 * Copyright (c) 2006-2013 Broadcom Corporation 3 * Copyright (c) 2006-2014 Broadcom Corporation
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify 5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by 6 * it under the terms of the GNU General Public License as published by
@@ -14,8 +14,8 @@
14 14
15#include "bnx2x/bnx2x_mfw_req.h" 15#include "bnx2x/bnx2x_mfw_req.h"
16 16
17#define CNIC_MODULE_VERSION "2.5.19" 17#define CNIC_MODULE_VERSION "2.5.20"
18#define CNIC_MODULE_RELDATE "December 19, 2013" 18#define CNIC_MODULE_RELDATE "March 14, 2014"
19 19
20#define CNIC_ULP_RDMA 0 20#define CNIC_ULP_RDMA 0
21#define CNIC_ULP_ISCSI 1 21#define CNIC_ULP_ISCSI 1
@@ -24,6 +24,16 @@
24#define MAX_CNIC_ULP_TYPE_EXT 3 24#define MAX_CNIC_ULP_TYPE_EXT 3
25#define MAX_CNIC_ULP_TYPE 4 25#define MAX_CNIC_ULP_TYPE 4
26 26
27/* Use CPU native page size up to 16K for cnic ring sizes. */
28#if (PAGE_SHIFT > 14)
29#define CNIC_PAGE_BITS 14
30#else
31#define CNIC_PAGE_BITS PAGE_SHIFT
32#endif
33#define CNIC_PAGE_SIZE (1 << (CNIC_PAGE_BITS))
34#define CNIC_PAGE_ALIGN(addr) ALIGN(addr, CNIC_PAGE_SIZE)
35#define CNIC_PAGE_MASK (~((CNIC_PAGE_SIZE) - 1))
36
27struct kwqe { 37struct kwqe {
28 u32 kwqe_op_flag; 38 u32 kwqe_op_flag;
29 39
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index 3b6d0ba86c71..70a225c8df5c 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -17649,8 +17649,6 @@ static int tg3_init_one(struct pci_dev *pdev,
17649 17649
17650 tg3_init_bufmgr_config(tp); 17650 tg3_init_bufmgr_config(tp);
17651 17651
17652 features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
17653
17654 /* 5700 B0 chips do not support checksumming correctly due 17652 /* 5700 B0 chips do not support checksumming correctly due
17655 * to hardware bugs. 17653 * to hardware bugs.
17656 */ 17654 */
@@ -17682,7 +17680,8 @@ static int tg3_init_one(struct pci_dev *pdev,
17682 features |= NETIF_F_TSO_ECN; 17680 features |= NETIF_F_TSO_ECN;
17683 } 17681 }
17684 17682
17685 dev->features |= features; 17683 dev->features |= features | NETIF_F_HW_VLAN_CTAG_TX |
17684 NETIF_F_HW_VLAN_CTAG_RX;
17686 dev->vlan_features |= features; 17685 dev->vlan_features |= features;
17687 17686
17688 /* 17687 /*
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index f418f4f20f94..8d76fca7fde7 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -22,6 +22,7 @@
22#include <linux/interrupt.h> 22#include <linux/interrupt.h>
23#include <net/ip.h> 23#include <net/ip.h>
24#include <net/ipv6.h> 24#include <net/ipv6.h>
25#include <linux/io.h>
25#include <linux/of.h> 26#include <linux/of.h>
26#include <linux/of_irq.h> 27#include <linux/of_irq.h>
27#include <linux/of_mdio.h> 28#include <linux/of_mdio.h>
@@ -88,8 +89,9 @@
88#define MVNETA_TX_IN_PRGRS BIT(1) 89#define MVNETA_TX_IN_PRGRS BIT(1)
89#define MVNETA_TX_FIFO_EMPTY BIT(8) 90#define MVNETA_TX_FIFO_EMPTY BIT(8)
90#define MVNETA_RX_MIN_FRAME_SIZE 0x247c 91#define MVNETA_RX_MIN_FRAME_SIZE 0x247c
91#define MVNETA_SGMII_SERDES_CFG 0x24A0 92#define MVNETA_SERDES_CFG 0x24A0
92#define MVNETA_SGMII_SERDES_PROTO 0x0cc7 93#define MVNETA_SGMII_SERDES_PROTO 0x0cc7
94#define MVNETA_RGMII_SERDES_PROTO 0x0667
93#define MVNETA_TYPE_PRIO 0x24bc 95#define MVNETA_TYPE_PRIO 0x24bc
94#define MVNETA_FORCE_UNI BIT(21) 96#define MVNETA_FORCE_UNI BIT(21)
95#define MVNETA_TXQ_CMD_1 0x24e4 97#define MVNETA_TXQ_CMD_1 0x24e4
@@ -161,7 +163,7 @@
161#define MVNETA_GMAC_MAX_RX_SIZE_MASK 0x7ffc 163#define MVNETA_GMAC_MAX_RX_SIZE_MASK 0x7ffc
162#define MVNETA_GMAC0_PORT_ENABLE BIT(0) 164#define MVNETA_GMAC0_PORT_ENABLE BIT(0)
163#define MVNETA_GMAC_CTRL_2 0x2c08 165#define MVNETA_GMAC_CTRL_2 0x2c08
164#define MVNETA_GMAC2_PSC_ENABLE BIT(3) 166#define MVNETA_GMAC2_PCS_ENABLE BIT(3)
165#define MVNETA_GMAC2_PORT_RGMII BIT(4) 167#define MVNETA_GMAC2_PORT_RGMII BIT(4)
166#define MVNETA_GMAC2_PORT_RESET BIT(6) 168#define MVNETA_GMAC2_PORT_RESET BIT(6)
167#define MVNETA_GMAC_STATUS 0x2c10 169#define MVNETA_GMAC_STATUS 0x2c10
@@ -710,35 +712,6 @@ static void mvneta_rxq_bm_disable(struct mvneta_port *pp,
710 mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val); 712 mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val);
711} 713}
712 714
713
714
715/* Sets the RGMII Enable bit (RGMIIEn) in port MAC control register */
716static void mvneta_gmac_rgmii_set(struct mvneta_port *pp, int enable)
717{
718 u32 val;
719
720 val = mvreg_read(pp, MVNETA_GMAC_CTRL_2);
721
722 if (enable)
723 val |= MVNETA_GMAC2_PORT_RGMII;
724 else
725 val &= ~MVNETA_GMAC2_PORT_RGMII;
726
727 mvreg_write(pp, MVNETA_GMAC_CTRL_2, val);
728}
729
730/* Config SGMII port */
731static void mvneta_port_sgmii_config(struct mvneta_port *pp)
732{
733 u32 val;
734
735 val = mvreg_read(pp, MVNETA_GMAC_CTRL_2);
736 val |= MVNETA_GMAC2_PSC_ENABLE;
737 mvreg_write(pp, MVNETA_GMAC_CTRL_2, val);
738
739 mvreg_write(pp, MVNETA_SGMII_SERDES_CFG, MVNETA_SGMII_SERDES_PROTO);
740}
741
742/* Start the Ethernet port RX and TX activity */ 715/* Start the Ethernet port RX and TX activity */
743static void mvneta_port_up(struct mvneta_port *pp) 716static void mvneta_port_up(struct mvneta_port *pp)
744{ 717{
@@ -2756,12 +2729,15 @@ static void mvneta_port_power_up(struct mvneta_port *pp, int phy_mode)
2756 mvreg_write(pp, MVNETA_UNIT_INTR_CAUSE, 0); 2729 mvreg_write(pp, MVNETA_UNIT_INTR_CAUSE, 0);
2757 2730
2758 if (phy_mode == PHY_INTERFACE_MODE_SGMII) 2731 if (phy_mode == PHY_INTERFACE_MODE_SGMII)
2759 mvneta_port_sgmii_config(pp); 2732 mvreg_write(pp, MVNETA_SERDES_CFG, MVNETA_SGMII_SERDES_PROTO);
2733 else
2734 mvreg_write(pp, MVNETA_SERDES_CFG, MVNETA_RGMII_SERDES_PROTO);
2735
2736 val = mvreg_read(pp, MVNETA_GMAC_CTRL_2);
2760 2737
2761 mvneta_gmac_rgmii_set(pp, 1); 2738 val |= MVNETA_GMAC2_PCS_ENABLE | MVNETA_GMAC2_PORT_RGMII;
2762 2739
2763 /* Cancel Port Reset */ 2740 /* Cancel Port Reset */
2764 val = mvreg_read(pp, MVNETA_GMAC_CTRL_2);
2765 val &= ~MVNETA_GMAC2_PORT_RESET; 2741 val &= ~MVNETA_GMAC2_PORT_RESET;
2766 mvreg_write(pp, MVNETA_GMAC_CTRL_2, val); 2742 mvreg_write(pp, MVNETA_GMAC_CTRL_2, val);
2767 2743
@@ -2774,6 +2750,7 @@ static void mvneta_port_power_up(struct mvneta_port *pp, int phy_mode)
2774static int mvneta_probe(struct platform_device *pdev) 2750static int mvneta_probe(struct platform_device *pdev)
2775{ 2751{
2776 const struct mbus_dram_target_info *dram_target_info; 2752 const struct mbus_dram_target_info *dram_target_info;
2753 struct resource *res;
2777 struct device_node *dn = pdev->dev.of_node; 2754 struct device_node *dn = pdev->dev.of_node;
2778 struct device_node *phy_node; 2755 struct device_node *phy_node;
2779 u32 phy_addr; 2756 u32 phy_addr;
@@ -2838,9 +2815,15 @@ static int mvneta_probe(struct platform_device *pdev)
2838 2815
2839 clk_prepare_enable(pp->clk); 2816 clk_prepare_enable(pp->clk);
2840 2817
2841 pp->base = of_iomap(dn, 0); 2818 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2819 if (!res) {
2820 err = -ENODEV;
2821 goto err_clk;
2822 }
2823
2824 pp->base = devm_ioremap_resource(&pdev->dev, res);
2842 if (pp->base == NULL) { 2825 if (pp->base == NULL) {
2843 err = -ENOMEM; 2826 err = PTR_ERR(pp->base);
2844 goto err_clk; 2827 goto err_clk;
2845 } 2828 }
2846 2829
@@ -2848,7 +2831,7 @@ static int mvneta_probe(struct platform_device *pdev)
2848 pp->stats = alloc_percpu(struct mvneta_pcpu_stats); 2831 pp->stats = alloc_percpu(struct mvneta_pcpu_stats);
2849 if (!pp->stats) { 2832 if (!pp->stats) {
2850 err = -ENOMEM; 2833 err = -ENOMEM;
2851 goto err_unmap; 2834 goto err_clk;
2852 } 2835 }
2853 2836
2854 for_each_possible_cpu(cpu) { 2837 for_each_possible_cpu(cpu) {
@@ -2913,8 +2896,6 @@ err_deinit:
2913 mvneta_deinit(pp); 2896 mvneta_deinit(pp);
2914err_free_stats: 2897err_free_stats:
2915 free_percpu(pp->stats); 2898 free_percpu(pp->stats);
2916err_unmap:
2917 iounmap(pp->base);
2918err_clk: 2899err_clk:
2919 clk_disable_unprepare(pp->clk); 2900 clk_disable_unprepare(pp->clk);
2920err_free_irq: 2901err_free_irq:
@@ -2934,7 +2915,6 @@ static int mvneta_remove(struct platform_device *pdev)
2934 mvneta_deinit(pp); 2915 mvneta_deinit(pp);
2935 clk_disable_unprepare(pp->clk); 2916 clk_disable_unprepare(pp->clk);
2936 free_percpu(pp->stats); 2917 free_percpu(pp->stats);
2937 iounmap(pp->base);
2938 irq_dispose_mapping(dev->irq); 2918 irq_dispose_mapping(dev->irq);
2939 free_netdev(dev); 2919 free_netdev(dev);
2940 2920
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index 936c15364739..d413e60071d4 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -2681,7 +2681,11 @@ static pci_ers_result_t mlx4_pci_err_detected(struct pci_dev *pdev,
2681 2681
2682static pci_ers_result_t mlx4_pci_slot_reset(struct pci_dev *pdev) 2682static pci_ers_result_t mlx4_pci_slot_reset(struct pci_dev *pdev)
2683{ 2683{
2684 int ret = __mlx4_init_one(pdev, 0); 2684 const struct pci_device_id *id;
2685 int ret;
2686
2687 id = pci_match_id(mlx4_pci_table, pdev);
2688 ret = __mlx4_init_one(pdev, id->driver_data);
2685 2689
2686 return ret ? PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED; 2690 return ret ? PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED;
2687} 2691}
diff --git a/drivers/net/ethernet/micrel/ks8851.c b/drivers/net/ethernet/micrel/ks8851.c
index 727b546a9eb8..e0c92e0e5e1d 100644
--- a/drivers/net/ethernet/micrel/ks8851.c
+++ b/drivers/net/ethernet/micrel/ks8851.c
@@ -23,6 +23,7 @@
23#include <linux/crc32.h> 23#include <linux/crc32.h>
24#include <linux/mii.h> 24#include <linux/mii.h>
25#include <linux/eeprom_93cx6.h> 25#include <linux/eeprom_93cx6.h>
26#include <linux/regulator/consumer.h>
26 27
27#include <linux/spi/spi.h> 28#include <linux/spi/spi.h>
28 29
@@ -83,6 +84,7 @@ union ks8851_tx_hdr {
83 * @rc_rxqcr: Cached copy of KS_RXQCR. 84 * @rc_rxqcr: Cached copy of KS_RXQCR.
84 * @eeprom_size: Companion eeprom size in Bytes, 0 if no eeprom 85 * @eeprom_size: Companion eeprom size in Bytes, 0 if no eeprom
85 * @eeprom: 93CX6 EEPROM state for accessing on-board EEPROM. 86 * @eeprom: 93CX6 EEPROM state for accessing on-board EEPROM.
87 * @vdd_reg: Optional regulator supplying the chip
86 * 88 *
87 * The @lock ensures that the chip is protected when certain operations are 89 * The @lock ensures that the chip is protected when certain operations are
88 * in progress. When the read or write packet transfer is in progress, most 90 * in progress. When the read or write packet transfer is in progress, most
@@ -130,6 +132,7 @@ struct ks8851_net {
130 struct spi_transfer spi_xfer2[2]; 132 struct spi_transfer spi_xfer2[2];
131 133
132 struct eeprom_93cx6 eeprom; 134 struct eeprom_93cx6 eeprom;
135 struct regulator *vdd_reg;
133}; 136};
134 137
135static int msg_enable; 138static int msg_enable;
@@ -1414,6 +1417,21 @@ static int ks8851_probe(struct spi_device *spi)
1414 ks->spidev = spi; 1417 ks->spidev = spi;
1415 ks->tx_space = 6144; 1418 ks->tx_space = 6144;
1416 1419
1420 ks->vdd_reg = regulator_get_optional(&spi->dev, "vdd");
1421 if (IS_ERR(ks->vdd_reg)) {
1422 ret = PTR_ERR(ks->vdd_reg);
1423 if (ret == -EPROBE_DEFER)
1424 goto err_reg;
1425 } else {
1426 ret = regulator_enable(ks->vdd_reg);
1427 if (ret) {
1428 dev_err(&spi->dev, "regulator enable fail: %d\n",
1429 ret);
1430 goto err_reg_en;
1431 }
1432 }
1433
1434
1417 mutex_init(&ks->lock); 1435 mutex_init(&ks->lock);
1418 spin_lock_init(&ks->statelock); 1436 spin_lock_init(&ks->statelock);
1419 1437
@@ -1508,8 +1526,14 @@ static int ks8851_probe(struct spi_device *spi)
1508err_netdev: 1526err_netdev:
1509 free_irq(ndev->irq, ks); 1527 free_irq(ndev->irq, ks);
1510 1528
1511err_id:
1512err_irq: 1529err_irq:
1530err_id:
1531 if (!IS_ERR(ks->vdd_reg))
1532 regulator_disable(ks->vdd_reg);
1533err_reg_en:
1534 if (!IS_ERR(ks->vdd_reg))
1535 regulator_put(ks->vdd_reg);
1536err_reg:
1513 free_netdev(ndev); 1537 free_netdev(ndev);
1514 return ret; 1538 return ret;
1515} 1539}
@@ -1523,6 +1547,10 @@ static int ks8851_remove(struct spi_device *spi)
1523 1547
1524 unregister_netdev(priv->netdev); 1548 unregister_netdev(priv->netdev);
1525 free_irq(spi->irq, priv); 1549 free_irq(spi->irq, priv);
1550 if (!IS_ERR(priv->vdd_reg)) {
1551 regulator_disable(priv->vdd_reg);
1552 regulator_put(priv->vdd_reg);
1553 }
1526 free_netdev(priv->netdev); 1554 free_netdev(priv->netdev);
1527 1555
1528 return 0; 1556 return 0;
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_main.c b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
index ce2cfddbed50..656c65ddadb4 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge_main.c
+++ b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
@@ -4765,7 +4765,9 @@ static int qlge_probe(struct pci_dev *pdev,
4765 ndev->features = ndev->hw_features; 4765 ndev->features = ndev->hw_features;
4766 ndev->vlan_features = ndev->hw_features; 4766 ndev->vlan_features = ndev->hw_features;
4767 /* vlan gets same features (except vlan filter) */ 4767 /* vlan gets same features (except vlan filter) */
4768 ndev->vlan_features &= ~NETIF_F_HW_VLAN_CTAG_FILTER; 4768 ndev->vlan_features &= ~(NETIF_F_HW_VLAN_CTAG_FILTER |
4769 NETIF_F_HW_VLAN_CTAG_TX |
4770 NETIF_F_HW_VLAN_CTAG_RX);
4769 4771
4770 if (test_bit(QL_DMA64, &qdev->flags)) 4772 if (test_bit(QL_DMA64, &qdev->flags))
4771 ndev->features |= NETIF_F_HIGHDMA; 4773 ndev->features |= NETIF_F_HIGHDMA;
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index ffd4d12acf6d..7d6d8ec676c8 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -2229,10 +2229,6 @@ static int cpsw_probe(struct platform_device *pdev)
2229 goto clean_ale_ret; 2229 goto clean_ale_ret;
2230 } 2230 }
2231 2231
2232 if (cpts_register(&pdev->dev, priv->cpts,
2233 data->cpts_clock_mult, data->cpts_clock_shift))
2234 dev_err(priv->dev, "error registering cpts device\n");
2235
2236 cpsw_notice(priv, probe, "initialized device (regs %pa, irq %d)\n", 2232 cpsw_notice(priv, probe, "initialized device (regs %pa, irq %d)\n",
2237 &ss_res->start, ndev->irq); 2233 &ss_res->start, ndev->irq);
2238 2234
diff --git a/drivers/net/ethernet/ti/davinci_cpdma.c b/drivers/net/ethernet/ti/davinci_cpdma.c
index 364d0c7952c0..88ef27067bf2 100644
--- a/drivers/net/ethernet/ti/davinci_cpdma.c
+++ b/drivers/net/ethernet/ti/davinci_cpdma.c
@@ -355,7 +355,7 @@ int cpdma_ctlr_stop(struct cpdma_ctlr *ctlr)
355 int i; 355 int i;
356 356
357 spin_lock_irqsave(&ctlr->lock, flags); 357 spin_lock_irqsave(&ctlr->lock, flags);
358 if (ctlr->state != CPDMA_STATE_ACTIVE) { 358 if (ctlr->state == CPDMA_STATE_TEARDOWN) {
359 spin_unlock_irqrestore(&ctlr->lock, flags); 359 spin_unlock_irqrestore(&ctlr->lock, flags);
360 return -EINVAL; 360 return -EINVAL;
361 } 361 }
@@ -891,7 +891,7 @@ int cpdma_chan_stop(struct cpdma_chan *chan)
891 unsigned timeout; 891 unsigned timeout;
892 892
893 spin_lock_irqsave(&chan->lock, flags); 893 spin_lock_irqsave(&chan->lock, flags);
894 if (chan->state != CPDMA_STATE_ACTIVE) { 894 if (chan->state == CPDMA_STATE_TEARDOWN) {
895 spin_unlock_irqrestore(&chan->lock, flags); 895 spin_unlock_irqrestore(&chan->lock, flags);
896 return -EINVAL; 896 return -EINVAL;
897 } 897 }
diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c
index cd9b164a0434..8f0e69ce07ca 100644
--- a/drivers/net/ethernet/ti/davinci_emac.c
+++ b/drivers/net/ethernet/ti/davinci_emac.c
@@ -1532,9 +1532,9 @@ static int emac_dev_open(struct net_device *ndev)
1532 struct device *emac_dev = &ndev->dev; 1532 struct device *emac_dev = &ndev->dev;
1533 u32 cnt; 1533 u32 cnt;
1534 struct resource *res; 1534 struct resource *res;
1535 int ret; 1535 int q, m, ret;
1536 int res_num = 0, irq_num = 0;
1536 int i = 0; 1537 int i = 0;
1537 int k = 0;
1538 struct emac_priv *priv = netdev_priv(ndev); 1538 struct emac_priv *priv = netdev_priv(ndev);
1539 1539
1540 pm_runtime_get(&priv->pdev->dev); 1540 pm_runtime_get(&priv->pdev->dev);
@@ -1564,15 +1564,24 @@ static int emac_dev_open(struct net_device *ndev)
1564 } 1564 }
1565 1565
1566 /* Request IRQ */ 1566 /* Request IRQ */
1567 while ((res = platform_get_resource(priv->pdev, IORESOURCE_IRQ,
1568 res_num))) {
1569 for (irq_num = res->start; irq_num <= res->end; irq_num++) {
1570 dev_err(emac_dev, "Request IRQ %d\n", irq_num);
1571 if (request_irq(irq_num, emac_irq, 0, ndev->name,
1572 ndev)) {
1573 dev_err(emac_dev,
1574 "DaVinci EMAC: request_irq() failed\n");
1575 ret = -EBUSY;
1567 1576
1568 while ((res = platform_get_resource(priv->pdev, IORESOURCE_IRQ, k))) {
1569 for (i = res->start; i <= res->end; i++) {
1570 if (devm_request_irq(&priv->pdev->dev, i, emac_irq,
1571 0, ndev->name, ndev))
1572 goto rollback; 1577 goto rollback;
1578 }
1573 } 1579 }
1574 k++; 1580 res_num++;
1575 } 1581 }
1582 /* prepare counters for rollback in case of an error */
1583 res_num--;
1584 irq_num--;
1576 1585
1577 /* Start/Enable EMAC hardware */ 1586 /* Start/Enable EMAC hardware */
1578 emac_hw_enable(priv); 1587 emac_hw_enable(priv);
@@ -1639,11 +1648,23 @@ static int emac_dev_open(struct net_device *ndev)
1639 1648
1640 return 0; 1649 return 0;
1641 1650
1642rollback:
1643
1644 dev_err(emac_dev, "DaVinci EMAC: devm_request_irq() failed");
1645 ret = -EBUSY;
1646err: 1651err:
1652 emac_int_disable(priv);
1653 napi_disable(&priv->napi);
1654
1655rollback:
1656 for (q = res_num; q >= 0; q--) {
1657 res = platform_get_resource(priv->pdev, IORESOURCE_IRQ, q);
1658 /* at the first iteration, irq_num is already set to the
1659 * right value
1660 */
1661 if (q != res_num)
1662 irq_num = res->end;
1663
1664 for (m = irq_num; m >= res->start; m--)
1665 free_irq(m, ndev);
1666 }
1667 cpdma_ctlr_stop(priv->dma);
1647 pm_runtime_put(&priv->pdev->dev); 1668 pm_runtime_put(&priv->pdev->dev);
1648 return ret; 1669 return ret;
1649} 1670}
@@ -1659,6 +1680,9 @@ err:
1659 */ 1680 */
1660static int emac_dev_stop(struct net_device *ndev) 1681static int emac_dev_stop(struct net_device *ndev)
1661{ 1682{
1683 struct resource *res;
1684 int i = 0;
1685 int irq_num;
1662 struct emac_priv *priv = netdev_priv(ndev); 1686 struct emac_priv *priv = netdev_priv(ndev);
1663 struct device *emac_dev = &ndev->dev; 1687 struct device *emac_dev = &ndev->dev;
1664 1688
@@ -1674,6 +1698,13 @@ static int emac_dev_stop(struct net_device *ndev)
1674 if (priv->phydev) 1698 if (priv->phydev)
1675 phy_disconnect(priv->phydev); 1699 phy_disconnect(priv->phydev);
1676 1700
1701 /* Free IRQ */
1702 while ((res = platform_get_resource(priv->pdev, IORESOURCE_IRQ, i))) {
1703 for (irq_num = res->start; irq_num <= res->end; irq_num++)
1704 free_irq(irq_num, priv->ndev);
1705 i++;
1706 }
1707
1677 if (netif_msg_drv(priv)) 1708 if (netif_msg_drv(priv))
1678 dev_notice(emac_dev, "DaVinci EMAC: %s stopped\n", ndev->name); 1709 dev_notice(emac_dev, "DaVinci EMAC: %s stopped\n", ndev->name);
1679 1710
diff --git a/drivers/net/ethernet/via/via-rhine.c b/drivers/net/ethernet/via/via-rhine.c
index ef312bc6b865..6ac20a6738f4 100644
--- a/drivers/net/ethernet/via/via-rhine.c
+++ b/drivers/net/ethernet/via/via-rhine.c
@@ -923,7 +923,7 @@ static int rhine_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
923 if (rc) { 923 if (rc) {
924 dev_err(&pdev->dev, 924 dev_err(&pdev->dev,
925 "32-bit PCI DMA addresses not supported by the card!?\n"); 925 "32-bit PCI DMA addresses not supported by the card!?\n");
926 goto err_out; 926 goto err_out_pci_disable;
927 } 927 }
928 928
929 /* sanity check */ 929 /* sanity check */
@@ -931,7 +931,7 @@ static int rhine_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
931 (pci_resource_len(pdev, 1) < io_size)) { 931 (pci_resource_len(pdev, 1) < io_size)) {
932 rc = -EIO; 932 rc = -EIO;
933 dev_err(&pdev->dev, "Insufficient PCI resources, aborting\n"); 933 dev_err(&pdev->dev, "Insufficient PCI resources, aborting\n");
934 goto err_out; 934 goto err_out_pci_disable;
935 } 935 }
936 936
937 pioaddr = pci_resource_start(pdev, 0); 937 pioaddr = pci_resource_start(pdev, 0);
@@ -942,7 +942,7 @@ static int rhine_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
942 dev = alloc_etherdev(sizeof(struct rhine_private)); 942 dev = alloc_etherdev(sizeof(struct rhine_private));
943 if (!dev) { 943 if (!dev) {
944 rc = -ENOMEM; 944 rc = -ENOMEM;
945 goto err_out; 945 goto err_out_pci_disable;
946 } 946 }
947 SET_NETDEV_DEV(dev, &pdev->dev); 947 SET_NETDEV_DEV(dev, &pdev->dev);
948 948
@@ -1084,6 +1084,8 @@ err_out_free_res:
1084 pci_release_regions(pdev); 1084 pci_release_regions(pdev);
1085err_out_free_netdev: 1085err_out_free_netdev:
1086 free_netdev(dev); 1086 free_netdev(dev);
1087err_out_pci_disable:
1088 pci_disable_device(pdev);
1087err_out: 1089err_out:
1088 return rc; 1090 return rc;
1089} 1091}
diff --git a/drivers/net/ifb.c b/drivers/net/ifb.c
index c14d39bf32d0..d7b2e947184b 100644
--- a/drivers/net/ifb.c
+++ b/drivers/net/ifb.c
@@ -180,7 +180,8 @@ static void ifb_setup(struct net_device *dev)
180 dev->tx_queue_len = TX_Q_LIMIT; 180 dev->tx_queue_len = TX_Q_LIMIT;
181 181
182 dev->features |= IFB_FEATURES; 182 dev->features |= IFB_FEATURES;
183 dev->vlan_features |= IFB_FEATURES; 183 dev->vlan_features |= IFB_FEATURES & ~(NETIF_F_HW_VLAN_CTAG_TX |
184 NETIF_F_HW_VLAN_STAG_TX);
184 185
185 dev->flags |= IFF_NOARP; 186 dev->flags |= IFF_NOARP;
186 dev->flags &= ~IFF_MULTICAST; 187 dev->flags &= ~IFF_MULTICAST;
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index 4b970f7624c0..2f6989b1e0dc 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -683,10 +683,9 @@ EXPORT_SYMBOL(phy_detach);
683int phy_suspend(struct phy_device *phydev) 683int phy_suspend(struct phy_device *phydev)
684{ 684{
685 struct phy_driver *phydrv = to_phy_driver(phydev->dev.driver); 685 struct phy_driver *phydrv = to_phy_driver(phydev->dev.driver);
686 struct ethtool_wolinfo wol; 686 struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
687 687
688 /* If the device has WOL enabled, we cannot suspend the PHY */ 688 /* If the device has WOL enabled, we cannot suspend the PHY */
689 wol.cmd = ETHTOOL_GWOL;
690 phy_ethtool_get_wol(phydev, &wol); 689 phy_ethtool_get_wol(phydev, &wol);
691 if (wol.wolopts) 690 if (wol.wolopts)
692 return -EBUSY; 691 return -EBUSY;
diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
index dbff290ed0e4..d350d2795e10 100644
--- a/drivers/net/usb/cdc_ncm.c
+++ b/drivers/net/usb/cdc_ncm.c
@@ -68,7 +68,6 @@ static struct usb_driver cdc_ncm_driver;
68static int cdc_ncm_setup(struct usbnet *dev) 68static int cdc_ncm_setup(struct usbnet *dev)
69{ 69{
70 struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0]; 70 struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0];
71 struct usb_cdc_ncm_ntb_parameters ncm_parm;
72 u32 val; 71 u32 val;
73 u8 flags; 72 u8 flags;
74 u8 iface_no; 73 u8 iface_no;
@@ -82,22 +81,22 @@ static int cdc_ncm_setup(struct usbnet *dev)
82 err = usbnet_read_cmd(dev, USB_CDC_GET_NTB_PARAMETERS, 81 err = usbnet_read_cmd(dev, USB_CDC_GET_NTB_PARAMETERS,
83 USB_TYPE_CLASS | USB_DIR_IN 82 USB_TYPE_CLASS | USB_DIR_IN
84 |USB_RECIP_INTERFACE, 83 |USB_RECIP_INTERFACE,
85 0, iface_no, &ncm_parm, 84 0, iface_no, &ctx->ncm_parm,
86 sizeof(ncm_parm)); 85 sizeof(ctx->ncm_parm));
87 if (err < 0) { 86 if (err < 0) {
88 dev_err(&dev->intf->dev, "failed GET_NTB_PARAMETERS\n"); 87 dev_err(&dev->intf->dev, "failed GET_NTB_PARAMETERS\n");
89 return err; /* GET_NTB_PARAMETERS is required */ 88 return err; /* GET_NTB_PARAMETERS is required */
90 } 89 }
91 90
92 /* read correct set of parameters according to device mode */ 91 /* read correct set of parameters according to device mode */
93 ctx->rx_max = le32_to_cpu(ncm_parm.dwNtbInMaxSize); 92 ctx->rx_max = le32_to_cpu(ctx->ncm_parm.dwNtbInMaxSize);
94 ctx->tx_max = le32_to_cpu(ncm_parm.dwNtbOutMaxSize); 93 ctx->tx_max = le32_to_cpu(ctx->ncm_parm.dwNtbOutMaxSize);
95 ctx->tx_remainder = le16_to_cpu(ncm_parm.wNdpOutPayloadRemainder); 94 ctx->tx_remainder = le16_to_cpu(ctx->ncm_parm.wNdpOutPayloadRemainder);
96 ctx->tx_modulus = le16_to_cpu(ncm_parm.wNdpOutDivisor); 95 ctx->tx_modulus = le16_to_cpu(ctx->ncm_parm.wNdpOutDivisor);
97 ctx->tx_ndp_modulus = le16_to_cpu(ncm_parm.wNdpOutAlignment); 96 ctx->tx_ndp_modulus = le16_to_cpu(ctx->ncm_parm.wNdpOutAlignment);
98 /* devices prior to NCM Errata shall set this field to zero */ 97 /* devices prior to NCM Errata shall set this field to zero */
99 ctx->tx_max_datagrams = le16_to_cpu(ncm_parm.wNtbOutMaxDatagrams); 98 ctx->tx_max_datagrams = le16_to_cpu(ctx->ncm_parm.wNtbOutMaxDatagrams);
100 ntb_fmt_supported = le16_to_cpu(ncm_parm.bmNtbFormatsSupported); 99 ntb_fmt_supported = le16_to_cpu(ctx->ncm_parm.bmNtbFormatsSupported);
101 100
102 /* there are some minor differences in NCM and MBIM defaults */ 101 /* there are some minor differences in NCM and MBIM defaults */
103 if (cdc_ncm_comm_intf_is_mbim(ctx->control->cur_altsetting)) { 102 if (cdc_ncm_comm_intf_is_mbim(ctx->control->cur_altsetting)) {
@@ -146,7 +145,7 @@ static int cdc_ncm_setup(struct usbnet *dev)
146 } 145 }
147 146
148 /* inform device about NTB input size changes */ 147 /* inform device about NTB input size changes */
149 if (ctx->rx_max != le32_to_cpu(ncm_parm.dwNtbInMaxSize)) { 148 if (ctx->rx_max != le32_to_cpu(ctx->ncm_parm.dwNtbInMaxSize)) {
150 __le32 dwNtbInMaxSize = cpu_to_le32(ctx->rx_max); 149 __le32 dwNtbInMaxSize = cpu_to_le32(ctx->rx_max);
151 150
152 err = usbnet_write_cmd(dev, USB_CDC_SET_NTB_INPUT_SIZE, 151 err = usbnet_write_cmd(dev, USB_CDC_SET_NTB_INPUT_SIZE,
@@ -162,14 +161,6 @@ static int cdc_ncm_setup(struct usbnet *dev)
162 dev_dbg(&dev->intf->dev, "Using default maximum transmit length=%d\n", 161 dev_dbg(&dev->intf->dev, "Using default maximum transmit length=%d\n",
163 CDC_NCM_NTB_MAX_SIZE_TX); 162 CDC_NCM_NTB_MAX_SIZE_TX);
164 ctx->tx_max = CDC_NCM_NTB_MAX_SIZE_TX; 163 ctx->tx_max = CDC_NCM_NTB_MAX_SIZE_TX;
165
166 /* Adding a pad byte here simplifies the handling in
167 * cdc_ncm_fill_tx_frame, by making tx_max always
168 * represent the real skb max size.
169 */
170 if (ctx->tx_max % usb_maxpacket(dev->udev, dev->out, 1) == 0)
171 ctx->tx_max++;
172
173 } 164 }
174 165
175 /* 166 /*
@@ -439,6 +430,10 @@ advance:
439 goto error2; 430 goto error2;
440 } 431 }
441 432
433 /* initialize data interface */
434 if (cdc_ncm_setup(dev))
435 goto error2;
436
442 /* configure data interface */ 437 /* configure data interface */
443 temp = usb_set_interface(dev->udev, iface_no, data_altsetting); 438 temp = usb_set_interface(dev->udev, iface_no, data_altsetting);
444 if (temp) { 439 if (temp) {
@@ -453,12 +448,6 @@ advance:
453 goto error2; 448 goto error2;
454 } 449 }
455 450
456 /* initialize data interface */
457 if (cdc_ncm_setup(dev)) {
458 dev_dbg(&intf->dev, "cdc_ncm_setup() failed\n");
459 goto error2;
460 }
461
462 usb_set_intfdata(ctx->data, dev); 451 usb_set_intfdata(ctx->data, dev);
463 usb_set_intfdata(ctx->control, dev); 452 usb_set_intfdata(ctx->control, dev);
464 453
@@ -475,6 +464,15 @@ advance:
475 dev->hard_mtu = ctx->tx_max; 464 dev->hard_mtu = ctx->tx_max;
476 dev->rx_urb_size = ctx->rx_max; 465 dev->rx_urb_size = ctx->rx_max;
477 466
467 /* cdc_ncm_setup will override dwNtbOutMaxSize if it is
468 * outside the sane range. Adding a pad byte here if necessary
469 * simplifies the handling in cdc_ncm_fill_tx_frame, making
470 * tx_max always represent the real skb max size.
471 */
472 if (ctx->tx_max != le32_to_cpu(ctx->ncm_parm.dwNtbOutMaxSize) &&
473 ctx->tx_max % usb_maxpacket(dev->udev, dev->out, 1) == 0)
474 ctx->tx_max++;
475
478 return 0; 476 return 0;
479 477
480error2: 478error2:
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index dd10d5817d2a..f9e96c427558 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -752,14 +752,12 @@ EXPORT_SYMBOL_GPL(usbnet_unlink_rx_urbs);
752// precondition: never called in_interrupt 752// precondition: never called in_interrupt
753static void usbnet_terminate_urbs(struct usbnet *dev) 753static void usbnet_terminate_urbs(struct usbnet *dev)
754{ 754{
755 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(unlink_wakeup);
756 DECLARE_WAITQUEUE(wait, current); 755 DECLARE_WAITQUEUE(wait, current);
757 int temp; 756 int temp;
758 757
759 /* ensure there are no more active urbs */ 758 /* ensure there are no more active urbs */
760 add_wait_queue(&unlink_wakeup, &wait); 759 add_wait_queue(&dev->wait, &wait);
761 set_current_state(TASK_UNINTERRUPTIBLE); 760 set_current_state(TASK_UNINTERRUPTIBLE);
762 dev->wait = &unlink_wakeup;
763 temp = unlink_urbs(dev, &dev->txq) + 761 temp = unlink_urbs(dev, &dev->txq) +
764 unlink_urbs(dev, &dev->rxq); 762 unlink_urbs(dev, &dev->rxq);
765 763
@@ -773,15 +771,14 @@ static void usbnet_terminate_urbs(struct usbnet *dev)
773 "waited for %d urb completions\n", temp); 771 "waited for %d urb completions\n", temp);
774 } 772 }
775 set_current_state(TASK_RUNNING); 773 set_current_state(TASK_RUNNING);
776 dev->wait = NULL; 774 remove_wait_queue(&dev->wait, &wait);
777 remove_wait_queue(&unlink_wakeup, &wait);
778} 775}
779 776
780int usbnet_stop (struct net_device *net) 777int usbnet_stop (struct net_device *net)
781{ 778{
782 struct usbnet *dev = netdev_priv(net); 779 struct usbnet *dev = netdev_priv(net);
783 struct driver_info *info = dev->driver_info; 780 struct driver_info *info = dev->driver_info;
784 int retval; 781 int retval, pm;
785 782
786 clear_bit(EVENT_DEV_OPEN, &dev->flags); 783 clear_bit(EVENT_DEV_OPEN, &dev->flags);
787 netif_stop_queue (net); 784 netif_stop_queue (net);
@@ -791,6 +788,8 @@ int usbnet_stop (struct net_device *net)
791 net->stats.rx_packets, net->stats.tx_packets, 788 net->stats.rx_packets, net->stats.tx_packets,
792 net->stats.rx_errors, net->stats.tx_errors); 789 net->stats.rx_errors, net->stats.tx_errors);
793 790
791 /* to not race resume */
792 pm = usb_autopm_get_interface(dev->intf);
794 /* allow minidriver to stop correctly (wireless devices to turn off 793 /* allow minidriver to stop correctly (wireless devices to turn off
795 * radio etc) */ 794 * radio etc) */
796 if (info->stop) { 795 if (info->stop) {
@@ -817,6 +816,9 @@ int usbnet_stop (struct net_device *net)
817 dev->flags = 0; 816 dev->flags = 0;
818 del_timer_sync (&dev->delay); 817 del_timer_sync (&dev->delay);
819 tasklet_kill (&dev->bh); 818 tasklet_kill (&dev->bh);
819 if (!pm)
820 usb_autopm_put_interface(dev->intf);
821
820 if (info->manage_power && 822 if (info->manage_power &&
821 !test_and_clear_bit(EVENT_NO_RUNTIME_PM, &dev->flags)) 823 !test_and_clear_bit(EVENT_NO_RUNTIME_PM, &dev->flags))
822 info->manage_power(dev, 0); 824 info->manage_power(dev, 0);
@@ -1437,11 +1439,12 @@ static void usbnet_bh (unsigned long param)
1437 /* restart RX again after disabling due to high error rate */ 1439 /* restart RX again after disabling due to high error rate */
1438 clear_bit(EVENT_RX_KILL, &dev->flags); 1440 clear_bit(EVENT_RX_KILL, &dev->flags);
1439 1441
1440 // waiting for all pending urbs to complete? 1442 /* waiting for all pending urbs to complete?
1441 if (dev->wait) { 1443 * only then can we forgo submitting anew
1442 if ((dev->txq.qlen + dev->rxq.qlen + dev->done.qlen) == 0) { 1444 */
1443 wake_up (dev->wait); 1445 if (waitqueue_active(&dev->wait)) {
1444 } 1446 if (dev->txq.qlen + dev->rxq.qlen + dev->done.qlen == 0)
1447 wake_up_all(&dev->wait);
1445 1448
1446 // or are we maybe short a few urbs? 1449 // or are we maybe short a few urbs?
1447 } else if (netif_running (dev->net) && 1450 } else if (netif_running (dev->net) &&
@@ -1580,6 +1583,7 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod)
1580 dev->driver_name = name; 1583 dev->driver_name = name;
1581 dev->msg_enable = netif_msg_init (msg_level, NETIF_MSG_DRV 1584 dev->msg_enable = netif_msg_init (msg_level, NETIF_MSG_DRV
1582 | NETIF_MSG_PROBE | NETIF_MSG_LINK); 1585 | NETIF_MSG_PROBE | NETIF_MSG_LINK);
1586 init_waitqueue_head(&dev->wait);
1583 skb_queue_head_init (&dev->rxq); 1587 skb_queue_head_init (&dev->rxq);
1584 skb_queue_head_init (&dev->txq); 1588 skb_queue_head_init (&dev->txq);
1585 skb_queue_head_init (&dev->done); 1589 skb_queue_head_init (&dev->done);
@@ -1791,9 +1795,10 @@ int usbnet_resume (struct usb_interface *intf)
1791 spin_unlock_irq(&dev->txq.lock); 1795 spin_unlock_irq(&dev->txq.lock);
1792 1796
1793 if (test_bit(EVENT_DEV_OPEN, &dev->flags)) { 1797 if (test_bit(EVENT_DEV_OPEN, &dev->flags)) {
1794 /* handle remote wakeup ASAP */ 1798 /* handle remote wakeup ASAP
1795 if (!dev->wait && 1799 * we cannot race against stop
1796 netif_device_present(dev->net) && 1800 */
1801 if (netif_device_present(dev->net) &&
1797 !timer_pending(&dev->delay) && 1802 !timer_pending(&dev->delay) &&
1798 !test_bit(EVENT_RX_HALT, &dev->flags)) 1803 !test_bit(EVENT_RX_HALT, &dev->flags))
1799 rx_alloc_submit(dev, GFP_NOIO); 1804 rx_alloc_submit(dev, GFP_NOIO);
diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index 5b374370f71c..c0e7c64765ab 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -286,7 +286,10 @@ static void veth_setup(struct net_device *dev)
286 dev->features |= NETIF_F_LLTX; 286 dev->features |= NETIF_F_LLTX;
287 dev->features |= VETH_FEATURES; 287 dev->features |= VETH_FEATURES;
288 dev->vlan_features = dev->features & 288 dev->vlan_features = dev->features &
289 ~(NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX); 289 ~(NETIF_F_HW_VLAN_CTAG_TX |
290 NETIF_F_HW_VLAN_STAG_TX |
291 NETIF_F_HW_VLAN_CTAG_RX |
292 NETIF_F_HW_VLAN_STAG_RX);
290 dev->destructor = veth_dev_free; 293 dev->destructor = veth_dev_free;
291 294
292 dev->hw_features = VETH_FEATURES; 295 dev->hw_features = VETH_FEATURES;
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 5632a99cbbd2..841b60831df1 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -671,8 +671,7 @@ static bool try_fill_recv(struct receive_queue *rq, gfp_t gfp)
671 if (err) 671 if (err)
672 break; 672 break;
673 } while (rq->vq->num_free); 673 } while (rq->vq->num_free);
674 if (unlikely(!virtqueue_kick(rq->vq))) 674 virtqueue_kick(rq->vq);
675 return false;
676 return !oom; 675 return !oom;
677} 676}
678 677
@@ -877,7 +876,7 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
877 err = xmit_skb(sq, skb); 876 err = xmit_skb(sq, skb);
878 877
879 /* This should not happen! */ 878 /* This should not happen! */
880 if (unlikely(err) || unlikely(!virtqueue_kick(sq->vq))) { 879 if (unlikely(err)) {
881 dev->stats.tx_fifo_errors++; 880 dev->stats.tx_fifo_errors++;
882 if (net_ratelimit()) 881 if (net_ratelimit())
883 dev_warn(&dev->dev, 882 dev_warn(&dev->dev,
@@ -886,6 +885,7 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
886 kfree_skb(skb); 885 kfree_skb(skb);
887 return NETDEV_TX_OK; 886 return NETDEV_TX_OK;
888 } 887 }
888 virtqueue_kick(sq->vq);
889 889
890 /* Don't wait up for transmitted skbs to be freed. */ 890 /* Don't wait up for transmitted skbs to be freed. */
891 skb_orphan(skb); 891 skb_orphan(skb);
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index b0f705c2378f..1236812c7be6 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -1318,6 +1318,9 @@ static int arp_reduce(struct net_device *dev, struct sk_buff *skb)
1318 1318
1319 neigh_release(n); 1319 neigh_release(n);
1320 1320
1321 if (reply == NULL)
1322 goto out;
1323
1321 skb_reset_mac_header(reply); 1324 skb_reset_mac_header(reply);
1322 __skb_pull(reply, skb_network_offset(reply)); 1325 __skb_pull(reply, skb_network_offset(reply));
1323 reply->ip_summed = CHECKSUM_UNNECESSARY; 1326 reply->ip_summed = CHECKSUM_UNNECESSARY;
@@ -1339,15 +1342,103 @@ out:
1339} 1342}
1340 1343
1341#if IS_ENABLED(CONFIG_IPV6) 1344#if IS_ENABLED(CONFIG_IPV6)
1345
1346static struct sk_buff *vxlan_na_create(struct sk_buff *request,
1347 struct neighbour *n, bool isrouter)
1348{
1349 struct net_device *dev = request->dev;
1350 struct sk_buff *reply;
1351 struct nd_msg *ns, *na;
1352 struct ipv6hdr *pip6;
1353 u8 *daddr;
1354 int na_olen = 8; /* opt hdr + ETH_ALEN for target */
1355 int ns_olen;
1356 int i, len;
1357
1358 if (dev == NULL)
1359 return NULL;
1360
1361 len = LL_RESERVED_SPACE(dev) + sizeof(struct ipv6hdr) +
1362 sizeof(*na) + na_olen + dev->needed_tailroom;
1363 reply = alloc_skb(len, GFP_ATOMIC);
1364 if (reply == NULL)
1365 return NULL;
1366
1367 reply->protocol = htons(ETH_P_IPV6);
1368 reply->dev = dev;
1369 skb_reserve(reply, LL_RESERVED_SPACE(request->dev));
1370 skb_push(reply, sizeof(struct ethhdr));
1371 skb_set_mac_header(reply, 0);
1372
1373 ns = (struct nd_msg *)skb_transport_header(request);
1374
1375 daddr = eth_hdr(request)->h_source;
1376 ns_olen = request->len - skb_transport_offset(request) - sizeof(*ns);
1377 for (i = 0; i < ns_olen-1; i += (ns->opt[i+1]<<3)) {
1378 if (ns->opt[i] == ND_OPT_SOURCE_LL_ADDR) {
1379 daddr = ns->opt + i + sizeof(struct nd_opt_hdr);
1380 break;
1381 }
1382 }
1383
1384 /* Ethernet header */
1385 ether_addr_copy(eth_hdr(reply)->h_dest, daddr);
1386 ether_addr_copy(eth_hdr(reply)->h_source, n->ha);
1387 eth_hdr(reply)->h_proto = htons(ETH_P_IPV6);
1388 reply->protocol = htons(ETH_P_IPV6);
1389
1390 skb_pull(reply, sizeof(struct ethhdr));
1391 skb_set_network_header(reply, 0);
1392 skb_put(reply, sizeof(struct ipv6hdr));
1393
1394 /* IPv6 header */
1395
1396 pip6 = ipv6_hdr(reply);
1397 memset(pip6, 0, sizeof(struct ipv6hdr));
1398 pip6->version = 6;
1399 pip6->priority = ipv6_hdr(request)->priority;
1400 pip6->nexthdr = IPPROTO_ICMPV6;
1401 pip6->hop_limit = 255;
1402 pip6->daddr = ipv6_hdr(request)->saddr;
1403 pip6->saddr = *(struct in6_addr *)n->primary_key;
1404
1405 skb_pull(reply, sizeof(struct ipv6hdr));
1406 skb_set_transport_header(reply, 0);
1407
1408 na = (struct nd_msg *)skb_put(reply, sizeof(*na) + na_olen);
1409
1410 /* Neighbor Advertisement */
1411 memset(na, 0, sizeof(*na)+na_olen);
1412 na->icmph.icmp6_type = NDISC_NEIGHBOUR_ADVERTISEMENT;
1413 na->icmph.icmp6_router = isrouter;
1414 na->icmph.icmp6_override = 1;
1415 na->icmph.icmp6_solicited = 1;
1416 na->target = ns->target;
1417 ether_addr_copy(&na->opt[2], n->ha);
1418 na->opt[0] = ND_OPT_TARGET_LL_ADDR;
1419 na->opt[1] = na_olen >> 3;
1420
1421 na->icmph.icmp6_cksum = csum_ipv6_magic(&pip6->saddr,
1422 &pip6->daddr, sizeof(*na)+na_olen, IPPROTO_ICMPV6,
1423 csum_partial(na, sizeof(*na)+na_olen, 0));
1424
1425 pip6->payload_len = htons(sizeof(*na)+na_olen);
1426
1427 skb_push(reply, sizeof(struct ipv6hdr));
1428
1429 reply->ip_summed = CHECKSUM_UNNECESSARY;
1430
1431 return reply;
1432}
1433
1342static int neigh_reduce(struct net_device *dev, struct sk_buff *skb) 1434static int neigh_reduce(struct net_device *dev, struct sk_buff *skb)
1343{ 1435{
1344 struct vxlan_dev *vxlan = netdev_priv(dev); 1436 struct vxlan_dev *vxlan = netdev_priv(dev);
1345 struct neighbour *n; 1437 struct nd_msg *msg;
1346 union vxlan_addr ipa;
1347 const struct ipv6hdr *iphdr; 1438 const struct ipv6hdr *iphdr;
1348 const struct in6_addr *saddr, *daddr; 1439 const struct in6_addr *saddr, *daddr;
1349 struct nd_msg *msg; 1440 struct neighbour *n;
1350 struct inet6_dev *in6_dev = NULL; 1441 struct inet6_dev *in6_dev;
1351 1442
1352 in6_dev = __in6_dev_get(dev); 1443 in6_dev = __in6_dev_get(dev);
1353 if (!in6_dev) 1444 if (!in6_dev)
@@ -1360,19 +1451,20 @@ static int neigh_reduce(struct net_device *dev, struct sk_buff *skb)
1360 saddr = &iphdr->saddr; 1451 saddr = &iphdr->saddr;
1361 daddr = &iphdr->daddr; 1452 daddr = &iphdr->daddr;
1362 1453
1363 if (ipv6_addr_loopback(daddr) ||
1364 ipv6_addr_is_multicast(daddr))
1365 goto out;
1366
1367 msg = (struct nd_msg *)skb_transport_header(skb); 1454 msg = (struct nd_msg *)skb_transport_header(skb);
1368 if (msg->icmph.icmp6_code != 0 || 1455 if (msg->icmph.icmp6_code != 0 ||
1369 msg->icmph.icmp6_type != NDISC_NEIGHBOUR_SOLICITATION) 1456 msg->icmph.icmp6_type != NDISC_NEIGHBOUR_SOLICITATION)
1370 goto out; 1457 goto out;
1371 1458
1372 n = neigh_lookup(ipv6_stub->nd_tbl, daddr, dev); 1459 if (ipv6_addr_loopback(daddr) ||
1460 ipv6_addr_is_multicast(&msg->target))
1461 goto out;
1462
1463 n = neigh_lookup(ipv6_stub->nd_tbl, &msg->target, dev);
1373 1464
1374 if (n) { 1465 if (n) {
1375 struct vxlan_fdb *f; 1466 struct vxlan_fdb *f;
1467 struct sk_buff *reply;
1376 1468
1377 if (!(n->nud_state & NUD_CONNECTED)) { 1469 if (!(n->nud_state & NUD_CONNECTED)) {
1378 neigh_release(n); 1470 neigh_release(n);
@@ -1386,13 +1478,23 @@ static int neigh_reduce(struct net_device *dev, struct sk_buff *skb)
1386 goto out; 1478 goto out;
1387 } 1479 }
1388 1480
1389 ipv6_stub->ndisc_send_na(dev, n, saddr, &msg->target, 1481 reply = vxlan_na_create(skb, n,
1390 !!in6_dev->cnf.forwarding, 1482 !!(f ? f->flags & NTF_ROUTER : 0));
1391 true, false, false); 1483
1392 neigh_release(n); 1484 neigh_release(n);
1485
1486 if (reply == NULL)
1487 goto out;
1488
1489 if (netif_rx_ni(reply) == NET_RX_DROP)
1490 dev->stats.rx_dropped++;
1491
1393 } else if (vxlan->flags & VXLAN_F_L3MISS) { 1492 } else if (vxlan->flags & VXLAN_F_L3MISS) {
1394 ipa.sin6.sin6_addr = *daddr; 1493 union vxlan_addr ipa = {
1395 ipa.sa.sa_family = AF_INET6; 1494 .sin6.sin6_addr = msg->target,
1495 .sa.sa_family = AF_INET6,
1496 };
1497
1396 vxlan_ip_miss(dev, &ipa); 1498 vxlan_ip_miss(dev, &ipa);
1397 } 1499 }
1398 1500
diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
index 303ce27964c1..9078a6c5a74e 100644
--- a/drivers/net/wireless/ath/ath9k/hw.c
+++ b/drivers/net/wireless/ath/ath9k/hw.c
@@ -1548,6 +1548,7 @@ bool ath9k_hw_check_alive(struct ath_hw *ah)
1548 if (reg != last_val) 1548 if (reg != last_val)
1549 return true; 1549 return true;
1550 1550
1551 udelay(1);
1551 last_val = reg; 1552 last_val = reg;
1552 if ((reg & 0x7E7FFFEF) == 0x00702400) 1553 if ((reg & 0x7E7FFFEF) == 0x00702400)
1553 continue; 1554 continue;
@@ -1560,8 +1561,6 @@ bool ath9k_hw_check_alive(struct ath_hw *ah)
1560 default: 1561 default:
1561 return true; 1562 return true;
1562 } 1563 }
1563
1564 udelay(1);
1565 } while (count-- > 0); 1564 } while (count-- > 0);
1566 1565
1567 return false; 1566 return false;
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
index f042a18c8495..55897d508a76 100644
--- a/drivers/net/wireless/ath/ath9k/xmit.c
+++ b/drivers/net/wireless/ath/ath9k/xmit.c
@@ -2063,7 +2063,7 @@ static struct ath_buf *ath_tx_setup_buffer(struct ath_softc *sc,
2063 2063
2064 ATH_TXBUF_RESET(bf); 2064 ATH_TXBUF_RESET(bf);
2065 2065
2066 if (tid) { 2066 if (tid && ieee80211_is_data_present(hdr->frame_control)) {
2067 fragno = le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG; 2067 fragno = le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG;
2068 seqno = tid->seq_next; 2068 seqno = tid->seq_next;
2069 hdr->seq_ctrl = cpu_to_le16(tid->seq_next << IEEE80211_SEQ_SEQ_SHIFT); 2069 hdr->seq_ctrl = cpu_to_le16(tid->seq_next << IEEE80211_SEQ_SEQ_SHIFT);
@@ -2186,7 +2186,7 @@ int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
2186 txq->stopped = true; 2186 txq->stopped = true;
2187 } 2187 }
2188 2188
2189 if (txctl->an) 2189 if (txctl->an && ieee80211_is_data_present(hdr->frame_control))
2190 tid = ath_get_skb_tid(sc, txctl->an, skb); 2190 tid = ath_get_skb_tid(sc, txctl->an, skb);
2191 2191
2192 if (info->flags & IEEE80211_TX_CTL_PS_RESPONSE) { 2192 if (info->flags & IEEE80211_TX_CTL_PS_RESPONSE) {
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c
index 119ee6eaf1c3..ddaa9efd053d 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c
@@ -1948,8 +1948,10 @@ static int brcmf_sdio_txpkt_prep_sg(struct brcmf_sdio *bus,
1948 if (pkt_pad == NULL) 1948 if (pkt_pad == NULL)
1949 return -ENOMEM; 1949 return -ENOMEM;
1950 ret = brcmf_sdio_txpkt_hdalign(bus, pkt_pad); 1950 ret = brcmf_sdio_txpkt_hdalign(bus, pkt_pad);
1951 if (unlikely(ret < 0)) 1951 if (unlikely(ret < 0)) {
1952 kfree_skb(pkt_pad);
1952 return ret; 1953 return ret;
1954 }
1953 memcpy(pkt_pad->data, 1955 memcpy(pkt_pad->data,
1954 pkt->data + pkt->len - tail_chop, 1956 pkt->data + pkt->len - tail_chop,
1955 tail_chop); 1957 tail_chop);
diff --git a/drivers/net/wireless/rt2x00/rt2800lib.c b/drivers/net/wireless/rt2x00/rt2800lib.c
index 7f8b5d156c8c..41d4a8167dc3 100644
--- a/drivers/net/wireless/rt2x00/rt2800lib.c
+++ b/drivers/net/wireless/rt2x00/rt2800lib.c
@@ -5460,14 +5460,15 @@ static void rt2800_init_bbp_53xx(struct rt2x00_dev *rt2x00dev)
5460 5460
5461 rt2800_bbp_write(rt2x00dev, 68, 0x0b); 5461 rt2800_bbp_write(rt2x00dev, 68, 0x0b);
5462 5462
5463 rt2800_bbp_write(rt2x00dev, 69, 0x0d); 5463 rt2800_bbp_write(rt2x00dev, 69, 0x12);
5464 rt2800_bbp_write(rt2x00dev, 70, 0x06);
5465 rt2800_bbp_write(rt2x00dev, 73, 0x13); 5464 rt2800_bbp_write(rt2x00dev, 73, 0x13);
5466 rt2800_bbp_write(rt2x00dev, 75, 0x46); 5465 rt2800_bbp_write(rt2x00dev, 75, 0x46);
5467 rt2800_bbp_write(rt2x00dev, 76, 0x28); 5466 rt2800_bbp_write(rt2x00dev, 76, 0x28);
5468 5467
5469 rt2800_bbp_write(rt2x00dev, 77, 0x59); 5468 rt2800_bbp_write(rt2x00dev, 77, 0x59);
5470 5469
5470 rt2800_bbp_write(rt2x00dev, 70, 0x0a);
5471
5471 rt2800_bbp_write(rt2x00dev, 79, 0x13); 5472 rt2800_bbp_write(rt2x00dev, 79, 0x13);
5472 rt2800_bbp_write(rt2x00dev, 80, 0x05); 5473 rt2800_bbp_write(rt2x00dev, 80, 0x05);
5473 rt2800_bbp_write(rt2x00dev, 81, 0x33); 5474 rt2800_bbp_write(rt2x00dev, 81, 0x33);
@@ -5510,7 +5511,6 @@ static void rt2800_init_bbp_53xx(struct rt2x00_dev *rt2x00dev)
5510 if (rt2x00_rt(rt2x00dev, RT5392)) { 5511 if (rt2x00_rt(rt2x00dev, RT5392)) {
5511 rt2800_bbp_write(rt2x00dev, 134, 0xd0); 5512 rt2800_bbp_write(rt2x00dev, 134, 0xd0);
5512 rt2800_bbp_write(rt2x00dev, 135, 0xf6); 5513 rt2800_bbp_write(rt2x00dev, 135, 0xf6);
5513 rt2800_bbp_write(rt2x00dev, 148, 0x84);
5514 } 5514 }
5515 5515
5516 rt2800_disable_unused_dac_adc(rt2x00dev); 5516 rt2800_disable_unused_dac_adc(rt2x00dev);
diff --git a/drivers/scsi/bnx2fc/bnx2fc_io.c b/drivers/scsi/bnx2fc/bnx2fc_io.c
index ed880891cb7c..e9279a8c1e1c 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_io.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_io.c
@@ -594,13 +594,13 @@ static void bnx2fc_free_mp_resc(struct bnx2fc_cmd *io_req)
594 mp_req->mp_resp_bd = NULL; 594 mp_req->mp_resp_bd = NULL;
595 } 595 }
596 if (mp_req->req_buf) { 596 if (mp_req->req_buf) {
597 dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE, 597 dma_free_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE,
598 mp_req->req_buf, 598 mp_req->req_buf,
599 mp_req->req_buf_dma); 599 mp_req->req_buf_dma);
600 mp_req->req_buf = NULL; 600 mp_req->req_buf = NULL;
601 } 601 }
602 if (mp_req->resp_buf) { 602 if (mp_req->resp_buf) {
603 dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE, 603 dma_free_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE,
604 mp_req->resp_buf, 604 mp_req->resp_buf,
605 mp_req->resp_buf_dma); 605 mp_req->resp_buf_dma);
606 mp_req->resp_buf = NULL; 606 mp_req->resp_buf = NULL;
@@ -622,7 +622,7 @@ int bnx2fc_init_mp_req(struct bnx2fc_cmd *io_req)
622 622
623 mp_req->req_len = sizeof(struct fcp_cmnd); 623 mp_req->req_len = sizeof(struct fcp_cmnd);
624 io_req->data_xfer_len = mp_req->req_len; 624 io_req->data_xfer_len = mp_req->req_len;
625 mp_req->req_buf = dma_alloc_coherent(&hba->pcidev->dev, PAGE_SIZE, 625 mp_req->req_buf = dma_alloc_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE,
626 &mp_req->req_buf_dma, 626 &mp_req->req_buf_dma,
627 GFP_ATOMIC); 627 GFP_ATOMIC);
628 if (!mp_req->req_buf) { 628 if (!mp_req->req_buf) {
@@ -631,7 +631,7 @@ int bnx2fc_init_mp_req(struct bnx2fc_cmd *io_req)
631 return FAILED; 631 return FAILED;
632 } 632 }
633 633
634 mp_req->resp_buf = dma_alloc_coherent(&hba->pcidev->dev, PAGE_SIZE, 634 mp_req->resp_buf = dma_alloc_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE,
635 &mp_req->resp_buf_dma, 635 &mp_req->resp_buf_dma,
636 GFP_ATOMIC); 636 GFP_ATOMIC);
637 if (!mp_req->resp_buf) { 637 if (!mp_req->resp_buf) {
@@ -639,8 +639,8 @@ int bnx2fc_init_mp_req(struct bnx2fc_cmd *io_req)
639 bnx2fc_free_mp_resc(io_req); 639 bnx2fc_free_mp_resc(io_req);
640 return FAILED; 640 return FAILED;
641 } 641 }
642 memset(mp_req->req_buf, 0, PAGE_SIZE); 642 memset(mp_req->req_buf, 0, CNIC_PAGE_SIZE);
643 memset(mp_req->resp_buf, 0, PAGE_SIZE); 643 memset(mp_req->resp_buf, 0, CNIC_PAGE_SIZE);
644 644
645 /* Allocate and map mp_req_bd and mp_resp_bd */ 645 /* Allocate and map mp_req_bd and mp_resp_bd */
646 sz = sizeof(struct fcoe_bd_ctx); 646 sz = sizeof(struct fcoe_bd_ctx);
@@ -665,7 +665,7 @@ int bnx2fc_init_mp_req(struct bnx2fc_cmd *io_req)
665 mp_req_bd = mp_req->mp_req_bd; 665 mp_req_bd = mp_req->mp_req_bd;
666 mp_req_bd->buf_addr_lo = (u32)addr & 0xffffffff; 666 mp_req_bd->buf_addr_lo = (u32)addr & 0xffffffff;
667 mp_req_bd->buf_addr_hi = (u32)((u64)addr >> 32); 667 mp_req_bd->buf_addr_hi = (u32)((u64)addr >> 32);
668 mp_req_bd->buf_len = PAGE_SIZE; 668 mp_req_bd->buf_len = CNIC_PAGE_SIZE;
669 mp_req_bd->flags = 0; 669 mp_req_bd->flags = 0;
670 670
671 /* 671 /*
@@ -677,7 +677,7 @@ int bnx2fc_init_mp_req(struct bnx2fc_cmd *io_req)
677 addr = mp_req->resp_buf_dma; 677 addr = mp_req->resp_buf_dma;
678 mp_resp_bd->buf_addr_lo = (u32)addr & 0xffffffff; 678 mp_resp_bd->buf_addr_lo = (u32)addr & 0xffffffff;
679 mp_resp_bd->buf_addr_hi = (u32)((u64)addr >> 32); 679 mp_resp_bd->buf_addr_hi = (u32)((u64)addr >> 32);
680 mp_resp_bd->buf_len = PAGE_SIZE; 680 mp_resp_bd->buf_len = CNIC_PAGE_SIZE;
681 mp_resp_bd->flags = 0; 681 mp_resp_bd->flags = 0;
682 682
683 return SUCCESS; 683 return SUCCESS;
diff --git a/drivers/scsi/bnx2fc/bnx2fc_tgt.c b/drivers/scsi/bnx2fc/bnx2fc_tgt.c
index 4d93177dfb53..d9bae5672273 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_tgt.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_tgt.c
@@ -673,7 +673,8 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba,
673 673
674 /* Allocate and map SQ */ 674 /* Allocate and map SQ */
675 tgt->sq_mem_size = tgt->max_sqes * BNX2FC_SQ_WQE_SIZE; 675 tgt->sq_mem_size = tgt->max_sqes * BNX2FC_SQ_WQE_SIZE;
676 tgt->sq_mem_size = (tgt->sq_mem_size + (PAGE_SIZE - 1)) & PAGE_MASK; 676 tgt->sq_mem_size = (tgt->sq_mem_size + (CNIC_PAGE_SIZE - 1)) &
677 CNIC_PAGE_MASK;
677 678
678 tgt->sq = dma_alloc_coherent(&hba->pcidev->dev, tgt->sq_mem_size, 679 tgt->sq = dma_alloc_coherent(&hba->pcidev->dev, tgt->sq_mem_size,
679 &tgt->sq_dma, GFP_KERNEL); 680 &tgt->sq_dma, GFP_KERNEL);
@@ -686,7 +687,8 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba,
686 687
687 /* Allocate and map CQ */ 688 /* Allocate and map CQ */
688 tgt->cq_mem_size = tgt->max_cqes * BNX2FC_CQ_WQE_SIZE; 689 tgt->cq_mem_size = tgt->max_cqes * BNX2FC_CQ_WQE_SIZE;
689 tgt->cq_mem_size = (tgt->cq_mem_size + (PAGE_SIZE - 1)) & PAGE_MASK; 690 tgt->cq_mem_size = (tgt->cq_mem_size + (CNIC_PAGE_SIZE - 1)) &
691 CNIC_PAGE_MASK;
690 692
691 tgt->cq = dma_alloc_coherent(&hba->pcidev->dev, tgt->cq_mem_size, 693 tgt->cq = dma_alloc_coherent(&hba->pcidev->dev, tgt->cq_mem_size,
692 &tgt->cq_dma, GFP_KERNEL); 694 &tgt->cq_dma, GFP_KERNEL);
@@ -699,7 +701,8 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba,
699 701
700 /* Allocate and map RQ and RQ PBL */ 702 /* Allocate and map RQ and RQ PBL */
701 tgt->rq_mem_size = tgt->max_rqes * BNX2FC_RQ_WQE_SIZE; 703 tgt->rq_mem_size = tgt->max_rqes * BNX2FC_RQ_WQE_SIZE;
702 tgt->rq_mem_size = (tgt->rq_mem_size + (PAGE_SIZE - 1)) & PAGE_MASK; 704 tgt->rq_mem_size = (tgt->rq_mem_size + (CNIC_PAGE_SIZE - 1)) &
705 CNIC_PAGE_MASK;
703 706
704 tgt->rq = dma_alloc_coherent(&hba->pcidev->dev, tgt->rq_mem_size, 707 tgt->rq = dma_alloc_coherent(&hba->pcidev->dev, tgt->rq_mem_size,
705 &tgt->rq_dma, GFP_KERNEL); 708 &tgt->rq_dma, GFP_KERNEL);
@@ -710,8 +713,9 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba,
710 } 713 }
711 memset(tgt->rq, 0, tgt->rq_mem_size); 714 memset(tgt->rq, 0, tgt->rq_mem_size);
712 715
713 tgt->rq_pbl_size = (tgt->rq_mem_size / PAGE_SIZE) * sizeof(void *); 716 tgt->rq_pbl_size = (tgt->rq_mem_size / CNIC_PAGE_SIZE) * sizeof(void *);
714 tgt->rq_pbl_size = (tgt->rq_pbl_size + (PAGE_SIZE - 1)) & PAGE_MASK; 717 tgt->rq_pbl_size = (tgt->rq_pbl_size + (CNIC_PAGE_SIZE - 1)) &
718 CNIC_PAGE_MASK;
715 719
716 tgt->rq_pbl = dma_alloc_coherent(&hba->pcidev->dev, tgt->rq_pbl_size, 720 tgt->rq_pbl = dma_alloc_coherent(&hba->pcidev->dev, tgt->rq_pbl_size,
717 &tgt->rq_pbl_dma, GFP_KERNEL); 721 &tgt->rq_pbl_dma, GFP_KERNEL);
@@ -722,7 +726,7 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba,
722 } 726 }
723 727
724 memset(tgt->rq_pbl, 0, tgt->rq_pbl_size); 728 memset(tgt->rq_pbl, 0, tgt->rq_pbl_size);
725 num_pages = tgt->rq_mem_size / PAGE_SIZE; 729 num_pages = tgt->rq_mem_size / CNIC_PAGE_SIZE;
726 page = tgt->rq_dma; 730 page = tgt->rq_dma;
727 pbl = (u32 *)tgt->rq_pbl; 731 pbl = (u32 *)tgt->rq_pbl;
728 732
@@ -731,13 +735,13 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba,
731 pbl++; 735 pbl++;
732 *pbl = (u32)((u64)page >> 32); 736 *pbl = (u32)((u64)page >> 32);
733 pbl++; 737 pbl++;
734 page += PAGE_SIZE; 738 page += CNIC_PAGE_SIZE;
735 } 739 }
736 740
737 /* Allocate and map XFERQ */ 741 /* Allocate and map XFERQ */
738 tgt->xferq_mem_size = tgt->max_sqes * BNX2FC_XFERQ_WQE_SIZE; 742 tgt->xferq_mem_size = tgt->max_sqes * BNX2FC_XFERQ_WQE_SIZE;
739 tgt->xferq_mem_size = (tgt->xferq_mem_size + (PAGE_SIZE - 1)) & 743 tgt->xferq_mem_size = (tgt->xferq_mem_size + (CNIC_PAGE_SIZE - 1)) &
740 PAGE_MASK; 744 CNIC_PAGE_MASK;
741 745
742 tgt->xferq = dma_alloc_coherent(&hba->pcidev->dev, tgt->xferq_mem_size, 746 tgt->xferq = dma_alloc_coherent(&hba->pcidev->dev, tgt->xferq_mem_size,
743 &tgt->xferq_dma, GFP_KERNEL); 747 &tgt->xferq_dma, GFP_KERNEL);
@@ -750,8 +754,8 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba,
750 754
751 /* Allocate and map CONFQ & CONFQ PBL */ 755 /* Allocate and map CONFQ & CONFQ PBL */
752 tgt->confq_mem_size = tgt->max_sqes * BNX2FC_CONFQ_WQE_SIZE; 756 tgt->confq_mem_size = tgt->max_sqes * BNX2FC_CONFQ_WQE_SIZE;
753 tgt->confq_mem_size = (tgt->confq_mem_size + (PAGE_SIZE - 1)) & 757 tgt->confq_mem_size = (tgt->confq_mem_size + (CNIC_PAGE_SIZE - 1)) &
754 PAGE_MASK; 758 CNIC_PAGE_MASK;
755 759
756 tgt->confq = dma_alloc_coherent(&hba->pcidev->dev, tgt->confq_mem_size, 760 tgt->confq = dma_alloc_coherent(&hba->pcidev->dev, tgt->confq_mem_size,
757 &tgt->confq_dma, GFP_KERNEL); 761 &tgt->confq_dma, GFP_KERNEL);
@@ -763,9 +767,9 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba,
763 memset(tgt->confq, 0, tgt->confq_mem_size); 767 memset(tgt->confq, 0, tgt->confq_mem_size);
764 768
765 tgt->confq_pbl_size = 769 tgt->confq_pbl_size =
766 (tgt->confq_mem_size / PAGE_SIZE) * sizeof(void *); 770 (tgt->confq_mem_size / CNIC_PAGE_SIZE) * sizeof(void *);
767 tgt->confq_pbl_size = 771 tgt->confq_pbl_size =
768 (tgt->confq_pbl_size + (PAGE_SIZE - 1)) & PAGE_MASK; 772 (tgt->confq_pbl_size + (CNIC_PAGE_SIZE - 1)) & CNIC_PAGE_MASK;
769 773
770 tgt->confq_pbl = dma_alloc_coherent(&hba->pcidev->dev, 774 tgt->confq_pbl = dma_alloc_coherent(&hba->pcidev->dev,
771 tgt->confq_pbl_size, 775 tgt->confq_pbl_size,
@@ -777,7 +781,7 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba,
777 } 781 }
778 782
779 memset(tgt->confq_pbl, 0, tgt->confq_pbl_size); 783 memset(tgt->confq_pbl, 0, tgt->confq_pbl_size);
780 num_pages = tgt->confq_mem_size / PAGE_SIZE; 784 num_pages = tgt->confq_mem_size / CNIC_PAGE_SIZE;
781 page = tgt->confq_dma; 785 page = tgt->confq_dma;
782 pbl = (u32 *)tgt->confq_pbl; 786 pbl = (u32 *)tgt->confq_pbl;
783 787
@@ -786,7 +790,7 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba,
786 pbl++; 790 pbl++;
787 *pbl = (u32)((u64)page >> 32); 791 *pbl = (u32)((u64)page >> 32);
788 pbl++; 792 pbl++;
789 page += PAGE_SIZE; 793 page += CNIC_PAGE_SIZE;
790 } 794 }
791 795
792 /* Allocate and map ConnDB */ 796 /* Allocate and map ConnDB */
@@ -805,8 +809,8 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba,
805 809
806 /* Allocate and map LCQ */ 810 /* Allocate and map LCQ */
807 tgt->lcq_mem_size = (tgt->max_sqes + 8) * BNX2FC_SQ_WQE_SIZE; 811 tgt->lcq_mem_size = (tgt->max_sqes + 8) * BNX2FC_SQ_WQE_SIZE;
808 tgt->lcq_mem_size = (tgt->lcq_mem_size + (PAGE_SIZE - 1)) & 812 tgt->lcq_mem_size = (tgt->lcq_mem_size + (CNIC_PAGE_SIZE - 1)) &
809 PAGE_MASK; 813 CNIC_PAGE_MASK;
810 814
811 tgt->lcq = dma_alloc_coherent(&hba->pcidev->dev, tgt->lcq_mem_size, 815 tgt->lcq = dma_alloc_coherent(&hba->pcidev->dev, tgt->lcq_mem_size,
812 &tgt->lcq_dma, GFP_KERNEL); 816 &tgt->lcq_dma, GFP_KERNEL);
diff --git a/drivers/scsi/bnx2i/bnx2i_hwi.c b/drivers/scsi/bnx2i/bnx2i_hwi.c
index e4cf23df4b4f..b87a1933f880 100644
--- a/drivers/scsi/bnx2i/bnx2i_hwi.c
+++ b/drivers/scsi/bnx2i/bnx2i_hwi.c
@@ -61,7 +61,7 @@ static void bnx2i_adjust_qp_size(struct bnx2i_hba *hba)
61 * yield integral num of page buffers 61 * yield integral num of page buffers
62 */ 62 */
63 /* adjust SQ */ 63 /* adjust SQ */
64 num_elements_per_pg = PAGE_SIZE / BNX2I_SQ_WQE_SIZE; 64 num_elements_per_pg = CNIC_PAGE_SIZE / BNX2I_SQ_WQE_SIZE;
65 if (hba->max_sqes < num_elements_per_pg) 65 if (hba->max_sqes < num_elements_per_pg)
66 hba->max_sqes = num_elements_per_pg; 66 hba->max_sqes = num_elements_per_pg;
67 else if (hba->max_sqes % num_elements_per_pg) 67 else if (hba->max_sqes % num_elements_per_pg)
@@ -69,7 +69,7 @@ static void bnx2i_adjust_qp_size(struct bnx2i_hba *hba)
69 ~(num_elements_per_pg - 1); 69 ~(num_elements_per_pg - 1);
70 70
71 /* adjust CQ */ 71 /* adjust CQ */
72 num_elements_per_pg = PAGE_SIZE / BNX2I_CQE_SIZE; 72 num_elements_per_pg = CNIC_PAGE_SIZE / BNX2I_CQE_SIZE;
73 if (hba->max_cqes < num_elements_per_pg) 73 if (hba->max_cqes < num_elements_per_pg)
74 hba->max_cqes = num_elements_per_pg; 74 hba->max_cqes = num_elements_per_pg;
75 else if (hba->max_cqes % num_elements_per_pg) 75 else if (hba->max_cqes % num_elements_per_pg)
@@ -77,7 +77,7 @@ static void bnx2i_adjust_qp_size(struct bnx2i_hba *hba)
77 ~(num_elements_per_pg - 1); 77 ~(num_elements_per_pg - 1);
78 78
79 /* adjust RQ */ 79 /* adjust RQ */
80 num_elements_per_pg = PAGE_SIZE / BNX2I_RQ_WQE_SIZE; 80 num_elements_per_pg = CNIC_PAGE_SIZE / BNX2I_RQ_WQE_SIZE;
81 if (hba->max_rqes < num_elements_per_pg) 81 if (hba->max_rqes < num_elements_per_pg)
82 hba->max_rqes = num_elements_per_pg; 82 hba->max_rqes = num_elements_per_pg;
83 else if (hba->max_rqes % num_elements_per_pg) 83 else if (hba->max_rqes % num_elements_per_pg)
@@ -959,7 +959,7 @@ static void setup_qp_page_tables(struct bnx2i_endpoint *ep)
959 959
960 /* SQ page table */ 960 /* SQ page table */
961 memset(ep->qp.sq_pgtbl_virt, 0, ep->qp.sq_pgtbl_size); 961 memset(ep->qp.sq_pgtbl_virt, 0, ep->qp.sq_pgtbl_size);
962 num_pages = ep->qp.sq_mem_size / PAGE_SIZE; 962 num_pages = ep->qp.sq_mem_size / CNIC_PAGE_SIZE;
963 page = ep->qp.sq_phys; 963 page = ep->qp.sq_phys;
964 964
965 if (cnic_dev_10g) 965 if (cnic_dev_10g)
@@ -973,7 +973,7 @@ static void setup_qp_page_tables(struct bnx2i_endpoint *ep)
973 ptbl++; 973 ptbl++;
974 *ptbl = (u32) ((u64) page >> 32); 974 *ptbl = (u32) ((u64) page >> 32);
975 ptbl++; 975 ptbl++;
976 page += PAGE_SIZE; 976 page += CNIC_PAGE_SIZE;
977 } else { 977 } else {
978 /* PTE is written in big endian format for 978 /* PTE is written in big endian format for
979 * 5706/5708/5709 devices */ 979 * 5706/5708/5709 devices */
@@ -981,13 +981,13 @@ static void setup_qp_page_tables(struct bnx2i_endpoint *ep)
981 ptbl++; 981 ptbl++;
982 *ptbl = (u32) page; 982 *ptbl = (u32) page;
983 ptbl++; 983 ptbl++;
984 page += PAGE_SIZE; 984 page += CNIC_PAGE_SIZE;
985 } 985 }
986 } 986 }
987 987
988 /* RQ page table */ 988 /* RQ page table */
989 memset(ep->qp.rq_pgtbl_virt, 0, ep->qp.rq_pgtbl_size); 989 memset(ep->qp.rq_pgtbl_virt, 0, ep->qp.rq_pgtbl_size);
990 num_pages = ep->qp.rq_mem_size / PAGE_SIZE; 990 num_pages = ep->qp.rq_mem_size / CNIC_PAGE_SIZE;
991 page = ep->qp.rq_phys; 991 page = ep->qp.rq_phys;
992 992
993 if (cnic_dev_10g) 993 if (cnic_dev_10g)
@@ -1001,7 +1001,7 @@ static void setup_qp_page_tables(struct bnx2i_endpoint *ep)
1001 ptbl++; 1001 ptbl++;
1002 *ptbl = (u32) ((u64) page >> 32); 1002 *ptbl = (u32) ((u64) page >> 32);
1003 ptbl++; 1003 ptbl++;
1004 page += PAGE_SIZE; 1004 page += CNIC_PAGE_SIZE;
1005 } else { 1005 } else {
1006 /* PTE is written in big endian format for 1006 /* PTE is written in big endian format for
1007 * 5706/5708/5709 devices */ 1007 * 5706/5708/5709 devices */
@@ -1009,13 +1009,13 @@ static void setup_qp_page_tables(struct bnx2i_endpoint *ep)
1009 ptbl++; 1009 ptbl++;
1010 *ptbl = (u32) page; 1010 *ptbl = (u32) page;
1011 ptbl++; 1011 ptbl++;
1012 page += PAGE_SIZE; 1012 page += CNIC_PAGE_SIZE;
1013 } 1013 }
1014 } 1014 }
1015 1015
1016 /* CQ page table */ 1016 /* CQ page table */
1017 memset(ep->qp.cq_pgtbl_virt, 0, ep->qp.cq_pgtbl_size); 1017 memset(ep->qp.cq_pgtbl_virt, 0, ep->qp.cq_pgtbl_size);
1018 num_pages = ep->qp.cq_mem_size / PAGE_SIZE; 1018 num_pages = ep->qp.cq_mem_size / CNIC_PAGE_SIZE;
1019 page = ep->qp.cq_phys; 1019 page = ep->qp.cq_phys;
1020 1020
1021 if (cnic_dev_10g) 1021 if (cnic_dev_10g)
@@ -1029,7 +1029,7 @@ static void setup_qp_page_tables(struct bnx2i_endpoint *ep)
1029 ptbl++; 1029 ptbl++;
1030 *ptbl = (u32) ((u64) page >> 32); 1030 *ptbl = (u32) ((u64) page >> 32);
1031 ptbl++; 1031 ptbl++;
1032 page += PAGE_SIZE; 1032 page += CNIC_PAGE_SIZE;
1033 } else { 1033 } else {
1034 /* PTE is written in big endian format for 1034 /* PTE is written in big endian format for
1035 * 5706/5708/5709 devices */ 1035 * 5706/5708/5709 devices */
@@ -1037,7 +1037,7 @@ static void setup_qp_page_tables(struct bnx2i_endpoint *ep)
1037 ptbl++; 1037 ptbl++;
1038 *ptbl = (u32) page; 1038 *ptbl = (u32) page;
1039 ptbl++; 1039 ptbl++;
1040 page += PAGE_SIZE; 1040 page += CNIC_PAGE_SIZE;
1041 } 1041 }
1042 } 1042 }
1043} 1043}
@@ -1064,11 +1064,11 @@ int bnx2i_alloc_qp_resc(struct bnx2i_hba *hba, struct bnx2i_endpoint *ep)
1064 /* Allocate page table memory for SQ which is page aligned */ 1064 /* Allocate page table memory for SQ which is page aligned */
1065 ep->qp.sq_mem_size = hba->max_sqes * BNX2I_SQ_WQE_SIZE; 1065 ep->qp.sq_mem_size = hba->max_sqes * BNX2I_SQ_WQE_SIZE;
1066 ep->qp.sq_mem_size = 1066 ep->qp.sq_mem_size =
1067 (ep->qp.sq_mem_size + (PAGE_SIZE - 1)) & PAGE_MASK; 1067 (ep->qp.sq_mem_size + (CNIC_PAGE_SIZE - 1)) & CNIC_PAGE_MASK;
1068 ep->qp.sq_pgtbl_size = 1068 ep->qp.sq_pgtbl_size =
1069 (ep->qp.sq_mem_size / PAGE_SIZE) * sizeof(void *); 1069 (ep->qp.sq_mem_size / CNIC_PAGE_SIZE) * sizeof(void *);
1070 ep->qp.sq_pgtbl_size = 1070 ep->qp.sq_pgtbl_size =
1071 (ep->qp.sq_pgtbl_size + (PAGE_SIZE - 1)) & PAGE_MASK; 1071 (ep->qp.sq_pgtbl_size + (CNIC_PAGE_SIZE - 1)) & CNIC_PAGE_MASK;
1072 1072
1073 ep->qp.sq_pgtbl_virt = 1073 ep->qp.sq_pgtbl_virt =
1074 dma_alloc_coherent(&hba->pcidev->dev, ep->qp.sq_pgtbl_size, 1074 dma_alloc_coherent(&hba->pcidev->dev, ep->qp.sq_pgtbl_size,
@@ -1101,11 +1101,11 @@ int bnx2i_alloc_qp_resc(struct bnx2i_hba *hba, struct bnx2i_endpoint *ep)
1101 /* Allocate page table memory for CQ which is page aligned */ 1101 /* Allocate page table memory for CQ which is page aligned */
1102 ep->qp.cq_mem_size = hba->max_cqes * BNX2I_CQE_SIZE; 1102 ep->qp.cq_mem_size = hba->max_cqes * BNX2I_CQE_SIZE;
1103 ep->qp.cq_mem_size = 1103 ep->qp.cq_mem_size =
1104 (ep->qp.cq_mem_size + (PAGE_SIZE - 1)) & PAGE_MASK; 1104 (ep->qp.cq_mem_size + (CNIC_PAGE_SIZE - 1)) & CNIC_PAGE_MASK;
1105 ep->qp.cq_pgtbl_size = 1105 ep->qp.cq_pgtbl_size =
1106 (ep->qp.cq_mem_size / PAGE_SIZE) * sizeof(void *); 1106 (ep->qp.cq_mem_size / CNIC_PAGE_SIZE) * sizeof(void *);
1107 ep->qp.cq_pgtbl_size = 1107 ep->qp.cq_pgtbl_size =
1108 (ep->qp.cq_pgtbl_size + (PAGE_SIZE - 1)) & PAGE_MASK; 1108 (ep->qp.cq_pgtbl_size + (CNIC_PAGE_SIZE - 1)) & CNIC_PAGE_MASK;
1109 1109
1110 ep->qp.cq_pgtbl_virt = 1110 ep->qp.cq_pgtbl_virt =
1111 dma_alloc_coherent(&hba->pcidev->dev, ep->qp.cq_pgtbl_size, 1111 dma_alloc_coherent(&hba->pcidev->dev, ep->qp.cq_pgtbl_size,
@@ -1144,11 +1144,11 @@ int bnx2i_alloc_qp_resc(struct bnx2i_hba *hba, struct bnx2i_endpoint *ep)
1144 /* Allocate page table memory for RQ which is page aligned */ 1144 /* Allocate page table memory for RQ which is page aligned */
1145 ep->qp.rq_mem_size = hba->max_rqes * BNX2I_RQ_WQE_SIZE; 1145 ep->qp.rq_mem_size = hba->max_rqes * BNX2I_RQ_WQE_SIZE;
1146 ep->qp.rq_mem_size = 1146 ep->qp.rq_mem_size =
1147 (ep->qp.rq_mem_size + (PAGE_SIZE - 1)) & PAGE_MASK; 1147 (ep->qp.rq_mem_size + (CNIC_PAGE_SIZE - 1)) & CNIC_PAGE_MASK;
1148 ep->qp.rq_pgtbl_size = 1148 ep->qp.rq_pgtbl_size =
1149 (ep->qp.rq_mem_size / PAGE_SIZE) * sizeof(void *); 1149 (ep->qp.rq_mem_size / CNIC_PAGE_SIZE) * sizeof(void *);
1150 ep->qp.rq_pgtbl_size = 1150 ep->qp.rq_pgtbl_size =
1151 (ep->qp.rq_pgtbl_size + (PAGE_SIZE - 1)) & PAGE_MASK; 1151 (ep->qp.rq_pgtbl_size + (CNIC_PAGE_SIZE - 1)) & CNIC_PAGE_MASK;
1152 1152
1153 ep->qp.rq_pgtbl_virt = 1153 ep->qp.rq_pgtbl_virt =
1154 dma_alloc_coherent(&hba->pcidev->dev, ep->qp.rq_pgtbl_size, 1154 dma_alloc_coherent(&hba->pcidev->dev, ep->qp.rq_pgtbl_size,
@@ -1270,7 +1270,7 @@ int bnx2i_send_fw_iscsi_init_msg(struct bnx2i_hba *hba)
1270 bnx2i_adjust_qp_size(hba); 1270 bnx2i_adjust_qp_size(hba);
1271 1271
1272 iscsi_init.flags = 1272 iscsi_init.flags =
1273 ISCSI_PAGE_SIZE_4K << ISCSI_KWQE_INIT1_PAGE_SIZE_SHIFT; 1273 (CNIC_PAGE_BITS - 8) << ISCSI_KWQE_INIT1_PAGE_SIZE_SHIFT;
1274 if (en_tcp_dack) 1274 if (en_tcp_dack)
1275 iscsi_init.flags |= ISCSI_KWQE_INIT1_DELAYED_ACK_ENABLE; 1275 iscsi_init.flags |= ISCSI_KWQE_INIT1_DELAYED_ACK_ENABLE;
1276 iscsi_init.reserved0 = 0; 1276 iscsi_init.reserved0 = 0;
@@ -1288,15 +1288,15 @@ int bnx2i_send_fw_iscsi_init_msg(struct bnx2i_hba *hba)
1288 ((hba->num_ccell & 0xFFFF) | (hba->max_sqes << 16)); 1288 ((hba->num_ccell & 0xFFFF) | (hba->max_sqes << 16));
1289 iscsi_init.num_ccells_per_conn = hba->num_ccell; 1289 iscsi_init.num_ccells_per_conn = hba->num_ccell;
1290 iscsi_init.num_tasks_per_conn = hba->max_sqes; 1290 iscsi_init.num_tasks_per_conn = hba->max_sqes;
1291 iscsi_init.sq_wqes_per_page = PAGE_SIZE / BNX2I_SQ_WQE_SIZE; 1291 iscsi_init.sq_wqes_per_page = CNIC_PAGE_SIZE / BNX2I_SQ_WQE_SIZE;
1292 iscsi_init.sq_num_wqes = hba->max_sqes; 1292 iscsi_init.sq_num_wqes = hba->max_sqes;
1293 iscsi_init.cq_log_wqes_per_page = 1293 iscsi_init.cq_log_wqes_per_page =
1294 (u8) bnx2i_power_of2(PAGE_SIZE / BNX2I_CQE_SIZE); 1294 (u8) bnx2i_power_of2(CNIC_PAGE_SIZE / BNX2I_CQE_SIZE);
1295 iscsi_init.cq_num_wqes = hba->max_cqes; 1295 iscsi_init.cq_num_wqes = hba->max_cqes;
1296 iscsi_init.cq_num_pages = (hba->max_cqes * BNX2I_CQE_SIZE + 1296 iscsi_init.cq_num_pages = (hba->max_cqes * BNX2I_CQE_SIZE +
1297 (PAGE_SIZE - 1)) / PAGE_SIZE; 1297 (CNIC_PAGE_SIZE - 1)) / CNIC_PAGE_SIZE;
1298 iscsi_init.sq_num_pages = (hba->max_sqes * BNX2I_SQ_WQE_SIZE + 1298 iscsi_init.sq_num_pages = (hba->max_sqes * BNX2I_SQ_WQE_SIZE +
1299 (PAGE_SIZE - 1)) / PAGE_SIZE; 1299 (CNIC_PAGE_SIZE - 1)) / CNIC_PAGE_SIZE;
1300 iscsi_init.rq_buffer_size = BNX2I_RQ_WQE_SIZE; 1300 iscsi_init.rq_buffer_size = BNX2I_RQ_WQE_SIZE;
1301 iscsi_init.rq_num_wqes = hba->max_rqes; 1301 iscsi_init.rq_num_wqes = hba->max_rqes;
1302 1302
diff --git a/drivers/scsi/bnx2i/bnx2i_iscsi.c b/drivers/scsi/bnx2i/bnx2i_iscsi.c
index 854dad7d5b03..c8b0aff5bbd4 100644
--- a/drivers/scsi/bnx2i/bnx2i_iscsi.c
+++ b/drivers/scsi/bnx2i/bnx2i_iscsi.c
@@ -525,7 +525,7 @@ static int bnx2i_setup_mp_bdt(struct bnx2i_hba *hba)
525 struct iscsi_bd *mp_bdt; 525 struct iscsi_bd *mp_bdt;
526 u64 addr; 526 u64 addr;
527 527
528 hba->mp_bd_tbl = dma_alloc_coherent(&hba->pcidev->dev, PAGE_SIZE, 528 hba->mp_bd_tbl = dma_alloc_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE,
529 &hba->mp_bd_dma, GFP_KERNEL); 529 &hba->mp_bd_dma, GFP_KERNEL);
530 if (!hba->mp_bd_tbl) { 530 if (!hba->mp_bd_tbl) {
531 printk(KERN_ERR "unable to allocate Middle Path BDT\n"); 531 printk(KERN_ERR "unable to allocate Middle Path BDT\n");
@@ -533,11 +533,12 @@ static int bnx2i_setup_mp_bdt(struct bnx2i_hba *hba)
533 goto out; 533 goto out;
534 } 534 }
535 535
536 hba->dummy_buffer = dma_alloc_coherent(&hba->pcidev->dev, PAGE_SIZE, 536 hba->dummy_buffer = dma_alloc_coherent(&hba->pcidev->dev,
537 CNIC_PAGE_SIZE,
537 &hba->dummy_buf_dma, GFP_KERNEL); 538 &hba->dummy_buf_dma, GFP_KERNEL);
538 if (!hba->dummy_buffer) { 539 if (!hba->dummy_buffer) {
539 printk(KERN_ERR "unable to alloc Middle Path Dummy Buffer\n"); 540 printk(KERN_ERR "unable to alloc Middle Path Dummy Buffer\n");
540 dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE, 541 dma_free_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE,
541 hba->mp_bd_tbl, hba->mp_bd_dma); 542 hba->mp_bd_tbl, hba->mp_bd_dma);
542 hba->mp_bd_tbl = NULL; 543 hba->mp_bd_tbl = NULL;
543 rc = -1; 544 rc = -1;
@@ -548,7 +549,7 @@ static int bnx2i_setup_mp_bdt(struct bnx2i_hba *hba)
548 addr = (unsigned long) hba->dummy_buf_dma; 549 addr = (unsigned long) hba->dummy_buf_dma;
549 mp_bdt->buffer_addr_lo = addr & 0xffffffff; 550 mp_bdt->buffer_addr_lo = addr & 0xffffffff;
550 mp_bdt->buffer_addr_hi = addr >> 32; 551 mp_bdt->buffer_addr_hi = addr >> 32;
551 mp_bdt->buffer_length = PAGE_SIZE; 552 mp_bdt->buffer_length = CNIC_PAGE_SIZE;
552 mp_bdt->flags = ISCSI_BD_LAST_IN_BD_CHAIN | 553 mp_bdt->flags = ISCSI_BD_LAST_IN_BD_CHAIN |
553 ISCSI_BD_FIRST_IN_BD_CHAIN; 554 ISCSI_BD_FIRST_IN_BD_CHAIN;
554out: 555out:
@@ -565,12 +566,12 @@ out:
565static void bnx2i_free_mp_bdt(struct bnx2i_hba *hba) 566static void bnx2i_free_mp_bdt(struct bnx2i_hba *hba)
566{ 567{
567 if (hba->mp_bd_tbl) { 568 if (hba->mp_bd_tbl) {
568 dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE, 569 dma_free_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE,
569 hba->mp_bd_tbl, hba->mp_bd_dma); 570 hba->mp_bd_tbl, hba->mp_bd_dma);
570 hba->mp_bd_tbl = NULL; 571 hba->mp_bd_tbl = NULL;
571 } 572 }
572 if (hba->dummy_buffer) { 573 if (hba->dummy_buffer) {
573 dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE, 574 dma_free_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE,
574 hba->dummy_buffer, hba->dummy_buf_dma); 575 hba->dummy_buffer, hba->dummy_buf_dma);
575 hba->dummy_buffer = NULL; 576 hba->dummy_buffer = NULL;
576 } 577 }
@@ -934,14 +935,14 @@ static void bnx2i_conn_free_login_resources(struct bnx2i_hba *hba,
934 struct bnx2i_conn *bnx2i_conn) 935 struct bnx2i_conn *bnx2i_conn)
935{ 936{
936 if (bnx2i_conn->gen_pdu.resp_bd_tbl) { 937 if (bnx2i_conn->gen_pdu.resp_bd_tbl) {
937 dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE, 938 dma_free_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE,
938 bnx2i_conn->gen_pdu.resp_bd_tbl, 939 bnx2i_conn->gen_pdu.resp_bd_tbl,
939 bnx2i_conn->gen_pdu.resp_bd_dma); 940 bnx2i_conn->gen_pdu.resp_bd_dma);
940 bnx2i_conn->gen_pdu.resp_bd_tbl = NULL; 941 bnx2i_conn->gen_pdu.resp_bd_tbl = NULL;
941 } 942 }
942 943
943 if (bnx2i_conn->gen_pdu.req_bd_tbl) { 944 if (bnx2i_conn->gen_pdu.req_bd_tbl) {
944 dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE, 945 dma_free_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE,
945 bnx2i_conn->gen_pdu.req_bd_tbl, 946 bnx2i_conn->gen_pdu.req_bd_tbl,
946 bnx2i_conn->gen_pdu.req_bd_dma); 947 bnx2i_conn->gen_pdu.req_bd_dma);
947 bnx2i_conn->gen_pdu.req_bd_tbl = NULL; 948 bnx2i_conn->gen_pdu.req_bd_tbl = NULL;
@@ -998,13 +999,13 @@ static int bnx2i_conn_alloc_login_resources(struct bnx2i_hba *hba,
998 bnx2i_conn->gen_pdu.resp_wr_ptr = bnx2i_conn->gen_pdu.resp_buf; 999 bnx2i_conn->gen_pdu.resp_wr_ptr = bnx2i_conn->gen_pdu.resp_buf;
999 1000
1000 bnx2i_conn->gen_pdu.req_bd_tbl = 1001 bnx2i_conn->gen_pdu.req_bd_tbl =
1001 dma_alloc_coherent(&hba->pcidev->dev, PAGE_SIZE, 1002 dma_alloc_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE,
1002 &bnx2i_conn->gen_pdu.req_bd_dma, GFP_KERNEL); 1003 &bnx2i_conn->gen_pdu.req_bd_dma, GFP_KERNEL);
1003 if (bnx2i_conn->gen_pdu.req_bd_tbl == NULL) 1004 if (bnx2i_conn->gen_pdu.req_bd_tbl == NULL)
1004 goto login_req_bd_tbl_failure; 1005 goto login_req_bd_tbl_failure;
1005 1006
1006 bnx2i_conn->gen_pdu.resp_bd_tbl = 1007 bnx2i_conn->gen_pdu.resp_bd_tbl =
1007 dma_alloc_coherent(&hba->pcidev->dev, PAGE_SIZE, 1008 dma_alloc_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE,
1008 &bnx2i_conn->gen_pdu.resp_bd_dma, 1009 &bnx2i_conn->gen_pdu.resp_bd_dma,
1009 GFP_KERNEL); 1010 GFP_KERNEL);
1010 if (bnx2i_conn->gen_pdu.resp_bd_tbl == NULL) 1011 if (bnx2i_conn->gen_pdu.resp_bd_tbl == NULL)
@@ -1013,7 +1014,7 @@ static int bnx2i_conn_alloc_login_resources(struct bnx2i_hba *hba,
1013 return 0; 1014 return 0;
1014 1015
1015login_resp_bd_tbl_failure: 1016login_resp_bd_tbl_failure:
1016 dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE, 1017 dma_free_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE,
1017 bnx2i_conn->gen_pdu.req_bd_tbl, 1018 bnx2i_conn->gen_pdu.req_bd_tbl,
1018 bnx2i_conn->gen_pdu.req_bd_dma); 1019 bnx2i_conn->gen_pdu.req_bd_dma);
1019 bnx2i_conn->gen_pdu.req_bd_tbl = NULL; 1020 bnx2i_conn->gen_pdu.req_bd_tbl = NULL;
diff --git a/drivers/tty/serial/sunhv.c b/drivers/tty/serial/sunhv.c
index cf86e729532b..dc697cee248a 100644
--- a/drivers/tty/serial/sunhv.c
+++ b/drivers/tty/serial/sunhv.c
@@ -433,13 +433,10 @@ static void sunhv_console_write_paged(struct console *con, const char *s, unsign
433 unsigned long flags; 433 unsigned long flags;
434 int locked = 1; 434 int locked = 1;
435 435
436 local_irq_save(flags); 436 if (port->sysrq || oops_in_progress)
437 if (port->sysrq) { 437 locked = spin_trylock_irqsave(&port->lock, flags);
438 locked = 0; 438 else
439 } else if (oops_in_progress) { 439 spin_lock_irqsave(&port->lock, flags);
440 locked = spin_trylock(&port->lock);
441 } else
442 spin_lock(&port->lock);
443 440
444 while (n > 0) { 441 while (n > 0) {
445 unsigned long ra = __pa(con_write_page); 442 unsigned long ra = __pa(con_write_page);
@@ -470,8 +467,7 @@ static void sunhv_console_write_paged(struct console *con, const char *s, unsign
470 } 467 }
471 468
472 if (locked) 469 if (locked)
473 spin_unlock(&port->lock); 470 spin_unlock_irqrestore(&port->lock, flags);
474 local_irq_restore(flags);
475} 471}
476 472
477static inline void sunhv_console_putchar(struct uart_port *port, char c) 473static inline void sunhv_console_putchar(struct uart_port *port, char c)
@@ -492,7 +488,10 @@ static void sunhv_console_write_bychar(struct console *con, const char *s, unsig
492 unsigned long flags; 488 unsigned long flags;
493 int i, locked = 1; 489 int i, locked = 1;
494 490
495 local_irq_save(flags); 491 if (port->sysrq || oops_in_progress)
492 locked = spin_trylock_irqsave(&port->lock, flags);
493 else
494 spin_lock_irqsave(&port->lock, flags);
496 if (port->sysrq) { 495 if (port->sysrq) {
497 locked = 0; 496 locked = 0;
498 } else if (oops_in_progress) { 497 } else if (oops_in_progress) {
@@ -507,8 +506,7 @@ static void sunhv_console_write_bychar(struct console *con, const char *s, unsig
507 } 506 }
508 507
509 if (locked) 508 if (locked)
510 spin_unlock(&port->lock); 509 spin_unlock_irqrestore(&port->lock, flags);
511 local_irq_restore(flags);
512} 510}
513 511
514static struct console sunhv_console = { 512static struct console sunhv_console = {
diff --git a/drivers/tty/serial/sunsab.c b/drivers/tty/serial/sunsab.c
index 380fb5355cb2..5faa8e905e98 100644
--- a/drivers/tty/serial/sunsab.c
+++ b/drivers/tty/serial/sunsab.c
@@ -844,20 +844,16 @@ static void sunsab_console_write(struct console *con, const char *s, unsigned n)
844 unsigned long flags; 844 unsigned long flags;
845 int locked = 1; 845 int locked = 1;
846 846
847 local_irq_save(flags); 847 if (up->port.sysrq || oops_in_progress)
848 if (up->port.sysrq) { 848 locked = spin_trylock_irqsave(&up->port.lock, flags);
849 locked = 0; 849 else
850 } else if (oops_in_progress) { 850 spin_lock_irqsave(&up->port.lock, flags);
851 locked = spin_trylock(&up->port.lock);
852 } else
853 spin_lock(&up->port.lock);
854 851
855 uart_console_write(&up->port, s, n, sunsab_console_putchar); 852 uart_console_write(&up->port, s, n, sunsab_console_putchar);
856 sunsab_tec_wait(up); 853 sunsab_tec_wait(up);
857 854
858 if (locked) 855 if (locked)
859 spin_unlock(&up->port.lock); 856 spin_unlock_irqrestore(&up->port.lock, flags);
860 local_irq_restore(flags);
861} 857}
862 858
863static int sunsab_console_setup(struct console *con, char *options) 859static int sunsab_console_setup(struct console *con, char *options)
diff --git a/drivers/tty/serial/sunsu.c b/drivers/tty/serial/sunsu.c
index db79b76f5c8e..9a0f24f83720 100644
--- a/drivers/tty/serial/sunsu.c
+++ b/drivers/tty/serial/sunsu.c
@@ -1295,13 +1295,10 @@ static void sunsu_console_write(struct console *co, const char *s,
1295 unsigned int ier; 1295 unsigned int ier;
1296 int locked = 1; 1296 int locked = 1;
1297 1297
1298 local_irq_save(flags); 1298 if (up->port.sysrq || oops_in_progress)
1299 if (up->port.sysrq) { 1299 locked = spin_trylock_irqsave(&up->port.lock, flags);
1300 locked = 0; 1300 else
1301 } else if (oops_in_progress) { 1301 spin_lock_irqsave(&up->port.lock, flags);
1302 locked = spin_trylock(&up->port.lock);
1303 } else
1304 spin_lock(&up->port.lock);
1305 1302
1306 /* 1303 /*
1307 * First save the UER then disable the interrupts 1304 * First save the UER then disable the interrupts
@@ -1319,8 +1316,7 @@ static void sunsu_console_write(struct console *co, const char *s,
1319 serial_out(up, UART_IER, ier); 1316 serial_out(up, UART_IER, ier);
1320 1317
1321 if (locked) 1318 if (locked)
1322 spin_unlock(&up->port.lock); 1319 spin_unlock_irqrestore(&up->port.lock, flags);
1323 local_irq_restore(flags);
1324} 1320}
1325 1321
1326/* 1322/*
diff --git a/drivers/tty/serial/sunzilog.c b/drivers/tty/serial/sunzilog.c
index 45a8c6aa5837..a2c40ed287d2 100644
--- a/drivers/tty/serial/sunzilog.c
+++ b/drivers/tty/serial/sunzilog.c
@@ -1195,20 +1195,16 @@ sunzilog_console_write(struct console *con, const char *s, unsigned int count)
1195 unsigned long flags; 1195 unsigned long flags;
1196 int locked = 1; 1196 int locked = 1;
1197 1197
1198 local_irq_save(flags); 1198 if (up->port.sysrq || oops_in_progress)
1199 if (up->port.sysrq) { 1199 locked = spin_trylock_irqsave(&up->port.lock, flags);
1200 locked = 0; 1200 else
1201 } else if (oops_in_progress) { 1201 spin_lock_irqsave(&up->port.lock, flags);
1202 locked = spin_trylock(&up->port.lock);
1203 } else
1204 spin_lock(&up->port.lock);
1205 1202
1206 uart_console_write(&up->port, s, count, sunzilog_putchar); 1203 uart_console_write(&up->port, s, count, sunzilog_putchar);
1207 udelay(2); 1204 udelay(2);
1208 1205
1209 if (locked) 1206 if (locked)
1210 spin_unlock(&up->port.lock); 1207 spin_unlock_irqrestore(&up->port.lock, flags);
1211 local_irq_restore(flags);
1212} 1208}
1213 1209
1214static int __init sunzilog_console_setup(struct console *con, char *options) 1210static int __init sunzilog_console_setup(struct console *con, char *options)
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index a0fa5de210cf..e1e22e0f01e8 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -505,9 +505,13 @@ static int get_rx_bufs(struct vhost_virtqueue *vq,
505 r = -ENOBUFS; 505 r = -ENOBUFS;
506 goto err; 506 goto err;
507 } 507 }
508 d = vhost_get_vq_desc(vq->dev, vq, vq->iov + seg, 508 r = vhost_get_vq_desc(vq->dev, vq, vq->iov + seg,
509 ARRAY_SIZE(vq->iov) - seg, &out, 509 ARRAY_SIZE(vq->iov) - seg, &out,
510 &in, log, log_num); 510 &in, log, log_num);
511 if (unlikely(r < 0))
512 goto err;
513
514 d = r;
511 if (d == vq->num) { 515 if (d == vq->num) {
512 r = 0; 516 r = 0;
513 goto err; 517 goto err;
@@ -532,6 +536,12 @@ static int get_rx_bufs(struct vhost_virtqueue *vq,
532 *iovcount = seg; 536 *iovcount = seg;
533 if (unlikely(log)) 537 if (unlikely(log))
534 *log_num = nlogs; 538 *log_num = nlogs;
539
540 /* Detect overrun */
541 if (unlikely(datalen > 0)) {
542 r = UIO_MAXIOV + 1;
543 goto err;
544 }
535 return headcount; 545 return headcount;
536err: 546err:
537 vhost_discard_vq_desc(vq, headcount); 547 vhost_discard_vq_desc(vq, headcount);
@@ -587,6 +597,14 @@ static void handle_rx(struct vhost_net *net)
587 /* On error, stop handling until the next kick. */ 597 /* On error, stop handling until the next kick. */
588 if (unlikely(headcount < 0)) 598 if (unlikely(headcount < 0))
589 break; 599 break;
600 /* On overrun, truncate and discard */
601 if (unlikely(headcount > UIO_MAXIOV)) {
602 msg.msg_iovlen = 1;
603 err = sock->ops->recvmsg(NULL, sock, &msg,
604 1, MSG_DONTWAIT | MSG_TRUNC);
605 pr_debug("Discarded rx packet: len %zd\n", sock_len);
606 continue;
607 }
590 /* OK, now we need to know about added descriptors. */ 608 /* OK, now we need to know about added descriptors. */
591 if (!headcount) { 609 if (!headcount) {
592 if (unlikely(vhost_enable_notify(&net->dev, vq))) { 610 if (unlikely(vhost_enable_notify(&net->dev, vq))) {
diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c
index 37d06ea624aa..61a6ac8fa8fc 100644
--- a/drivers/xen/balloon.c
+++ b/drivers/xen/balloon.c
@@ -399,11 +399,25 @@ static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp)
399 state = BP_EAGAIN; 399 state = BP_EAGAIN;
400 break; 400 break;
401 } 401 }
402 scrub_page(page);
402 403
403 pfn = page_to_pfn(page); 404 frame_list[i] = page_to_pfn(page);
404 frame_list[i] = pfn_to_mfn(pfn); 405 }
405 406
406 scrub_page(page); 407 /*
408 * Ensure that ballooned highmem pages don't have kmaps.
409 *
410 * Do this before changing the p2m as kmap_flush_unused()
411 * reads PTEs to obtain pages (and hence needs the original
412 * p2m entry).
413 */
414 kmap_flush_unused();
415
416 /* Update direct mapping, invalidate P2M, and add to balloon. */
417 for (i = 0; i < nr_pages; i++) {
418 pfn = frame_list[i];
419 frame_list[i] = pfn_to_mfn(pfn);
420 page = pfn_to_page(pfn);
407 421
408#ifdef CONFIG_XEN_HAVE_PVMMU 422#ifdef CONFIG_XEN_HAVE_PVMMU
409 /* 423 /*
@@ -429,11 +443,9 @@ static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp)
429 } 443 }
430#endif 444#endif
431 445
432 balloon_append(pfn_to_page(pfn)); 446 balloon_append(page);
433 } 447 }
434 448
435 /* Ensure that ballooned highmem pages don't have kmaps. */
436 kmap_flush_unused();
437 flush_tlb_all(); 449 flush_tlb_all();
438 450
439 set_xen_guest_handle(reservation.extent_start, frame_list); 451 set_xen_guest_handle(reservation.extent_start, frame_list);
diff --git a/fs/anon_inodes.c b/fs/anon_inodes.c
index 24084732b1d0..80ef38c73e5a 100644
--- a/fs/anon_inodes.c
+++ b/fs/anon_inodes.c
@@ -41,19 +41,8 @@ static const struct dentry_operations anon_inodefs_dentry_operations = {
41static struct dentry *anon_inodefs_mount(struct file_system_type *fs_type, 41static struct dentry *anon_inodefs_mount(struct file_system_type *fs_type,
42 int flags, const char *dev_name, void *data) 42 int flags, const char *dev_name, void *data)
43{ 43{
44 struct dentry *root; 44 return mount_pseudo(fs_type, "anon_inode:", NULL,
45 root = mount_pseudo(fs_type, "anon_inode:", NULL,
46 &anon_inodefs_dentry_operations, ANON_INODE_FS_MAGIC); 45 &anon_inodefs_dentry_operations, ANON_INODE_FS_MAGIC);
47 if (!IS_ERR(root)) {
48 struct super_block *s = root->d_sb;
49 anon_inode_inode = alloc_anon_inode(s);
50 if (IS_ERR(anon_inode_inode)) {
51 dput(root);
52 deactivate_locked_super(s);
53 root = ERR_CAST(anon_inode_inode);
54 }
55 }
56 return root;
57} 46}
58 47
59static struct file_system_type anon_inode_fs_type = { 48static struct file_system_type anon_inode_fs_type = {
@@ -175,22 +164,15 @@ EXPORT_SYMBOL_GPL(anon_inode_getfd);
175 164
176static int __init anon_inode_init(void) 165static int __init anon_inode_init(void)
177{ 166{
178 int error;
179
180 error = register_filesystem(&anon_inode_fs_type);
181 if (error)
182 goto err_exit;
183 anon_inode_mnt = kern_mount(&anon_inode_fs_type); 167 anon_inode_mnt = kern_mount(&anon_inode_fs_type);
184 if (IS_ERR(anon_inode_mnt)) { 168 if (IS_ERR(anon_inode_mnt))
185 error = PTR_ERR(anon_inode_mnt); 169 panic("anon_inode_init() kernel mount failed (%ld)\n", PTR_ERR(anon_inode_mnt));
186 goto err_unregister_filesystem;
187 }
188 return 0;
189 170
190err_unregister_filesystem: 171 anon_inode_inode = alloc_anon_inode(anon_inode_mnt->mnt_sb);
191 unregister_filesystem(&anon_inode_fs_type); 172 if (IS_ERR(anon_inode_inode))
192err_exit: 173 panic("anon_inode_init() inode allocation failed (%ld)\n", PTR_ERR(anon_inode_inode));
193 panic(KERN_ERR "anon_inode_init() failed (%d)\n", error); 174
175 return 0;
194} 176}
195 177
196fs_initcall(anon_inode_init); 178fs_initcall(anon_inode_init);
diff --git a/fs/dcache.c b/fs/dcache.c
index 66dc62cb766d..089f681ac952 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -2833,9 +2833,9 @@ static int prepend_name(char **buffer, int *buflen, struct qstr *name)
2833 u32 dlen = ACCESS_ONCE(name->len); 2833 u32 dlen = ACCESS_ONCE(name->len);
2834 char *p; 2834 char *p;
2835 2835
2836 if (*buflen < dlen + 1)
2837 return -ENAMETOOLONG;
2838 *buflen -= dlen + 1; 2836 *buflen -= dlen + 1;
2837 if (*buflen < 0)
2838 return -ENAMETOOLONG;
2839 p = *buffer -= dlen + 1; 2839 p = *buffer -= dlen + 1;
2840 *p++ = '/'; 2840 *p++ = '/';
2841 while (dlen--) { 2841 while (dlen--) {
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index 6e39895a91b8..24bfd7ff3049 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -38,6 +38,7 @@
38#include <linux/slab.h> 38#include <linux/slab.h>
39#include <linux/ratelimit.h> 39#include <linux/ratelimit.h>
40#include <linux/aio.h> 40#include <linux/aio.h>
41#include <linux/bitops.h>
41 42
42#include "ext4_jbd2.h" 43#include "ext4_jbd2.h"
43#include "xattr.h" 44#include "xattr.h"
@@ -3921,18 +3922,20 @@ int ext4_get_inode_loc(struct inode *inode, struct ext4_iloc *iloc)
3921void ext4_set_inode_flags(struct inode *inode) 3922void ext4_set_inode_flags(struct inode *inode)
3922{ 3923{
3923 unsigned int flags = EXT4_I(inode)->i_flags; 3924 unsigned int flags = EXT4_I(inode)->i_flags;
3925 unsigned int new_fl = 0;
3924 3926
3925 inode->i_flags &= ~(S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC);
3926 if (flags & EXT4_SYNC_FL) 3927 if (flags & EXT4_SYNC_FL)
3927 inode->i_flags |= S_SYNC; 3928 new_fl |= S_SYNC;
3928 if (flags & EXT4_APPEND_FL) 3929 if (flags & EXT4_APPEND_FL)
3929 inode->i_flags |= S_APPEND; 3930 new_fl |= S_APPEND;
3930 if (flags & EXT4_IMMUTABLE_FL) 3931 if (flags & EXT4_IMMUTABLE_FL)
3931 inode->i_flags |= S_IMMUTABLE; 3932 new_fl |= S_IMMUTABLE;
3932 if (flags & EXT4_NOATIME_FL) 3933 if (flags & EXT4_NOATIME_FL)
3933 inode->i_flags |= S_NOATIME; 3934 new_fl |= S_NOATIME;
3934 if (flags & EXT4_DIRSYNC_FL) 3935 if (flags & EXT4_DIRSYNC_FL)
3935 inode->i_flags |= S_DIRSYNC; 3936 new_fl |= S_DIRSYNC;
3937 set_mask_bits(&inode->i_flags,
3938 S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC, new_fl);
3936} 3939}
3937 3940
3938/* Propagate flags from i_flags to EXT4_I(inode)->i_flags */ 3941/* Propagate flags from i_flags to EXT4_I(inode)->i_flags */
diff --git a/fs/file.c b/fs/file.c
index 60a45e9f5323..eb56a13dab3e 100644
--- a/fs/file.c
+++ b/fs/file.c
@@ -713,27 +713,16 @@ unsigned long __fdget_raw(unsigned int fd)
713 713
714unsigned long __fdget_pos(unsigned int fd) 714unsigned long __fdget_pos(unsigned int fd)
715{ 715{
716 struct files_struct *files = current->files; 716 unsigned long v = __fdget(fd);
717 struct file *file; 717 struct file *file = (struct file *)(v & ~3);
718 unsigned long v;
719
720 if (atomic_read(&files->count) == 1) {
721 file = __fcheck_files(files, fd);
722 v = 0;
723 } else {
724 file = __fget(fd, 0);
725 v = FDPUT_FPUT;
726 }
727 if (!file)
728 return 0;
729 718
730 if (file->f_mode & FMODE_ATOMIC_POS) { 719 if (file && (file->f_mode & FMODE_ATOMIC_POS)) {
731 if (file_count(file) > 1) { 720 if (file_count(file) > 1) {
732 v |= FDPUT_POS_UNLOCK; 721 v |= FDPUT_POS_UNLOCK;
733 mutex_lock(&file->f_pos_lock); 722 mutex_lock(&file->f_pos_lock);
734 } 723 }
735 } 724 }
736 return v | (unsigned long)file; 725 return v;
737} 726}
738 727
739/* 728/*
diff --git a/fs/mount.h b/fs/mount.h
index a17458ca6f29..b29e42f05f34 100644
--- a/fs/mount.h
+++ b/fs/mount.h
@@ -19,13 +19,13 @@ struct mnt_pcp {
19}; 19};
20 20
21struct mountpoint { 21struct mountpoint {
22 struct list_head m_hash; 22 struct hlist_node m_hash;
23 struct dentry *m_dentry; 23 struct dentry *m_dentry;
24 int m_count; 24 int m_count;
25}; 25};
26 26
27struct mount { 27struct mount {
28 struct list_head mnt_hash; 28 struct hlist_node mnt_hash;
29 struct mount *mnt_parent; 29 struct mount *mnt_parent;
30 struct dentry *mnt_mountpoint; 30 struct dentry *mnt_mountpoint;
31 struct vfsmount mnt; 31 struct vfsmount mnt;
diff --git a/fs/namei.c b/fs/namei.c
index 2f730ef9b4b3..4b491b431990 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -1109,7 +1109,7 @@ static bool __follow_mount_rcu(struct nameidata *nd, struct path *path,
1109 return false; 1109 return false;
1110 1110
1111 if (!d_mountpoint(path->dentry)) 1111 if (!d_mountpoint(path->dentry))
1112 break; 1112 return true;
1113 1113
1114 mounted = __lookup_mnt(path->mnt, path->dentry); 1114 mounted = __lookup_mnt(path->mnt, path->dentry);
1115 if (!mounted) 1115 if (!mounted)
@@ -1125,20 +1125,7 @@ static bool __follow_mount_rcu(struct nameidata *nd, struct path *path,
1125 */ 1125 */
1126 *inode = path->dentry->d_inode; 1126 *inode = path->dentry->d_inode;
1127 } 1127 }
1128 return true; 1128 return read_seqretry(&mount_lock, nd->m_seq);
1129}
1130
1131static void follow_mount_rcu(struct nameidata *nd)
1132{
1133 while (d_mountpoint(nd->path.dentry)) {
1134 struct mount *mounted;
1135 mounted = __lookup_mnt(nd->path.mnt, nd->path.dentry);
1136 if (!mounted)
1137 break;
1138 nd->path.mnt = &mounted->mnt;
1139 nd->path.dentry = mounted->mnt.mnt_root;
1140 nd->seq = read_seqcount_begin(&nd->path.dentry->d_seq);
1141 }
1142} 1129}
1143 1130
1144static int follow_dotdot_rcu(struct nameidata *nd) 1131static int follow_dotdot_rcu(struct nameidata *nd)
@@ -1166,7 +1153,17 @@ static int follow_dotdot_rcu(struct nameidata *nd)
1166 break; 1153 break;
1167 nd->seq = read_seqcount_begin(&nd->path.dentry->d_seq); 1154 nd->seq = read_seqcount_begin(&nd->path.dentry->d_seq);
1168 } 1155 }
1169 follow_mount_rcu(nd); 1156 while (d_mountpoint(nd->path.dentry)) {
1157 struct mount *mounted;
1158 mounted = __lookup_mnt(nd->path.mnt, nd->path.dentry);
1159 if (!mounted)
1160 break;
1161 nd->path.mnt = &mounted->mnt;
1162 nd->path.dentry = mounted->mnt.mnt_root;
1163 nd->seq = read_seqcount_begin(&nd->path.dentry->d_seq);
1164 if (!read_seqretry(&mount_lock, nd->m_seq))
1165 goto failed;
1166 }
1170 nd->inode = nd->path.dentry->d_inode; 1167 nd->inode = nd->path.dentry->d_inode;
1171 return 0; 1168 return 0;
1172 1169
diff --git a/fs/namespace.c b/fs/namespace.c
index 22e536705c45..2ffc5a2905d4 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -23,11 +23,34 @@
23#include <linux/uaccess.h> 23#include <linux/uaccess.h>
24#include <linux/proc_ns.h> 24#include <linux/proc_ns.h>
25#include <linux/magic.h> 25#include <linux/magic.h>
26#include <linux/bootmem.h>
26#include "pnode.h" 27#include "pnode.h"
27#include "internal.h" 28#include "internal.h"
28 29
29#define HASH_SHIFT ilog2(PAGE_SIZE / sizeof(struct list_head)) 30static unsigned int m_hash_mask __read_mostly;
30#define HASH_SIZE (1UL << HASH_SHIFT) 31static unsigned int m_hash_shift __read_mostly;
32static unsigned int mp_hash_mask __read_mostly;
33static unsigned int mp_hash_shift __read_mostly;
34
35static __initdata unsigned long mhash_entries;
36static int __init set_mhash_entries(char *str)
37{
38 if (!str)
39 return 0;
40 mhash_entries = simple_strtoul(str, &str, 0);
41 return 1;
42}
43__setup("mhash_entries=", set_mhash_entries);
44
45static __initdata unsigned long mphash_entries;
46static int __init set_mphash_entries(char *str)
47{
48 if (!str)
49 return 0;
50 mphash_entries = simple_strtoul(str, &str, 0);
51 return 1;
52}
53__setup("mphash_entries=", set_mphash_entries);
31 54
32static int event; 55static int event;
33static DEFINE_IDA(mnt_id_ida); 56static DEFINE_IDA(mnt_id_ida);
@@ -36,8 +59,8 @@ static DEFINE_SPINLOCK(mnt_id_lock);
36static int mnt_id_start = 0; 59static int mnt_id_start = 0;
37static int mnt_group_start = 1; 60static int mnt_group_start = 1;
38 61
39static struct list_head *mount_hashtable __read_mostly; 62static struct hlist_head *mount_hashtable __read_mostly;
40static struct list_head *mountpoint_hashtable __read_mostly; 63static struct hlist_head *mountpoint_hashtable __read_mostly;
41static struct kmem_cache *mnt_cache __read_mostly; 64static struct kmem_cache *mnt_cache __read_mostly;
42static DECLARE_RWSEM(namespace_sem); 65static DECLARE_RWSEM(namespace_sem);
43 66
@@ -55,12 +78,19 @@ EXPORT_SYMBOL_GPL(fs_kobj);
55 */ 78 */
56__cacheline_aligned_in_smp DEFINE_SEQLOCK(mount_lock); 79__cacheline_aligned_in_smp DEFINE_SEQLOCK(mount_lock);
57 80
58static inline unsigned long hash(struct vfsmount *mnt, struct dentry *dentry) 81static inline struct hlist_head *m_hash(struct vfsmount *mnt, struct dentry *dentry)
59{ 82{
60 unsigned long tmp = ((unsigned long)mnt / L1_CACHE_BYTES); 83 unsigned long tmp = ((unsigned long)mnt / L1_CACHE_BYTES);
61 tmp += ((unsigned long)dentry / L1_CACHE_BYTES); 84 tmp += ((unsigned long)dentry / L1_CACHE_BYTES);
62 tmp = tmp + (tmp >> HASH_SHIFT); 85 tmp = tmp + (tmp >> m_hash_shift);
63 return tmp & (HASH_SIZE - 1); 86 return &mount_hashtable[tmp & m_hash_mask];
87}
88
89static inline struct hlist_head *mp_hash(struct dentry *dentry)
90{
91 unsigned long tmp = ((unsigned long)dentry / L1_CACHE_BYTES);
92 tmp = tmp + (tmp >> mp_hash_shift);
93 return &mountpoint_hashtable[tmp & mp_hash_mask];
64} 94}
65 95
66/* 96/*
@@ -187,7 +217,7 @@ static struct mount *alloc_vfsmnt(const char *name)
187 mnt->mnt_writers = 0; 217 mnt->mnt_writers = 0;
188#endif 218#endif
189 219
190 INIT_LIST_HEAD(&mnt->mnt_hash); 220 INIT_HLIST_NODE(&mnt->mnt_hash);
191 INIT_LIST_HEAD(&mnt->mnt_child); 221 INIT_LIST_HEAD(&mnt->mnt_child);
192 INIT_LIST_HEAD(&mnt->mnt_mounts); 222 INIT_LIST_HEAD(&mnt->mnt_mounts);
193 INIT_LIST_HEAD(&mnt->mnt_list); 223 INIT_LIST_HEAD(&mnt->mnt_list);
@@ -575,10 +605,10 @@ bool legitimize_mnt(struct vfsmount *bastard, unsigned seq)
575 */ 605 */
576struct mount *__lookup_mnt(struct vfsmount *mnt, struct dentry *dentry) 606struct mount *__lookup_mnt(struct vfsmount *mnt, struct dentry *dentry)
577{ 607{
578 struct list_head *head = mount_hashtable + hash(mnt, dentry); 608 struct hlist_head *head = m_hash(mnt, dentry);
579 struct mount *p; 609 struct mount *p;
580 610
581 list_for_each_entry_rcu(p, head, mnt_hash) 611 hlist_for_each_entry_rcu(p, head, mnt_hash)
582 if (&p->mnt_parent->mnt == mnt && p->mnt_mountpoint == dentry) 612 if (&p->mnt_parent->mnt == mnt && p->mnt_mountpoint == dentry)
583 return p; 613 return p;
584 return NULL; 614 return NULL;
@@ -590,13 +620,17 @@ struct mount *__lookup_mnt(struct vfsmount *mnt, struct dentry *dentry)
590 */ 620 */
591struct mount *__lookup_mnt_last(struct vfsmount *mnt, struct dentry *dentry) 621struct mount *__lookup_mnt_last(struct vfsmount *mnt, struct dentry *dentry)
592{ 622{
593 struct list_head *head = mount_hashtable + hash(mnt, dentry); 623 struct mount *p, *res;
594 struct mount *p; 624 res = p = __lookup_mnt(mnt, dentry);
595 625 if (!p)
596 list_for_each_entry_reverse(p, head, mnt_hash) 626 goto out;
597 if (&p->mnt_parent->mnt == mnt && p->mnt_mountpoint == dentry) 627 hlist_for_each_entry_continue(p, mnt_hash) {
598 return p; 628 if (&p->mnt_parent->mnt != mnt || p->mnt_mountpoint != dentry)
599 return NULL; 629 break;
630 res = p;
631 }
632out:
633 return res;
600} 634}
601 635
602/* 636/*
@@ -633,11 +667,11 @@ struct vfsmount *lookup_mnt(struct path *path)
633 667
634static struct mountpoint *new_mountpoint(struct dentry *dentry) 668static struct mountpoint *new_mountpoint(struct dentry *dentry)
635{ 669{
636 struct list_head *chain = mountpoint_hashtable + hash(NULL, dentry); 670 struct hlist_head *chain = mp_hash(dentry);
637 struct mountpoint *mp; 671 struct mountpoint *mp;
638 int ret; 672 int ret;
639 673
640 list_for_each_entry(mp, chain, m_hash) { 674 hlist_for_each_entry(mp, chain, m_hash) {
641 if (mp->m_dentry == dentry) { 675 if (mp->m_dentry == dentry) {
642 /* might be worth a WARN_ON() */ 676 /* might be worth a WARN_ON() */
643 if (d_unlinked(dentry)) 677 if (d_unlinked(dentry))
@@ -659,7 +693,7 @@ static struct mountpoint *new_mountpoint(struct dentry *dentry)
659 693
660 mp->m_dentry = dentry; 694 mp->m_dentry = dentry;
661 mp->m_count = 1; 695 mp->m_count = 1;
662 list_add(&mp->m_hash, chain); 696 hlist_add_head(&mp->m_hash, chain);
663 return mp; 697 return mp;
664} 698}
665 699
@@ -670,7 +704,7 @@ static void put_mountpoint(struct mountpoint *mp)
670 spin_lock(&dentry->d_lock); 704 spin_lock(&dentry->d_lock);
671 dentry->d_flags &= ~DCACHE_MOUNTED; 705 dentry->d_flags &= ~DCACHE_MOUNTED;
672 spin_unlock(&dentry->d_lock); 706 spin_unlock(&dentry->d_lock);
673 list_del(&mp->m_hash); 707 hlist_del(&mp->m_hash);
674 kfree(mp); 708 kfree(mp);
675 } 709 }
676} 710}
@@ -712,7 +746,7 @@ static void detach_mnt(struct mount *mnt, struct path *old_path)
712 mnt->mnt_parent = mnt; 746 mnt->mnt_parent = mnt;
713 mnt->mnt_mountpoint = mnt->mnt.mnt_root; 747 mnt->mnt_mountpoint = mnt->mnt.mnt_root;
714 list_del_init(&mnt->mnt_child); 748 list_del_init(&mnt->mnt_child);
715 list_del_init(&mnt->mnt_hash); 749 hlist_del_init_rcu(&mnt->mnt_hash);
716 put_mountpoint(mnt->mnt_mp); 750 put_mountpoint(mnt->mnt_mp);
717 mnt->mnt_mp = NULL; 751 mnt->mnt_mp = NULL;
718} 752}
@@ -739,15 +773,14 @@ static void attach_mnt(struct mount *mnt,
739 struct mountpoint *mp) 773 struct mountpoint *mp)
740{ 774{
741 mnt_set_mountpoint(parent, mp, mnt); 775 mnt_set_mountpoint(parent, mp, mnt);
742 list_add_tail(&mnt->mnt_hash, mount_hashtable + 776 hlist_add_head_rcu(&mnt->mnt_hash, m_hash(&parent->mnt, mp->m_dentry));
743 hash(&parent->mnt, mp->m_dentry));
744 list_add_tail(&mnt->mnt_child, &parent->mnt_mounts); 777 list_add_tail(&mnt->mnt_child, &parent->mnt_mounts);
745} 778}
746 779
747/* 780/*
748 * vfsmount lock must be held for write 781 * vfsmount lock must be held for write
749 */ 782 */
750static void commit_tree(struct mount *mnt) 783static void commit_tree(struct mount *mnt, struct mount *shadows)
751{ 784{
752 struct mount *parent = mnt->mnt_parent; 785 struct mount *parent = mnt->mnt_parent;
753 struct mount *m; 786 struct mount *m;
@@ -762,8 +795,11 @@ static void commit_tree(struct mount *mnt)
762 795
763 list_splice(&head, n->list.prev); 796 list_splice(&head, n->list.prev);
764 797
765 list_add_tail(&mnt->mnt_hash, mount_hashtable + 798 if (shadows)
766 hash(&parent->mnt, mnt->mnt_mountpoint)); 799 hlist_add_after_rcu(&shadows->mnt_hash, &mnt->mnt_hash);
800 else
801 hlist_add_head_rcu(&mnt->mnt_hash,
802 m_hash(&parent->mnt, mnt->mnt_mountpoint));
767 list_add_tail(&mnt->mnt_child, &parent->mnt_mounts); 803 list_add_tail(&mnt->mnt_child, &parent->mnt_mounts);
768 touch_mnt_namespace(n); 804 touch_mnt_namespace(n);
769} 805}
@@ -1153,26 +1189,28 @@ int may_umount(struct vfsmount *mnt)
1153 1189
1154EXPORT_SYMBOL(may_umount); 1190EXPORT_SYMBOL(may_umount);
1155 1191
1156static LIST_HEAD(unmounted); /* protected by namespace_sem */ 1192static HLIST_HEAD(unmounted); /* protected by namespace_sem */
1157 1193
1158static void namespace_unlock(void) 1194static void namespace_unlock(void)
1159{ 1195{
1160 struct mount *mnt; 1196 struct mount *mnt;
1161 LIST_HEAD(head); 1197 struct hlist_head head = unmounted;
1162 1198
1163 if (likely(list_empty(&unmounted))) { 1199 if (likely(hlist_empty(&head))) {
1164 up_write(&namespace_sem); 1200 up_write(&namespace_sem);
1165 return; 1201 return;
1166 } 1202 }
1167 1203
1168 list_splice_init(&unmounted, &head); 1204 head.first->pprev = &head.first;
1205 INIT_HLIST_HEAD(&unmounted);
1206
1169 up_write(&namespace_sem); 1207 up_write(&namespace_sem);
1170 1208
1171 synchronize_rcu(); 1209 synchronize_rcu();
1172 1210
1173 while (!list_empty(&head)) { 1211 while (!hlist_empty(&head)) {
1174 mnt = list_first_entry(&head, struct mount, mnt_hash); 1212 mnt = hlist_entry(head.first, struct mount, mnt_hash);
1175 list_del_init(&mnt->mnt_hash); 1213 hlist_del_init(&mnt->mnt_hash);
1176 if (mnt->mnt_ex_mountpoint.mnt) 1214 if (mnt->mnt_ex_mountpoint.mnt)
1177 path_put(&mnt->mnt_ex_mountpoint); 1215 path_put(&mnt->mnt_ex_mountpoint);
1178 mntput(&mnt->mnt); 1216 mntput(&mnt->mnt);
@@ -1193,16 +1231,19 @@ static inline void namespace_lock(void)
1193 */ 1231 */
1194void umount_tree(struct mount *mnt, int how) 1232void umount_tree(struct mount *mnt, int how)
1195{ 1233{
1196 LIST_HEAD(tmp_list); 1234 HLIST_HEAD(tmp_list);
1197 struct mount *p; 1235 struct mount *p;
1236 struct mount *last = NULL;
1198 1237
1199 for (p = mnt; p; p = next_mnt(p, mnt)) 1238 for (p = mnt; p; p = next_mnt(p, mnt)) {
1200 list_move(&p->mnt_hash, &tmp_list); 1239 hlist_del_init_rcu(&p->mnt_hash);
1240 hlist_add_head(&p->mnt_hash, &tmp_list);
1241 }
1201 1242
1202 if (how) 1243 if (how)
1203 propagate_umount(&tmp_list); 1244 propagate_umount(&tmp_list);
1204 1245
1205 list_for_each_entry(p, &tmp_list, mnt_hash) { 1246 hlist_for_each_entry(p, &tmp_list, mnt_hash) {
1206 list_del_init(&p->mnt_expire); 1247 list_del_init(&p->mnt_expire);
1207 list_del_init(&p->mnt_list); 1248 list_del_init(&p->mnt_list);
1208 __touch_mnt_namespace(p->mnt_ns); 1249 __touch_mnt_namespace(p->mnt_ns);
@@ -1220,8 +1261,13 @@ void umount_tree(struct mount *mnt, int how)
1220 p->mnt_mp = NULL; 1261 p->mnt_mp = NULL;
1221 } 1262 }
1222 change_mnt_propagation(p, MS_PRIVATE); 1263 change_mnt_propagation(p, MS_PRIVATE);
1264 last = p;
1265 }
1266 if (last) {
1267 last->mnt_hash.next = unmounted.first;
1268 unmounted.first = tmp_list.first;
1269 unmounted.first->pprev = &unmounted.first;
1223 } 1270 }
1224 list_splice(&tmp_list, &unmounted);
1225} 1271}
1226 1272
1227static void shrink_submounts(struct mount *mnt); 1273static void shrink_submounts(struct mount *mnt);
@@ -1605,24 +1651,23 @@ static int attach_recursive_mnt(struct mount *source_mnt,
1605 struct mountpoint *dest_mp, 1651 struct mountpoint *dest_mp,
1606 struct path *parent_path) 1652 struct path *parent_path)
1607{ 1653{
1608 LIST_HEAD(tree_list); 1654 HLIST_HEAD(tree_list);
1609 struct mount *child, *p; 1655 struct mount *child, *p;
1656 struct hlist_node *n;
1610 int err; 1657 int err;
1611 1658
1612 if (IS_MNT_SHARED(dest_mnt)) { 1659 if (IS_MNT_SHARED(dest_mnt)) {
1613 err = invent_group_ids(source_mnt, true); 1660 err = invent_group_ids(source_mnt, true);
1614 if (err) 1661 if (err)
1615 goto out; 1662 goto out;
1616 } 1663 err = propagate_mnt(dest_mnt, dest_mp, source_mnt, &tree_list);
1617 err = propagate_mnt(dest_mnt, dest_mp, source_mnt, &tree_list); 1664 if (err)
1618 if (err) 1665 goto out_cleanup_ids;
1619 goto out_cleanup_ids; 1666 lock_mount_hash();
1620
1621 lock_mount_hash();
1622
1623 if (IS_MNT_SHARED(dest_mnt)) {
1624 for (p = source_mnt; p; p = next_mnt(p, source_mnt)) 1667 for (p = source_mnt; p; p = next_mnt(p, source_mnt))
1625 set_mnt_shared(p); 1668 set_mnt_shared(p);
1669 } else {
1670 lock_mount_hash();
1626 } 1671 }
1627 if (parent_path) { 1672 if (parent_path) {
1628 detach_mnt(source_mnt, parent_path); 1673 detach_mnt(source_mnt, parent_path);
@@ -1630,20 +1675,22 @@ static int attach_recursive_mnt(struct mount *source_mnt,
1630 touch_mnt_namespace(source_mnt->mnt_ns); 1675 touch_mnt_namespace(source_mnt->mnt_ns);
1631 } else { 1676 } else {
1632 mnt_set_mountpoint(dest_mnt, dest_mp, source_mnt); 1677 mnt_set_mountpoint(dest_mnt, dest_mp, source_mnt);
1633 commit_tree(source_mnt); 1678 commit_tree(source_mnt, NULL);
1634 } 1679 }
1635 1680
1636 list_for_each_entry_safe(child, p, &tree_list, mnt_hash) { 1681 hlist_for_each_entry_safe(child, n, &tree_list, mnt_hash) {
1637 list_del_init(&child->mnt_hash); 1682 struct mount *q;
1638 commit_tree(child); 1683 hlist_del_init(&child->mnt_hash);
1684 q = __lookup_mnt_last(&child->mnt_parent->mnt,
1685 child->mnt_mountpoint);
1686 commit_tree(child, q);
1639 } 1687 }
1640 unlock_mount_hash(); 1688 unlock_mount_hash();
1641 1689
1642 return 0; 1690 return 0;
1643 1691
1644 out_cleanup_ids: 1692 out_cleanup_ids:
1645 if (IS_MNT_SHARED(dest_mnt)) 1693 cleanup_group_ids(source_mnt, NULL);
1646 cleanup_group_ids(source_mnt, NULL);
1647 out: 1694 out:
1648 return err; 1695 return err;
1649} 1696}
@@ -2777,18 +2824,24 @@ void __init mnt_init(void)
2777 mnt_cache = kmem_cache_create("mnt_cache", sizeof(struct mount), 2824 mnt_cache = kmem_cache_create("mnt_cache", sizeof(struct mount),
2778 0, SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL); 2825 0, SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL);
2779 2826
2780 mount_hashtable = (struct list_head *)__get_free_page(GFP_ATOMIC); 2827 mount_hashtable = alloc_large_system_hash("Mount-cache",
2781 mountpoint_hashtable = (struct list_head *)__get_free_page(GFP_ATOMIC); 2828 sizeof(struct hlist_head),
2829 mhash_entries, 19,
2830 0,
2831 &m_hash_shift, &m_hash_mask, 0, 0);
2832 mountpoint_hashtable = alloc_large_system_hash("Mountpoint-cache",
2833 sizeof(struct hlist_head),
2834 mphash_entries, 19,
2835 0,
2836 &mp_hash_shift, &mp_hash_mask, 0, 0);
2782 2837
2783 if (!mount_hashtable || !mountpoint_hashtable) 2838 if (!mount_hashtable || !mountpoint_hashtable)
2784 panic("Failed to allocate mount hash table\n"); 2839 panic("Failed to allocate mount hash table\n");
2785 2840
2786 printk(KERN_INFO "Mount-cache hash table entries: %lu\n", HASH_SIZE); 2841 for (u = 0; u <= m_hash_mask; u++)
2787 2842 INIT_HLIST_HEAD(&mount_hashtable[u]);
2788 for (u = 0; u < HASH_SIZE; u++) 2843 for (u = 0; u <= mp_hash_mask; u++)
2789 INIT_LIST_HEAD(&mount_hashtable[u]); 2844 INIT_HLIST_HEAD(&mountpoint_hashtable[u]);
2790 for (u = 0; u < HASH_SIZE; u++)
2791 INIT_LIST_HEAD(&mountpoint_hashtable[u]);
2792 2845
2793 kernfs_init(); 2846 kernfs_init();
2794 2847
diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
index 017d3cb5e99b..6d7be3f80356 100644
--- a/fs/nfsd/vfs.c
+++ b/fs/nfsd/vfs.c
@@ -449,6 +449,7 @@ nfsd_setattr(struct svc_rqst *rqstp, struct svc_fh *fhp, struct iattr *iap,
449 fh_lock(fhp); 449 fh_lock(fhp);
450 host_err = notify_change(dentry, iap, NULL); 450 host_err = notify_change(dentry, iap, NULL);
451 fh_unlock(fhp); 451 fh_unlock(fhp);
452 err = nfserrno(host_err);
452 453
453out_put_write_access: 454out_put_write_access:
454 if (size_change) 455 if (size_change)
diff --git a/fs/ocfs2/stackglue.c b/fs/ocfs2/stackglue.c
index 1324e6600e57..ca5ce14cbddc 100644
--- a/fs/ocfs2/stackglue.c
+++ b/fs/ocfs2/stackglue.c
@@ -346,7 +346,9 @@ int ocfs2_cluster_connect(const char *stack_name,
346 346
347 strlcpy(new_conn->cc_name, group, GROUP_NAME_MAX + 1); 347 strlcpy(new_conn->cc_name, group, GROUP_NAME_MAX + 1);
348 new_conn->cc_namelen = grouplen; 348 new_conn->cc_namelen = grouplen;
349 strlcpy(new_conn->cc_cluster_name, cluster_name, CLUSTER_NAME_MAX + 1); 349 if (cluster_name_len)
350 strlcpy(new_conn->cc_cluster_name, cluster_name,
351 CLUSTER_NAME_MAX + 1);
350 new_conn->cc_cluster_name_len = cluster_name_len; 352 new_conn->cc_cluster_name_len = cluster_name_len;
351 new_conn->cc_recovery_handler = recovery_handler; 353 new_conn->cc_recovery_handler = recovery_handler;
352 new_conn->cc_recovery_data = recovery_data; 354 new_conn->cc_recovery_data = recovery_data;
diff --git a/fs/pnode.c b/fs/pnode.c
index c7221bb19801..88396df725b4 100644
--- a/fs/pnode.c
+++ b/fs/pnode.c
@@ -220,14 +220,14 @@ static struct mount *get_source(struct mount *dest,
220 * @tree_list : list of heads of trees to be attached. 220 * @tree_list : list of heads of trees to be attached.
221 */ 221 */
222int propagate_mnt(struct mount *dest_mnt, struct mountpoint *dest_mp, 222int propagate_mnt(struct mount *dest_mnt, struct mountpoint *dest_mp,
223 struct mount *source_mnt, struct list_head *tree_list) 223 struct mount *source_mnt, struct hlist_head *tree_list)
224{ 224{
225 struct user_namespace *user_ns = current->nsproxy->mnt_ns->user_ns; 225 struct user_namespace *user_ns = current->nsproxy->mnt_ns->user_ns;
226 struct mount *m, *child; 226 struct mount *m, *child;
227 int ret = 0; 227 int ret = 0;
228 struct mount *prev_dest_mnt = dest_mnt; 228 struct mount *prev_dest_mnt = dest_mnt;
229 struct mount *prev_src_mnt = source_mnt; 229 struct mount *prev_src_mnt = source_mnt;
230 LIST_HEAD(tmp_list); 230 HLIST_HEAD(tmp_list);
231 231
232 for (m = propagation_next(dest_mnt, dest_mnt); m; 232 for (m = propagation_next(dest_mnt, dest_mnt); m;
233 m = propagation_next(m, dest_mnt)) { 233 m = propagation_next(m, dest_mnt)) {
@@ -246,27 +246,29 @@ int propagate_mnt(struct mount *dest_mnt, struct mountpoint *dest_mp,
246 child = copy_tree(source, source->mnt.mnt_root, type); 246 child = copy_tree(source, source->mnt.mnt_root, type);
247 if (IS_ERR(child)) { 247 if (IS_ERR(child)) {
248 ret = PTR_ERR(child); 248 ret = PTR_ERR(child);
249 list_splice(tree_list, tmp_list.prev); 249 tmp_list = *tree_list;
250 tmp_list.first->pprev = &tmp_list.first;
251 INIT_HLIST_HEAD(tree_list);
250 goto out; 252 goto out;
251 } 253 }
252 254
253 if (is_subdir(dest_mp->m_dentry, m->mnt.mnt_root)) { 255 if (is_subdir(dest_mp->m_dentry, m->mnt.mnt_root)) {
254 mnt_set_mountpoint(m, dest_mp, child); 256 mnt_set_mountpoint(m, dest_mp, child);
255 list_add_tail(&child->mnt_hash, tree_list); 257 hlist_add_head(&child->mnt_hash, tree_list);
256 } else { 258 } else {
257 /* 259 /*
258 * This can happen if the parent mount was bind mounted 260 * This can happen if the parent mount was bind mounted
259 * on some subdirectory of a shared/slave mount. 261 * on some subdirectory of a shared/slave mount.
260 */ 262 */
261 list_add_tail(&child->mnt_hash, &tmp_list); 263 hlist_add_head(&child->mnt_hash, &tmp_list);
262 } 264 }
263 prev_dest_mnt = m; 265 prev_dest_mnt = m;
264 prev_src_mnt = child; 266 prev_src_mnt = child;
265 } 267 }
266out: 268out:
267 lock_mount_hash(); 269 lock_mount_hash();
268 while (!list_empty(&tmp_list)) { 270 while (!hlist_empty(&tmp_list)) {
269 child = list_first_entry(&tmp_list, struct mount, mnt_hash); 271 child = hlist_entry(tmp_list.first, struct mount, mnt_hash);
270 umount_tree(child, 0); 272 umount_tree(child, 0);
271 } 273 }
272 unlock_mount_hash(); 274 unlock_mount_hash();
@@ -338,8 +340,10 @@ static void __propagate_umount(struct mount *mnt)
338 * umount the child only if the child has no 340 * umount the child only if the child has no
339 * other children 341 * other children
340 */ 342 */
341 if (child && list_empty(&child->mnt_mounts)) 343 if (child && list_empty(&child->mnt_mounts)) {
342 list_move_tail(&child->mnt_hash, &mnt->mnt_hash); 344 hlist_del_init_rcu(&child->mnt_hash);
345 hlist_add_before_rcu(&child->mnt_hash, &mnt->mnt_hash);
346 }
343 } 347 }
344} 348}
345 349
@@ -350,11 +354,11 @@ static void __propagate_umount(struct mount *mnt)
350 * 354 *
351 * vfsmount lock must be held for write 355 * vfsmount lock must be held for write
352 */ 356 */
353int propagate_umount(struct list_head *list) 357int propagate_umount(struct hlist_head *list)
354{ 358{
355 struct mount *mnt; 359 struct mount *mnt;
356 360
357 list_for_each_entry(mnt, list, mnt_hash) 361 hlist_for_each_entry(mnt, list, mnt_hash)
358 __propagate_umount(mnt); 362 __propagate_umount(mnt);
359 return 0; 363 return 0;
360} 364}
diff --git a/fs/pnode.h b/fs/pnode.h
index 59e7eda1851e..fc28a27fa892 100644
--- a/fs/pnode.h
+++ b/fs/pnode.h
@@ -36,8 +36,8 @@ static inline void set_mnt_shared(struct mount *mnt)
36 36
37void change_mnt_propagation(struct mount *, int); 37void change_mnt_propagation(struct mount *, int);
38int propagate_mnt(struct mount *, struct mountpoint *, struct mount *, 38int propagate_mnt(struct mount *, struct mountpoint *, struct mount *,
39 struct list_head *); 39 struct hlist_head *);
40int propagate_umount(struct list_head *); 40int propagate_umount(struct hlist_head *);
41int propagate_mount_busy(struct mount *, int); 41int propagate_mount_busy(struct mount *, int);
42void mnt_release_group_id(struct mount *); 42void mnt_release_group_id(struct mount *);
43int get_dominating_id(struct mount *mnt, const struct path *root); 43int get_dominating_id(struct mount *mnt, const struct path *root);
diff --git a/fs/read_write.c b/fs/read_write.c
index 54e19b9392dc..28cc9c810744 100644
--- a/fs/read_write.c
+++ b/fs/read_write.c
@@ -307,7 +307,7 @@ SYSCALL_DEFINE5(llseek, unsigned int, fd, unsigned long, offset_high,
307 unsigned int, whence) 307 unsigned int, whence)
308{ 308{
309 int retval; 309 int retval;
310 struct fd f = fdget(fd); 310 struct fd f = fdget_pos(fd);
311 loff_t offset; 311 loff_t offset;
312 312
313 if (!f.file) 313 if (!f.file)
@@ -327,7 +327,7 @@ SYSCALL_DEFINE5(llseek, unsigned int, fd, unsigned long, offset_high,
327 retval = 0; 327 retval = 0;
328 } 328 }
329out_putf: 329out_putf:
330 fdput(f); 330 fdput_pos(f);
331 return retval; 331 return retval;
332} 332}
333#endif 333#endif
diff --git a/include/linux/bitops.h b/include/linux/bitops.h
index abc9ca778456..be5fd38bd5a0 100644
--- a/include/linux/bitops.h
+++ b/include/linux/bitops.h
@@ -196,6 +196,21 @@ static inline unsigned long __ffs64(u64 word)
196 196
197#ifdef __KERNEL__ 197#ifdef __KERNEL__
198 198
199#ifndef set_mask_bits
200#define set_mask_bits(ptr, _mask, _bits) \
201({ \
202 const typeof(*ptr) mask = (_mask), bits = (_bits); \
203 typeof(*ptr) old, new; \
204 \
205 do { \
206 old = ACCESS_ONCE(*ptr); \
207 new = (old & ~mask) | bits; \
208 } while (cmpxchg(ptr, old, new) != old); \
209 \
210 new; \
211})
212#endif
213
199#ifndef find_last_bit 214#ifndef find_last_bit
200/** 215/**
201 * find_last_bit - find the last set bit in a memory region 216 * find_last_bit - find the last set bit in a memory region
diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h
index 4e4cc28623ad..4cdb3a17bcb5 100644
--- a/include/linux/ftrace_event.h
+++ b/include/linux/ftrace_event.h
@@ -495,10 +495,6 @@ enum {
495 FILTER_TRACE_FN, 495 FILTER_TRACE_FN,
496}; 496};
497 497
498#define EVENT_STORAGE_SIZE 128
499extern struct mutex event_storage_mutex;
500extern char event_storage[EVENT_STORAGE_SIZE];
501
502extern int trace_event_raw_init(struct ftrace_event_call *call); 498extern int trace_event_raw_init(struct ftrace_event_call *call);
503extern int trace_define_field(struct ftrace_event_call *call, const char *type, 499extern int trace_define_field(struct ftrace_event_call *call, const char *type,
504 const char *name, int offset, int size, 500 const char *name, int offset, int size,
diff --git a/include/linux/netdev_features.h b/include/linux/netdev_features.h
index 1005ebf17575..5a09a48f2658 100644
--- a/include/linux/netdev_features.h
+++ b/include/linux/netdev_features.h
@@ -163,4 +163,11 @@ enum {
163/* changeable features with no special hardware requirements */ 163/* changeable features with no special hardware requirements */
164#define NETIF_F_SOFT_FEATURES (NETIF_F_GSO | NETIF_F_GRO) 164#define NETIF_F_SOFT_FEATURES (NETIF_F_GSO | NETIF_F_GRO)
165 165
166#define NETIF_F_VLAN_FEATURES (NETIF_F_HW_VLAN_CTAG_FILTER | \
167 NETIF_F_HW_VLAN_CTAG_RX | \
168 NETIF_F_HW_VLAN_CTAG_TX | \
169 NETIF_F_HW_VLAN_STAG_FILTER | \
170 NETIF_F_HW_VLAN_STAG_RX | \
171 NETIF_F_HW_VLAN_STAG_TX)
172
166#endif /* _LINUX_NETDEV_FEATURES_H */ 173#endif /* _LINUX_NETDEV_FEATURES_H */
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index e8eeebd49a98..daafd9561cbc 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -3014,7 +3014,7 @@ struct sk_buff *skb_gso_segment(struct sk_buff *skb, netdev_features_t features)
3014{ 3014{
3015 return __skb_gso_segment(skb, features, true); 3015 return __skb_gso_segment(skb, features, true);
3016} 3016}
3017__be16 skb_network_protocol(struct sk_buff *skb); 3017__be16 skb_network_protocol(struct sk_buff *skb, int *depth);
3018 3018
3019static inline bool can_checksum_protocol(netdev_features_t features, 3019static inline bool can_checksum_protocol(netdev_features_t features,
3020 __be16 protocol) 3020 __be16 protocol)
diff --git a/include/linux/rmap.h b/include/linux/rmap.h
index 1da693d51255..b66c2110cb1f 100644
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -250,8 +250,7 @@ struct rmap_walk_control {
250 int (*rmap_one)(struct page *page, struct vm_area_struct *vma, 250 int (*rmap_one)(struct page *page, struct vm_area_struct *vma,
251 unsigned long addr, void *arg); 251 unsigned long addr, void *arg);
252 int (*done)(struct page *page); 252 int (*done)(struct page *page);
253 int (*file_nonlinear)(struct page *, struct address_space *, 253 int (*file_nonlinear)(struct page *, struct address_space *, void *arg);
254 struct vm_area_struct *vma);
255 struct anon_vma *(*anon_lock)(struct page *page); 254 struct anon_vma *(*anon_lock)(struct page *page);
256 bool (*invalid_vma)(struct vm_area_struct *vma, void *arg); 255 bool (*invalid_vma)(struct vm_area_struct *vma, void *arg);
257}; 256};
diff --git a/include/linux/security.h b/include/linux/security.h
index 5623a7f965b7..2fc42d191f79 100644
--- a/include/linux/security.h
+++ b/include/linux/security.h
@@ -1040,6 +1040,7 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts)
1040 * Allocate a security structure to the xp->security field; the security 1040 * Allocate a security structure to the xp->security field; the security
1041 * field is initialized to NULL when the xfrm_policy is allocated. 1041 * field is initialized to NULL when the xfrm_policy is allocated.
1042 * Return 0 if operation was successful (memory to allocate, legal context) 1042 * Return 0 if operation was successful (memory to allocate, legal context)
1043 * @gfp is to specify the context for the allocation
1043 * @xfrm_policy_clone_security: 1044 * @xfrm_policy_clone_security:
1044 * @old_ctx contains an existing xfrm_sec_ctx. 1045 * @old_ctx contains an existing xfrm_sec_ctx.
1045 * @new_ctxp contains a new xfrm_sec_ctx being cloned from old. 1046 * @new_ctxp contains a new xfrm_sec_ctx being cloned from old.
@@ -1683,7 +1684,7 @@ struct security_operations {
1683 1684
1684#ifdef CONFIG_SECURITY_NETWORK_XFRM 1685#ifdef CONFIG_SECURITY_NETWORK_XFRM
1685 int (*xfrm_policy_alloc_security) (struct xfrm_sec_ctx **ctxp, 1686 int (*xfrm_policy_alloc_security) (struct xfrm_sec_ctx **ctxp,
1686 struct xfrm_user_sec_ctx *sec_ctx); 1687 struct xfrm_user_sec_ctx *sec_ctx, gfp_t gfp);
1687 int (*xfrm_policy_clone_security) (struct xfrm_sec_ctx *old_ctx, struct xfrm_sec_ctx **new_ctx); 1688 int (*xfrm_policy_clone_security) (struct xfrm_sec_ctx *old_ctx, struct xfrm_sec_ctx **new_ctx);
1688 void (*xfrm_policy_free_security) (struct xfrm_sec_ctx *ctx); 1689 void (*xfrm_policy_free_security) (struct xfrm_sec_ctx *ctx);
1689 int (*xfrm_policy_delete_security) (struct xfrm_sec_ctx *ctx); 1690 int (*xfrm_policy_delete_security) (struct xfrm_sec_ctx *ctx);
@@ -2859,7 +2860,8 @@ static inline void security_skb_owned_by(struct sk_buff *skb, struct sock *sk)
2859 2860
2860#ifdef CONFIG_SECURITY_NETWORK_XFRM 2861#ifdef CONFIG_SECURITY_NETWORK_XFRM
2861 2862
2862int security_xfrm_policy_alloc(struct xfrm_sec_ctx **ctxp, struct xfrm_user_sec_ctx *sec_ctx); 2863int security_xfrm_policy_alloc(struct xfrm_sec_ctx **ctxp,
2864 struct xfrm_user_sec_ctx *sec_ctx, gfp_t gfp);
2863int security_xfrm_policy_clone(struct xfrm_sec_ctx *old_ctx, struct xfrm_sec_ctx **new_ctxp); 2865int security_xfrm_policy_clone(struct xfrm_sec_ctx *old_ctx, struct xfrm_sec_ctx **new_ctxp);
2864void security_xfrm_policy_free(struct xfrm_sec_ctx *ctx); 2866void security_xfrm_policy_free(struct xfrm_sec_ctx *ctx);
2865int security_xfrm_policy_delete(struct xfrm_sec_ctx *ctx); 2867int security_xfrm_policy_delete(struct xfrm_sec_ctx *ctx);
@@ -2877,7 +2879,9 @@ void security_skb_classify_flow(struct sk_buff *skb, struct flowi *fl);
2877 2879
2878#else /* CONFIG_SECURITY_NETWORK_XFRM */ 2880#else /* CONFIG_SECURITY_NETWORK_XFRM */
2879 2881
2880static inline int security_xfrm_policy_alloc(struct xfrm_sec_ctx **ctxp, struct xfrm_user_sec_ctx *sec_ctx) 2882static inline int security_xfrm_policy_alloc(struct xfrm_sec_ctx **ctxp,
2883 struct xfrm_user_sec_ctx *sec_ctx,
2884 gfp_t gfp)
2881{ 2885{
2882 return 0; 2886 return 0;
2883} 2887}
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 5e1e6f2d98c2..15ede6a823a6 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -2451,8 +2451,8 @@ int skb_splice_bits(struct sk_buff *skb, unsigned int offset,
2451 unsigned int flags); 2451 unsigned int flags);
2452void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to); 2452void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to);
2453unsigned int skb_zerocopy_headlen(const struct sk_buff *from); 2453unsigned int skb_zerocopy_headlen(const struct sk_buff *from);
2454void skb_zerocopy(struct sk_buff *to, const struct sk_buff *from, 2454int skb_zerocopy(struct sk_buff *to, struct sk_buff *from,
2455 int len, int hlen); 2455 int len, int hlen);
2456void skb_split(struct sk_buff *skb, struct sk_buff *skb1, const u32 len); 2456void skb_split(struct sk_buff *skb, struct sk_buff *skb1, const u32 len);
2457int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen); 2457int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen);
2458void skb_scrub_packet(struct sk_buff *skb, bool xnet); 2458void skb_scrub_packet(struct sk_buff *skb, bool xnet);
diff --git a/include/linux/usb/cdc_ncm.h b/include/linux/usb/cdc_ncm.h
index c3fa80745996..2c14d9cdd57a 100644
--- a/include/linux/usb/cdc_ncm.h
+++ b/include/linux/usb/cdc_ncm.h
@@ -88,6 +88,7 @@
88#define cdc_ncm_data_intf_is_mbim(x) ((x)->desc.bInterfaceProtocol == USB_CDC_MBIM_PROTO_NTB) 88#define cdc_ncm_data_intf_is_mbim(x) ((x)->desc.bInterfaceProtocol == USB_CDC_MBIM_PROTO_NTB)
89 89
90struct cdc_ncm_ctx { 90struct cdc_ncm_ctx {
91 struct usb_cdc_ncm_ntb_parameters ncm_parm;
91 struct hrtimer tx_timer; 92 struct hrtimer tx_timer;
92 struct tasklet_struct bh; 93 struct tasklet_struct bh;
93 94
diff --git a/include/linux/usb/usbnet.h b/include/linux/usb/usbnet.h
index e303eef94dd5..0662e98fef72 100644
--- a/include/linux/usb/usbnet.h
+++ b/include/linux/usb/usbnet.h
@@ -30,7 +30,7 @@ struct usbnet {
30 struct driver_info *driver_info; 30 struct driver_info *driver_info;
31 const char *driver_name; 31 const char *driver_name;
32 void *driver_priv; 32 void *driver_priv;
33 wait_queue_head_t *wait; 33 wait_queue_head_t wait;
34 struct mutex phy_mutex; 34 struct mutex phy_mutex;
35 unsigned char suspend_count; 35 unsigned char suspend_count;
36 unsigned char pkt_cnt, pkt_err; 36 unsigned char pkt_cnt, pkt_err;
diff --git a/include/net/if_inet6.h b/include/net/if_inet6.h
index 9650a3ffd2d2..b4956a5fcc3f 100644
--- a/include/net/if_inet6.h
+++ b/include/net/if_inet6.h
@@ -31,8 +31,10 @@
31#define IF_PREFIX_AUTOCONF 0x02 31#define IF_PREFIX_AUTOCONF 0x02
32 32
33enum { 33enum {
34 INET6_IFADDR_STATE_PREDAD,
34 INET6_IFADDR_STATE_DAD, 35 INET6_IFADDR_STATE_DAD,
35 INET6_IFADDR_STATE_POSTDAD, 36 INET6_IFADDR_STATE_POSTDAD,
37 INET6_IFADDR_STATE_ERRDAD,
36 INET6_IFADDR_STATE_UP, 38 INET6_IFADDR_STATE_UP,
37 INET6_IFADDR_STATE_DEAD, 39 INET6_IFADDR_STATE_DEAD,
38}; 40};
@@ -58,7 +60,7 @@ struct inet6_ifaddr {
58 unsigned long cstamp; /* created timestamp */ 60 unsigned long cstamp; /* created timestamp */
59 unsigned long tstamp; /* updated timestamp */ 61 unsigned long tstamp; /* updated timestamp */
60 62
61 struct timer_list dad_timer; 63 struct delayed_work dad_work;
62 64
63 struct inet6_dev *idev; 65 struct inet6_dev *idev;
64 struct rt6_info *rt; 66 struct rt6_info *rt;
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 8c4dd63134d4..743accec6c76 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -480,20 +480,21 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
480#ifdef CONFIG_SYN_COOKIES 480#ifdef CONFIG_SYN_COOKIES
481#include <linux/ktime.h> 481#include <linux/ktime.h>
482 482
483/* Syncookies use a monotonic timer which increments every 64 seconds. 483/* Syncookies use a monotonic timer which increments every 60 seconds.
484 * This counter is used both as a hash input and partially encoded into 484 * This counter is used both as a hash input and partially encoded into
485 * the cookie value. A cookie is only validated further if the delta 485 * the cookie value. A cookie is only validated further if the delta
486 * between the current counter value and the encoded one is less than this, 486 * between the current counter value and the encoded one is less than this,
487 * i.e. a sent cookie is valid only at most for 128 seconds (or less if 487 * i.e. a sent cookie is valid only at most for 2*60 seconds (or less if
488 * the counter advances immediately after a cookie is generated). 488 * the counter advances immediately after a cookie is generated).
489 */ 489 */
490#define MAX_SYNCOOKIE_AGE 2 490#define MAX_SYNCOOKIE_AGE 2
491 491
492static inline u32 tcp_cookie_time(void) 492static inline u32 tcp_cookie_time(void)
493{ 493{
494 struct timespec now; 494 u64 val = get_jiffies_64();
495 getnstimeofday(&now); 495
496 return now.tv_sec >> 6; /* 64 seconds granularity */ 496 do_div(val, 60 * HZ);
497 return val;
497} 498}
498 499
499u32 __cookie_v4_init_sequence(const struct iphdr *iph, const struct tcphdr *th, 500u32 __cookie_v4_init_sequence(const struct iphdr *iph, const struct tcphdr *th,
diff --git a/include/trace/ftrace.h b/include/trace/ftrace.h
index 1a8b28db3775..1ee19a24cc5f 100644
--- a/include/trace/ftrace.h
+++ b/include/trace/ftrace.h
@@ -310,15 +310,12 @@ static struct trace_event_functions ftrace_event_type_funcs_##call = { \
310#undef __array 310#undef __array
311#define __array(type, item, len) \ 311#define __array(type, item, len) \
312 do { \ 312 do { \
313 mutex_lock(&event_storage_mutex); \ 313 char *type_str = #type"["__stringify(len)"]"; \
314 BUILD_BUG_ON(len > MAX_FILTER_STR_VAL); \ 314 BUILD_BUG_ON(len > MAX_FILTER_STR_VAL); \
315 snprintf(event_storage, sizeof(event_storage), \ 315 ret = trace_define_field(event_call, type_str, #item, \
316 "%s[%d]", #type, len); \
317 ret = trace_define_field(event_call, event_storage, #item, \
318 offsetof(typeof(field), item), \ 316 offsetof(typeof(field), item), \
319 sizeof(field.item), \ 317 sizeof(field.item), \
320 is_signed_type(type), FILTER_OTHER); \ 318 is_signed_type(type), FILTER_OTHER); \
321 mutex_unlock(&event_storage_mutex); \
322 if (ret) \ 319 if (ret) \
323 return ret; \ 320 return ret; \
324 } while (0); 321 } while (0);
diff --git a/kernel/audit.c b/kernel/audit.c
index 3392d3e0254a..95a20f3f52f1 100644
--- a/kernel/audit.c
+++ b/kernel/audit.c
@@ -608,9 +608,19 @@ static int audit_netlink_ok(struct sk_buff *skb, u16 msg_type)
608 int err = 0; 608 int err = 0;
609 609
610 /* Only support the initial namespaces for now. */ 610 /* Only support the initial namespaces for now. */
611 /*
612 * We return ECONNREFUSED because it tricks userspace into thinking
613 * that audit was not configured into the kernel. Lots of users
614 * configure their PAM stack (because that's what the distro does)
615 * to reject login if unable to send messages to audit. If we return
616 * ECONNREFUSED the PAM stack thinks the kernel does not have audit
617 * configured in and will let login proceed. If we return EPERM
618 * userspace will reject all logins. This should be removed when we
619 * support non init namespaces!!
620 */
611 if ((current_user_ns() != &init_user_ns) || 621 if ((current_user_ns() != &init_user_ns) ||
612 (task_active_pid_ns(current) != &init_pid_ns)) 622 (task_active_pid_ns(current) != &init_pid_ns))
613 return -EPERM; 623 return -ECONNREFUSED;
614 624
615 switch (msg_type) { 625 switch (msg_type) {
616 case AUDIT_LIST: 626 case AUDIT_LIST:
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index 105f273b6f86..0c753ddd223b 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -4112,17 +4112,17 @@ static int create_css(struct cgroup *cgrp, struct cgroup_subsys *ss)
4112 4112
4113 err = percpu_ref_init(&css->refcnt, css_release); 4113 err = percpu_ref_init(&css->refcnt, css_release);
4114 if (err) 4114 if (err)
4115 goto err_free; 4115 goto err_free_css;
4116 4116
4117 init_css(css, ss, cgrp); 4117 init_css(css, ss, cgrp);
4118 4118
4119 err = cgroup_populate_dir(cgrp, 1 << ss->subsys_id); 4119 err = cgroup_populate_dir(cgrp, 1 << ss->subsys_id);
4120 if (err) 4120 if (err)
4121 goto err_free; 4121 goto err_free_percpu_ref;
4122 4122
4123 err = online_css(css); 4123 err = online_css(css);
4124 if (err) 4124 if (err)
4125 goto err_free; 4125 goto err_clear_dir;
4126 4126
4127 dget(cgrp->dentry); 4127 dget(cgrp->dentry);
4128 css_get(css->parent); 4128 css_get(css->parent);
@@ -4138,8 +4138,11 @@ static int create_css(struct cgroup *cgrp, struct cgroup_subsys *ss)
4138 4138
4139 return 0; 4139 return 0;
4140 4140
4141err_free: 4141err_clear_dir:
4142 cgroup_clear_dir(css->cgroup, 1 << css->ss->subsys_id);
4143err_free_percpu_ref:
4142 percpu_ref_cancel_init(&css->refcnt); 4144 percpu_ref_cancel_init(&css->refcnt);
4145err_free_css:
4143 ss->css_free(css); 4146 ss->css_free(css);
4144 return err; 4147 return err;
4145} 4148}
diff --git a/kernel/futex.c b/kernel/futex.c
index 44a1261cb9ff..08ec814ad9d2 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -234,6 +234,7 @@ static const struct futex_q futex_q_init = {
234 * waiting on a futex. 234 * waiting on a futex.
235 */ 235 */
236struct futex_hash_bucket { 236struct futex_hash_bucket {
237 atomic_t waiters;
237 spinlock_t lock; 238 spinlock_t lock;
238 struct plist_head chain; 239 struct plist_head chain;
239} ____cacheline_aligned_in_smp; 240} ____cacheline_aligned_in_smp;
@@ -253,22 +254,37 @@ static inline void futex_get_mm(union futex_key *key)
253 smp_mb__after_atomic_inc(); 254 smp_mb__after_atomic_inc();
254} 255}
255 256
256static inline bool hb_waiters_pending(struct futex_hash_bucket *hb) 257/*
258 * Reflects a new waiter being added to the waitqueue.
259 */
260static inline void hb_waiters_inc(struct futex_hash_bucket *hb)
257{ 261{
258#ifdef CONFIG_SMP 262#ifdef CONFIG_SMP
263 atomic_inc(&hb->waiters);
259 /* 264 /*
260 * Tasks trying to enter the critical region are most likely 265 * Full barrier (A), see the ordering comment above.
261 * potential waiters that will be added to the plist. Ensure
262 * that wakers won't miss to-be-slept tasks in the window between
263 * the wait call and the actual plist_add.
264 */ 266 */
265 if (spin_is_locked(&hb->lock)) 267 smp_mb__after_atomic_inc();
266 return true; 268#endif
267 smp_rmb(); /* Make sure we check the lock state first */ 269}
270
271/*
272 * Reflects a waiter being removed from the waitqueue by wakeup
273 * paths.
274 */
275static inline void hb_waiters_dec(struct futex_hash_bucket *hb)
276{
277#ifdef CONFIG_SMP
278 atomic_dec(&hb->waiters);
279#endif
280}
268 281
269 return !plist_head_empty(&hb->chain); 282static inline int hb_waiters_pending(struct futex_hash_bucket *hb)
283{
284#ifdef CONFIG_SMP
285 return atomic_read(&hb->waiters);
270#else 286#else
271 return true; 287 return 1;
272#endif 288#endif
273} 289}
274 290
@@ -954,6 +970,7 @@ static void __unqueue_futex(struct futex_q *q)
954 970
955 hb = container_of(q->lock_ptr, struct futex_hash_bucket, lock); 971 hb = container_of(q->lock_ptr, struct futex_hash_bucket, lock);
956 plist_del(&q->list, &hb->chain); 972 plist_del(&q->list, &hb->chain);
973 hb_waiters_dec(hb);
957} 974}
958 975
959/* 976/*
@@ -1257,7 +1274,9 @@ void requeue_futex(struct futex_q *q, struct futex_hash_bucket *hb1,
1257 */ 1274 */
1258 if (likely(&hb1->chain != &hb2->chain)) { 1275 if (likely(&hb1->chain != &hb2->chain)) {
1259 plist_del(&q->list, &hb1->chain); 1276 plist_del(&q->list, &hb1->chain);
1277 hb_waiters_dec(hb1);
1260 plist_add(&q->list, &hb2->chain); 1278 plist_add(&q->list, &hb2->chain);
1279 hb_waiters_inc(hb2);
1261 q->lock_ptr = &hb2->lock; 1280 q->lock_ptr = &hb2->lock;
1262 } 1281 }
1263 get_futex_key_refs(key2); 1282 get_futex_key_refs(key2);
@@ -1600,6 +1619,17 @@ static inline struct futex_hash_bucket *queue_lock(struct futex_q *q)
1600 struct futex_hash_bucket *hb; 1619 struct futex_hash_bucket *hb;
1601 1620
1602 hb = hash_futex(&q->key); 1621 hb = hash_futex(&q->key);
1622
1623 /*
1624 * Increment the counter before taking the lock so that
1625 * a potential waker won't miss a to-be-slept task that is
1626 * waiting for the spinlock. This is safe as all queue_lock()
1627 * users end up calling queue_me(). Similarly, for housekeeping,
1628 * decrement the counter at queue_unlock() when some error has
1629 * occurred and we don't end up adding the task to the list.
1630 */
1631 hb_waiters_inc(hb);
1632
1603 q->lock_ptr = &hb->lock; 1633 q->lock_ptr = &hb->lock;
1604 1634
1605 spin_lock(&hb->lock); /* implies MB (A) */ 1635 spin_lock(&hb->lock); /* implies MB (A) */
@@ -1611,6 +1641,7 @@ queue_unlock(struct futex_hash_bucket *hb)
1611 __releases(&hb->lock) 1641 __releases(&hb->lock)
1612{ 1642{
1613 spin_unlock(&hb->lock); 1643 spin_unlock(&hb->lock);
1644 hb_waiters_dec(hb);
1614} 1645}
1615 1646
1616/** 1647/**
@@ -2342,6 +2373,7 @@ int handle_early_requeue_pi_wakeup(struct futex_hash_bucket *hb,
2342 * Unqueue the futex_q and determine which it was. 2373 * Unqueue the futex_q and determine which it was.
2343 */ 2374 */
2344 plist_del(&q->list, &hb->chain); 2375 plist_del(&q->list, &hb->chain);
2376 hb_waiters_dec(hb);
2345 2377
2346 /* Handle spurious wakeups gracefully */ 2378 /* Handle spurious wakeups gracefully */
2347 ret = -EWOULDBLOCK; 2379 ret = -EWOULDBLOCK;
@@ -2875,6 +2907,7 @@ static int __init futex_init(void)
2875 futex_cmpxchg_enabled = 1; 2907 futex_cmpxchg_enabled = 1;
2876 2908
2877 for (i = 0; i < futex_hashsize; i++) { 2909 for (i = 0; i < futex_hashsize; i++) {
2910 atomic_set(&futex_queues[i].waiters, 0);
2878 plist_head_init(&futex_queues[i].chain); 2911 plist_head_init(&futex_queues[i].chain);
2879 spin_lock_init(&futex_queues[i].lock); 2912 spin_lock_init(&futex_queues[i].lock);
2880 } 2913 }
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index 0aa4ce81bc16..5b40279ecd71 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -1435,7 +1435,8 @@ void update_wall_time(void)
1435out: 1435out:
1436 raw_spin_unlock_irqrestore(&timekeeper_lock, flags); 1436 raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
1437 if (clock_set) 1437 if (clock_set)
1438 clock_was_set(); 1438 /* Have to call _delayed version, since in irq context*/
1439 clock_was_set_delayed();
1439} 1440}
1440 1441
1441/** 1442/**
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 815c878f409b..24c1f2382557 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -1600,15 +1600,31 @@ void trace_buffer_unlock_commit(struct ring_buffer *buffer,
1600} 1600}
1601EXPORT_SYMBOL_GPL(trace_buffer_unlock_commit); 1601EXPORT_SYMBOL_GPL(trace_buffer_unlock_commit);
1602 1602
1603static struct ring_buffer *temp_buffer;
1604
1603struct ring_buffer_event * 1605struct ring_buffer_event *
1604trace_event_buffer_lock_reserve(struct ring_buffer **current_rb, 1606trace_event_buffer_lock_reserve(struct ring_buffer **current_rb,
1605 struct ftrace_event_file *ftrace_file, 1607 struct ftrace_event_file *ftrace_file,
1606 int type, unsigned long len, 1608 int type, unsigned long len,
1607 unsigned long flags, int pc) 1609 unsigned long flags, int pc)
1608{ 1610{
1611 struct ring_buffer_event *entry;
1612
1609 *current_rb = ftrace_file->tr->trace_buffer.buffer; 1613 *current_rb = ftrace_file->tr->trace_buffer.buffer;
1610 return trace_buffer_lock_reserve(*current_rb, 1614 entry = trace_buffer_lock_reserve(*current_rb,
1611 type, len, flags, pc); 1615 type, len, flags, pc);
1616 /*
1617 * If tracing is off, but we have triggers enabled
1618 * we still need to look at the event data. Use the temp_buffer
1619 * to store the trace event for the trigger to use. It's recursive
1620 * safe and will not be recorded anywhere.
1621 */
1622 if (!entry && ftrace_file->flags & FTRACE_EVENT_FL_TRIGGER_COND) {
1623 *current_rb = temp_buffer;
1624 entry = trace_buffer_lock_reserve(*current_rb,
1625 type, len, flags, pc);
1626 }
1627 return entry;
1612} 1628}
1613EXPORT_SYMBOL_GPL(trace_event_buffer_lock_reserve); 1629EXPORT_SYMBOL_GPL(trace_event_buffer_lock_reserve);
1614 1630
@@ -6494,11 +6510,16 @@ __init static int tracer_alloc_buffers(void)
6494 6510
6495 raw_spin_lock_init(&global_trace.start_lock); 6511 raw_spin_lock_init(&global_trace.start_lock);
6496 6512
6513 /* Used for event triggers */
6514 temp_buffer = ring_buffer_alloc(PAGE_SIZE, RB_FL_OVERWRITE);
6515 if (!temp_buffer)
6516 goto out_free_cpumask;
6517
6497 /* TODO: make the number of buffers hot pluggable with CPUS */ 6518 /* TODO: make the number of buffers hot pluggable with CPUS */
6498 if (allocate_trace_buffers(&global_trace, ring_buf_size) < 0) { 6519 if (allocate_trace_buffers(&global_trace, ring_buf_size) < 0) {
6499 printk(KERN_ERR "tracer: failed to allocate ring buffer!\n"); 6520 printk(KERN_ERR "tracer: failed to allocate ring buffer!\n");
6500 WARN_ON(1); 6521 WARN_ON(1);
6501 goto out_free_cpumask; 6522 goto out_free_temp_buffer;
6502 } 6523 }
6503 6524
6504 if (global_trace.buffer_disabled) 6525 if (global_trace.buffer_disabled)
@@ -6540,6 +6561,8 @@ __init static int tracer_alloc_buffers(void)
6540 6561
6541 return 0; 6562 return 0;
6542 6563
6564out_free_temp_buffer:
6565 ring_buffer_free(temp_buffer);
6543out_free_cpumask: 6566out_free_cpumask:
6544 free_percpu(global_trace.trace_buffer.data); 6567 free_percpu(global_trace.trace_buffer.data);
6545#ifdef CONFIG_TRACER_MAX_TRACE 6568#ifdef CONFIG_TRACER_MAX_TRACE
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index f3989ceb5cd5..7b16d40bd64d 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -27,12 +27,6 @@
27 27
28DEFINE_MUTEX(event_mutex); 28DEFINE_MUTEX(event_mutex);
29 29
30DEFINE_MUTEX(event_storage_mutex);
31EXPORT_SYMBOL_GPL(event_storage_mutex);
32
33char event_storage[EVENT_STORAGE_SIZE];
34EXPORT_SYMBOL_GPL(event_storage);
35
36LIST_HEAD(ftrace_events); 30LIST_HEAD(ftrace_events);
37static LIST_HEAD(ftrace_common_fields); 31static LIST_HEAD(ftrace_common_fields);
38 32
diff --git a/kernel/trace/trace_export.c b/kernel/trace/trace_export.c
index 7c3e3e72e2b6..ee0a5098ac43 100644
--- a/kernel/trace/trace_export.c
+++ b/kernel/trace/trace_export.c
@@ -95,15 +95,12 @@ static void __always_unused ____ftrace_check_##name(void) \
95#undef __array 95#undef __array
96#define __array(type, item, len) \ 96#define __array(type, item, len) \
97 do { \ 97 do { \
98 char *type_str = #type"["__stringify(len)"]"; \
98 BUILD_BUG_ON(len > MAX_FILTER_STR_VAL); \ 99 BUILD_BUG_ON(len > MAX_FILTER_STR_VAL); \
99 mutex_lock(&event_storage_mutex); \ 100 ret = trace_define_field(event_call, type_str, #item, \
100 snprintf(event_storage, sizeof(event_storage), \
101 "%s[%d]", #type, len); \
102 ret = trace_define_field(event_call, event_storage, #item, \
103 offsetof(typeof(field), item), \ 101 offsetof(typeof(field), item), \
104 sizeof(field.item), \ 102 sizeof(field.item), \
105 is_signed_type(type), filter_type); \ 103 is_signed_type(type), filter_type); \
106 mutex_unlock(&event_storage_mutex); \
107 if (ret) \ 104 if (ret) \
108 return ret; \ 105 return ret; \
109 } while (0); 106 } while (0);
diff --git a/lib/fonts/Kconfig b/lib/fonts/Kconfig
index 4dc1b990aa23..34fd931b54b5 100644
--- a/lib/fonts/Kconfig
+++ b/lib/fonts/Kconfig
@@ -9,7 +9,7 @@ if FONT_SUPPORT
9 9
10config FONTS 10config FONTS
11 bool "Select compiled-in fonts" 11 bool "Select compiled-in fonts"
12 depends on FRAMEBUFFER_CONSOLE 12 depends on FRAMEBUFFER_CONSOLE || STI_CONSOLE
13 help 13 help
14 Say Y here if you would like to use fonts other than the default 14 Say Y here if you would like to use fonts other than the default
15 your frame buffer console usually use. 15 your frame buffer console usually use.
@@ -22,7 +22,7 @@ config FONTS
22 22
23config FONT_8x8 23config FONT_8x8
24 bool "VGA 8x8 font" if FONTS 24 bool "VGA 8x8 font" if FONTS
25 depends on FRAMEBUFFER_CONSOLE 25 depends on FRAMEBUFFER_CONSOLE || STI_CONSOLE
26 default y if !SPARC && !FONTS 26 default y if !SPARC && !FONTS
27 help 27 help
28 This is the "high resolution" font for the VGA frame buffer (the one 28 This is the "high resolution" font for the VGA frame buffer (the one
@@ -45,7 +45,7 @@ config FONT_8x16
45 45
46config FONT_6x11 46config FONT_6x11
47 bool "Mac console 6x11 font (not supported by all drivers)" if FONTS 47 bool "Mac console 6x11 font (not supported by all drivers)" if FONTS
48 depends on FRAMEBUFFER_CONSOLE 48 depends on FRAMEBUFFER_CONSOLE || STI_CONSOLE
49 default y if !SPARC && !FONTS && MAC 49 default y if !SPARC && !FONTS && MAC
50 help 50 help
51 Small console font with Macintosh-style high-half glyphs. Some Mac 51 Small console font with Macintosh-style high-half glyphs. Some Mac
diff --git a/lib/random32.c b/lib/random32.c
index 1e5b2df44291..614896778700 100644
--- a/lib/random32.c
+++ b/lib/random32.c
@@ -244,8 +244,19 @@ static void __prandom_reseed(bool late)
244 static bool latch = false; 244 static bool latch = false;
245 static DEFINE_SPINLOCK(lock); 245 static DEFINE_SPINLOCK(lock);
246 246
247 /* Asking for random bytes might result in bytes getting
248 * moved into the nonblocking pool and thus marking it
249 * as initialized. In this case we would double back into
250 * this function and attempt to do a late reseed.
251 * Ignore the pointless attempt to reseed again if we're
252 * already waiting for bytes when the nonblocking pool
253 * got initialized.
254 */
255
247 /* only allow initial seeding (late == false) once */ 256 /* only allow initial seeding (late == false) once */
248 spin_lock_irqsave(&lock, flags); 257 if (!spin_trylock_irqsave(&lock, flags))
258 return;
259
249 if (latch && !late) 260 if (latch && !late)
250 goto out; 261 goto out;
251 latch = true; 262 latch = true;
diff --git a/mm/fremap.c b/mm/fremap.c
index bbc4d660221a..34feba60a17e 100644
--- a/mm/fremap.c
+++ b/mm/fremap.c
@@ -23,28 +23,44 @@
23 23
24#include "internal.h" 24#include "internal.h"
25 25
26static int mm_counter(struct page *page)
27{
28 return PageAnon(page) ? MM_ANONPAGES : MM_FILEPAGES;
29}
30
26static void zap_pte(struct mm_struct *mm, struct vm_area_struct *vma, 31static void zap_pte(struct mm_struct *mm, struct vm_area_struct *vma,
27 unsigned long addr, pte_t *ptep) 32 unsigned long addr, pte_t *ptep)
28{ 33{
29 pte_t pte = *ptep; 34 pte_t pte = *ptep;
35 struct page *page;
36 swp_entry_t entry;
30 37
31 if (pte_present(pte)) { 38 if (pte_present(pte)) {
32 struct page *page;
33
34 flush_cache_page(vma, addr, pte_pfn(pte)); 39 flush_cache_page(vma, addr, pte_pfn(pte));
35 pte = ptep_clear_flush(vma, addr, ptep); 40 pte = ptep_clear_flush(vma, addr, ptep);
36 page = vm_normal_page(vma, addr, pte); 41 page = vm_normal_page(vma, addr, pte);
37 if (page) { 42 if (page) {
38 if (pte_dirty(pte)) 43 if (pte_dirty(pte))
39 set_page_dirty(page); 44 set_page_dirty(page);
45 update_hiwater_rss(mm);
46 dec_mm_counter(mm, mm_counter(page));
40 page_remove_rmap(page); 47 page_remove_rmap(page);
41 page_cache_release(page); 48 page_cache_release(page);
49 }
50 } else { /* zap_pte() is not called when pte_none() */
51 if (!pte_file(pte)) {
42 update_hiwater_rss(mm); 52 update_hiwater_rss(mm);
43 dec_mm_counter(mm, MM_FILEPAGES); 53 entry = pte_to_swp_entry(pte);
54 if (non_swap_entry(entry)) {
55 if (is_migration_entry(entry)) {
56 page = migration_entry_to_page(entry);
57 dec_mm_counter(mm, mm_counter(page));
58 }
59 } else {
60 free_swap_and_cache(entry);
61 dec_mm_counter(mm, MM_SWAPENTS);
62 }
44 } 63 }
45 } else {
46 if (!pte_file(pte))
47 free_swap_and_cache(pte_to_swp_entry(pte));
48 pte_clear_not_present_full(mm, addr, ptep, 0); 64 pte_clear_not_present_full(mm, addr, ptep, 0);
49 } 65 }
50} 66}
diff --git a/mm/migrate.c b/mm/migrate.c
index b494fdb9a636..bed48809e5d0 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -178,6 +178,37 @@ out:
178} 178}
179 179
180/* 180/*
181 * Congratulations to trinity for discovering this bug.
182 * mm/fremap.c's remap_file_pages() accepts any range within a single vma to
183 * convert that vma to VM_NONLINEAR; and generic_file_remap_pages() will then
184 * replace the specified range by file ptes throughout (maybe populated after).
185 * If page migration finds a page within that range, while it's still located
186 * by vma_interval_tree rather than lost to i_mmap_nonlinear list, no problem:
187 * zap_pte() clears the temporary migration entry before mmap_sem is dropped.
188 * But if the migrating page is in a part of the vma outside the range to be
189 * remapped, then it will not be cleared, and remove_migration_ptes() needs to
190 * deal with it. Fortunately, this part of the vma is of course still linear,
191 * so we just need to use linear location on the nonlinear list.
192 */
193static int remove_linear_migration_ptes_from_nonlinear(struct page *page,
194 struct address_space *mapping, void *arg)
195{
196 struct vm_area_struct *vma;
197 /* hugetlbfs does not support remap_pages, so no huge pgoff worries */
198 pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
199 unsigned long addr;
200
201 list_for_each_entry(vma,
202 &mapping->i_mmap_nonlinear, shared.nonlinear) {
203
204 addr = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
205 if (addr >= vma->vm_start && addr < vma->vm_end)
206 remove_migration_pte(page, vma, addr, arg);
207 }
208 return SWAP_AGAIN;
209}
210
211/*
181 * Get rid of all migration entries and replace them by 212 * Get rid of all migration entries and replace them by
182 * references to the indicated page. 213 * references to the indicated page.
183 */ 214 */
@@ -186,6 +217,7 @@ static void remove_migration_ptes(struct page *old, struct page *new)
186 struct rmap_walk_control rwc = { 217 struct rmap_walk_control rwc = {
187 .rmap_one = remove_migration_pte, 218 .rmap_one = remove_migration_pte,
188 .arg = old, 219 .arg = old,
220 .file_nonlinear = remove_linear_migration_ptes_from_nonlinear,
189 }; 221 };
190 222
191 rmap_walk(new, &rwc); 223 rmap_walk(new, &rwc);
diff --git a/mm/rmap.c b/mm/rmap.c
index d9d42316a99a..8fc049f9a5a6 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1360,8 +1360,9 @@ static int try_to_unmap_cluster(unsigned long cursor, unsigned int *mapcount,
1360} 1360}
1361 1361
1362static int try_to_unmap_nonlinear(struct page *page, 1362static int try_to_unmap_nonlinear(struct page *page,
1363 struct address_space *mapping, struct vm_area_struct *vma) 1363 struct address_space *mapping, void *arg)
1364{ 1364{
1365 struct vm_area_struct *vma;
1365 int ret = SWAP_AGAIN; 1366 int ret = SWAP_AGAIN;
1366 unsigned long cursor; 1367 unsigned long cursor;
1367 unsigned long max_nl_cursor = 0; 1368 unsigned long max_nl_cursor = 0;
@@ -1663,7 +1664,7 @@ static int rmap_walk_file(struct page *page, struct rmap_walk_control *rwc)
1663 if (list_empty(&mapping->i_mmap_nonlinear)) 1664 if (list_empty(&mapping->i_mmap_nonlinear))
1664 goto done; 1665 goto done;
1665 1666
1666 ret = rwc->file_nonlinear(page, mapping, vma); 1667 ret = rwc->file_nonlinear(page, mapping, rwc->arg);
1667 1668
1668done: 1669done:
1669 mutex_unlock(&mapping->i_mmap_mutex); 1670 mutex_unlock(&mapping->i_mmap_mutex);
diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
index ec9909935fb6..175273f38cb1 100644
--- a/net/8021q/vlan.c
+++ b/net/8021q/vlan.c
@@ -307,9 +307,11 @@ static void vlan_sync_address(struct net_device *dev,
307static void vlan_transfer_features(struct net_device *dev, 307static void vlan_transfer_features(struct net_device *dev,
308 struct net_device *vlandev) 308 struct net_device *vlandev)
309{ 309{
310 struct vlan_dev_priv *vlan = vlan_dev_priv(vlandev);
311
310 vlandev->gso_max_size = dev->gso_max_size; 312 vlandev->gso_max_size = dev->gso_max_size;
311 313
312 if (dev->features & NETIF_F_HW_VLAN_CTAG_TX) 314 if (vlan_hw_offload_capable(dev->features, vlan->vlan_proto))
313 vlandev->hard_header_len = dev->hard_header_len; 315 vlandev->hard_header_len = dev->hard_header_len;
314 else 316 else
315 vlandev->hard_header_len = dev->hard_header_len + VLAN_HLEN; 317 vlandev->hard_header_len = dev->hard_header_len + VLAN_HLEN;
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index 4b65aa492fb6..27bfe2f8e2de 100644
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -578,6 +578,9 @@ static int vlan_dev_init(struct net_device *dev)
578 578
579 dev->features |= real_dev->vlan_features | NETIF_F_LLTX; 579 dev->features |= real_dev->vlan_features | NETIF_F_LLTX;
580 dev->gso_max_size = real_dev->gso_max_size; 580 dev->gso_max_size = real_dev->gso_max_size;
581 if (dev->features & NETIF_F_VLAN_FEATURES)
582 netdev_warn(real_dev, "VLAN features are set incorrectly. Q-in-Q configurations may not work correctly.\n");
583
581 584
582 /* ipv6 shared card related stuff */ 585 /* ipv6 shared card related stuff */
583 dev->dev_id = real_dev->dev_id; 586 dev->dev_id = real_dev->dev_id;
@@ -592,7 +595,8 @@ static int vlan_dev_init(struct net_device *dev)
592#endif 595#endif
593 596
594 dev->needed_headroom = real_dev->needed_headroom; 597 dev->needed_headroom = real_dev->needed_headroom;
595 if (real_dev->features & NETIF_F_HW_VLAN_CTAG_TX) { 598 if (vlan_hw_offload_capable(real_dev->features,
599 vlan_dev_priv(dev)->vlan_proto)) {
596 dev->header_ops = &vlan_passthru_header_ops; 600 dev->header_ops = &vlan_passthru_header_ops;
597 dev->hard_header_len = real_dev->hard_header_len; 601 dev->hard_header_len = real_dev->hard_header_len;
598 } else { 602 } else {
diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c
index 63f0455c0bc3..8fe8b71b487a 100644
--- a/net/bridge/br_device.c
+++ b/net/bridge/br_device.c
@@ -49,14 +49,14 @@ netdev_tx_t br_dev_xmit(struct sk_buff *skb, struct net_device *dev)
49 brstats->tx_bytes += skb->len; 49 brstats->tx_bytes += skb->len;
50 u64_stats_update_end(&brstats->syncp); 50 u64_stats_update_end(&brstats->syncp);
51 51
52 if (!br_allowed_ingress(br, br_get_vlan_info(br), skb, &vid))
53 goto out;
54
55 BR_INPUT_SKB_CB(skb)->brdev = dev; 52 BR_INPUT_SKB_CB(skb)->brdev = dev;
56 53
57 skb_reset_mac_header(skb); 54 skb_reset_mac_header(skb);
58 skb_pull(skb, ETH_HLEN); 55 skb_pull(skb, ETH_HLEN);
59 56
57 if (!br_allowed_ingress(br, br_get_vlan_info(br), skb, &vid))
58 goto out;
59
60 if (is_broadcast_ether_addr(dest)) 60 if (is_broadcast_ether_addr(dest))
61 br_flood_deliver(br, skb, false); 61 br_flood_deliver(br, skb, false);
62 else if (is_multicast_ether_addr(dest)) { 62 else if (is_multicast_ether_addr(dest)) {
diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c
index 28d544627422..d0cca3c65f01 100644
--- a/net/bridge/br_input.c
+++ b/net/bridge/br_input.c
@@ -29,6 +29,7 @@ static int br_pass_frame_up(struct sk_buff *skb)
29 struct net_device *indev, *brdev = BR_INPUT_SKB_CB(skb)->brdev; 29 struct net_device *indev, *brdev = BR_INPUT_SKB_CB(skb)->brdev;
30 struct net_bridge *br = netdev_priv(brdev); 30 struct net_bridge *br = netdev_priv(brdev);
31 struct pcpu_sw_netstats *brstats = this_cpu_ptr(br->stats); 31 struct pcpu_sw_netstats *brstats = this_cpu_ptr(br->stats);
32 struct net_port_vlans *pv;
32 33
33 u64_stats_update_begin(&brstats->syncp); 34 u64_stats_update_begin(&brstats->syncp);
34 brstats->rx_packets++; 35 brstats->rx_packets++;
@@ -39,18 +40,18 @@ static int br_pass_frame_up(struct sk_buff *skb)
39 * packet is allowed except in promisc mode when someone 40 * may be running packet capture.
40 * may be running packet capture. 41 * may be running packet capture.
41 */ 42 */
43 pv = br_get_vlan_info(br);
42 if (!(brdev->flags & IFF_PROMISC) && 44 if (!(brdev->flags & IFF_PROMISC) &&
43 !br_allowed_egress(br, br_get_vlan_info(br), skb)) { 45 !br_allowed_egress(br, pv, skb)) {
44 kfree_skb(skb); 46 kfree_skb(skb);
45 return NET_RX_DROP; 47 return NET_RX_DROP;
46 } 48 }
47 49
48 skb = br_handle_vlan(br, br_get_vlan_info(br), skb);
49 if (!skb)
50 return NET_RX_DROP;
51
52 indev = skb->dev; 50 indev = skb->dev;
53 skb->dev = brdev; 51 skb->dev = brdev;
52 skb = br_handle_vlan(br, pv, skb);
53 if (!skb)
54 return NET_RX_DROP;
54 55
55 return NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_IN, skb, indev, NULL, 56 return NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_IN, skb, indev, NULL,
56 netif_receive_skb); 57 netif_receive_skb);
diff --git a/net/bridge/br_vlan.c b/net/bridge/br_vlan.c
index 8249ca764c79..f23c74b3a953 100644
--- a/net/bridge/br_vlan.c
+++ b/net/bridge/br_vlan.c
@@ -119,22 +119,6 @@ static void __vlan_flush(struct net_port_vlans *v)
119 kfree_rcu(v, rcu); 119 kfree_rcu(v, rcu);
120} 120}
121 121
122/* Strip the tag from the packet. Will return skb with tci set 0. */
123static struct sk_buff *br_vlan_untag(struct sk_buff *skb)
124{
125 if (skb->protocol != htons(ETH_P_8021Q)) {
126 skb->vlan_tci = 0;
127 return skb;
128 }
129
130 skb->vlan_tci = 0;
131 skb = vlan_untag(skb);
132 if (skb)
133 skb->vlan_tci = 0;
134
135 return skb;
136}
137
138struct sk_buff *br_handle_vlan(struct net_bridge *br, 122struct sk_buff *br_handle_vlan(struct net_bridge *br,
139 const struct net_port_vlans *pv, 123 const struct net_port_vlans *pv,
140 struct sk_buff *skb) 124 struct sk_buff *skb)
@@ -144,13 +128,27 @@ struct sk_buff *br_handle_vlan(struct net_bridge *br,
144 if (!br->vlan_enabled) 128 if (!br->vlan_enabled)
145 goto out; 129 goto out;
146 130
131 /* Vlan filter table must be configured at this point. The
132 * only exception is the bridge is set in promisc mode and the
133 * packet is destined for the bridge device. In this case
134 * pass the packet as is.
135 */
136 if (!pv) {
137 if ((br->dev->flags & IFF_PROMISC) && skb->dev == br->dev) {
138 goto out;
139 } else {
140 kfree_skb(skb);
141 return NULL;
142 }
143 }
144
147 /* At this point, we know that the frame was filtered and contains 145 /* At this point, we know that the frame was filtered and contains
148 * a valid vlan id. If the vlan id is set in the untagged bitmap, 146 * a valid vlan id. If the vlan id is set in the untagged bitmap,
149 * send untagged; otherwise, send tagged. 147 * send untagged; otherwise, send tagged.
150 */ 148 */
151 br_vlan_get_tag(skb, &vid); 149 br_vlan_get_tag(skb, &vid);
152 if (test_bit(vid, pv->untagged_bitmap)) 150 if (test_bit(vid, pv->untagged_bitmap))
153 skb = br_vlan_untag(skb); 151 skb->vlan_tci = 0;
154 152
155out: 153out:
156 return skb; 154 return skb;
@@ -174,6 +172,18 @@ bool br_allowed_ingress(struct net_bridge *br, struct net_port_vlans *v,
174 if (!v) 172 if (!v)
175 return false; 173 return false;
176 174
175 /* If vlan tx offload is disabled on bridge device and frame was
176 * sent from vlan device on the bridge device, it does not have
177 * HW accelerated vlan tag.
178 */
179 if (unlikely(!vlan_tx_tag_present(skb) &&
180 (skb->protocol == htons(ETH_P_8021Q) ||
181 skb->protocol == htons(ETH_P_8021AD)))) {
182 skb = vlan_untag(skb);
183 if (unlikely(!skb))
184 return false;
185 }
186
177 err = br_vlan_get_tag(skb, vid); 187 err = br_vlan_get_tag(skb, vid);
178 if (!*vid) { 188 if (!*vid) {
179 u16 pvid = br_get_pvid(v); 189 u16 pvid = br_get_pvid(v);
diff --git a/net/core/dev.c b/net/core/dev.c
index b1b0c8d4d7df..45fa2f11f84d 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2286,7 +2286,7 @@ out:
2286} 2286}
2287EXPORT_SYMBOL(skb_checksum_help); 2287EXPORT_SYMBOL(skb_checksum_help);
2288 2288
2289__be16 skb_network_protocol(struct sk_buff *skb) 2289__be16 skb_network_protocol(struct sk_buff *skb, int *depth)
2290{ 2290{
2291 __be16 type = skb->protocol; 2291 __be16 type = skb->protocol;
2292 int vlan_depth = ETH_HLEN; 2292 int vlan_depth = ETH_HLEN;
@@ -2313,6 +2313,8 @@ __be16 skb_network_protocol(struct sk_buff *skb)
2313 vlan_depth += VLAN_HLEN; 2313 vlan_depth += VLAN_HLEN;
2314 } 2314 }
2315 2315
2316 *depth = vlan_depth;
2317
2316 return type; 2318 return type;
2317} 2319}
2318 2320
@@ -2326,12 +2328,13 @@ struct sk_buff *skb_mac_gso_segment(struct sk_buff *skb,
2326{ 2328{
2327 struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT); 2329 struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
2328 struct packet_offload *ptype; 2330 struct packet_offload *ptype;
2329 __be16 type = skb_network_protocol(skb); 2331 int vlan_depth = skb->mac_len;
2332 __be16 type = skb_network_protocol(skb, &vlan_depth);
2330 2333
2331 if (unlikely(!type)) 2334 if (unlikely(!type))
2332 return ERR_PTR(-EINVAL); 2335 return ERR_PTR(-EINVAL);
2333 2336
2334 __skb_pull(skb, skb->mac_len); 2337 __skb_pull(skb, vlan_depth);
2335 2338
2336 rcu_read_lock(); 2339 rcu_read_lock();
2337 list_for_each_entry_rcu(ptype, &offload_base, list) { 2340 list_for_each_entry_rcu(ptype, &offload_base, list) {
@@ -2498,8 +2501,10 @@ static netdev_features_t harmonize_features(struct sk_buff *skb,
2498 const struct net_device *dev, 2501 const struct net_device *dev,
2499 netdev_features_t features) 2502 netdev_features_t features)
2500{ 2503{
2504 int tmp;
2505
2501 if (skb->ip_summed != CHECKSUM_NONE && 2506 if (skb->ip_summed != CHECKSUM_NONE &&
2502 !can_checksum_protocol(features, skb_network_protocol(skb))) { 2507 !can_checksum_protocol(features, skb_network_protocol(skb, &tmp))) {
2503 features &= ~NETIF_F_ALL_CSUM; 2508 features &= ~NETIF_F_ALL_CSUM;
2504 } else if (illegal_highdma(dev, skb)) { 2509 } else if (illegal_highdma(dev, skb)) {
2505 features &= ~NETIF_F_SG; 2510 features &= ~NETIF_F_SG;
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index a664f7829a6d..df9e6b1a9759 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -742,7 +742,7 @@ static bool pkt_is_ns(struct sk_buff *skb)
742 struct nd_msg *msg; 742 struct nd_msg *msg;
743 struct ipv6hdr *hdr; 743 struct ipv6hdr *hdr;
744 744
745 if (skb->protocol != htons(ETH_P_ARP)) 745 if (skb->protocol != htons(ETH_P_IPV6))
746 return false; 746 return false;
747 if (!pskb_may_pull(skb, sizeof(struct ipv6hdr) + sizeof(struct nd_msg))) 747 if (!pskb_may_pull(skb, sizeof(struct ipv6hdr) + sizeof(struct nd_msg)))
748 return false; 748 return false;
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 1a0dac2ef9ad..120eecc0f5a4 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -2121,12 +2121,13 @@ EXPORT_SYMBOL(rtmsg_ifinfo);
2121static int nlmsg_populate_fdb_fill(struct sk_buff *skb, 2121static int nlmsg_populate_fdb_fill(struct sk_buff *skb,
2122 struct net_device *dev, 2122 struct net_device *dev,
2123 u8 *addr, u32 pid, u32 seq, 2123 u8 *addr, u32 pid, u32 seq,
2124 int type, unsigned int flags) 2124 int type, unsigned int flags,
2125 int nlflags)
2125{ 2126{
2126 struct nlmsghdr *nlh; 2127 struct nlmsghdr *nlh;
2127 struct ndmsg *ndm; 2128 struct ndmsg *ndm;
2128 2129
2129 nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndm), NLM_F_MULTI); 2130 nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndm), nlflags);
2130 if (!nlh) 2131 if (!nlh)
2131 return -EMSGSIZE; 2132 return -EMSGSIZE;
2132 2133
@@ -2164,7 +2165,7 @@ static void rtnl_fdb_notify(struct net_device *dev, u8 *addr, int type)
2164 if (!skb) 2165 if (!skb)
2165 goto errout; 2166 goto errout;
2166 2167
2167 err = nlmsg_populate_fdb_fill(skb, dev, addr, 0, 0, type, NTF_SELF); 2168 err = nlmsg_populate_fdb_fill(skb, dev, addr, 0, 0, type, NTF_SELF, 0);
2168 if (err < 0) { 2169 if (err < 0) {
2169 kfree_skb(skb); 2170 kfree_skb(skb);
2170 goto errout; 2171 goto errout;
@@ -2389,7 +2390,8 @@ static int nlmsg_populate_fdb(struct sk_buff *skb,
2389 2390
2390 err = nlmsg_populate_fdb_fill(skb, dev, ha->addr, 2391 err = nlmsg_populate_fdb_fill(skb, dev, ha->addr,
2391 portid, seq, 2392 portid, seq,
2392 RTM_NEWNEIGH, NTF_SELF); 2393 RTM_NEWNEIGH, NTF_SELF,
2394 NLM_F_MULTI);
2393 if (err < 0) 2395 if (err < 0)
2394 return err; 2396 return err;
2395skip: 2397skip:
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 869c7afe3b07..90b96a11b974 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -2127,25 +2127,31 @@ EXPORT_SYMBOL_GPL(skb_zerocopy_headlen);
2127 * 2127 *
2128 * The `hlen` as calculated by skb_zerocopy_headlen() specifies the 2128 * The `hlen` as calculated by skb_zerocopy_headlen() specifies the
2129 * headroom in the `to` buffer. 2129 * headroom in the `to` buffer.
2130 *
2131 * Return value:
2132 * 0: everything is OK
2133 * -ENOMEM: couldn't orphan frags of @from due to lack of memory
2134 * -EFAULT: skb_copy_bits() found some problem with skb geometry
2130 */ 2135 */
2131void 2136int
2132skb_zerocopy(struct sk_buff *to, const struct sk_buff *from, int len, int hlen) 2137skb_zerocopy(struct sk_buff *to, struct sk_buff *from, int len, int hlen)
2133{ 2138{
2134 int i, j = 0; 2139 int i, j = 0;
2135 int plen = 0; /* length of skb->head fragment */ 2140 int plen = 0; /* length of skb->head fragment */
2141 int ret;
2136 struct page *page; 2142 struct page *page;
2137 unsigned int offset; 2143 unsigned int offset;
2138 2144
2139 BUG_ON(!from->head_frag && !hlen); 2145 BUG_ON(!from->head_frag && !hlen);
2140 2146
2141 /* don't bother with small payloads */ 2147 /* don't bother with small payloads */
2142 if (len <= skb_tailroom(to)) { 2148 if (len <= skb_tailroom(to))
2143 skb_copy_bits(from, 0, skb_put(to, len), len); 2149 return skb_copy_bits(from, 0, skb_put(to, len), len);
2144 return;
2145 }
2146 2150
2147 if (hlen) { 2151 if (hlen) {
2148 skb_copy_bits(from, 0, skb_put(to, hlen), hlen); 2152 ret = skb_copy_bits(from, 0, skb_put(to, hlen), hlen);
2153 if (unlikely(ret))
2154 return ret;
2149 len -= hlen; 2155 len -= hlen;
2150 } else { 2156 } else {
2151 plen = min_t(int, skb_headlen(from), len); 2157 plen = min_t(int, skb_headlen(from), len);
@@ -2163,6 +2169,11 @@ skb_zerocopy(struct sk_buff *to, const struct sk_buff *from, int len, int hlen)
2163 to->len += len + plen; 2169 to->len += len + plen;
2164 to->data_len += len + plen; 2170 to->data_len += len + plen;
2165 2171
2172 if (unlikely(skb_orphan_frags(from, GFP_ATOMIC))) {
2173 skb_tx_error(from);
2174 return -ENOMEM;
2175 }
2176
2166 for (i = 0; i < skb_shinfo(from)->nr_frags; i++) { 2177 for (i = 0; i < skb_shinfo(from)->nr_frags; i++) {
2167 if (!len) 2178 if (!len)
2168 break; 2179 break;
@@ -2173,6 +2184,8 @@ skb_zerocopy(struct sk_buff *to, const struct sk_buff *from, int len, int hlen)
2173 j++; 2184 j++;
2174 } 2185 }
2175 skb_shinfo(to)->nr_frags = j; 2186 skb_shinfo(to)->nr_frags = j;
2187
2188 return 0;
2176} 2189}
2177EXPORT_SYMBOL_GPL(skb_zerocopy); 2190EXPORT_SYMBOL_GPL(skb_zerocopy);
2178 2191
@@ -2866,8 +2879,9 @@ struct sk_buff *skb_segment(struct sk_buff *head_skb,
2866 int err = -ENOMEM; 2879 int err = -ENOMEM;
2867 int i = 0; 2880 int i = 0;
2868 int pos; 2881 int pos;
2882 int dummy;
2869 2883
2870 proto = skb_network_protocol(head_skb); 2884 proto = skb_network_protocol(head_skb, &dummy);
2871 if (unlikely(!proto)) 2885 if (unlikely(!proto))
2872 return ERR_PTR(-EINVAL); 2886 return ERR_PTR(-EINVAL);
2873 2887
diff --git a/net/ipv4/gre_demux.c b/net/ipv4/gre_demux.c
index 1863422fb7d5..250be7421ab3 100644
--- a/net/ipv4/gre_demux.c
+++ b/net/ipv4/gre_demux.c
@@ -182,6 +182,14 @@ static int gre_cisco_rcv(struct sk_buff *skb)
182 int i; 182 int i;
183 bool csum_err = false; 183 bool csum_err = false;
184 184
185#ifdef CONFIG_NET_IPGRE_BROADCAST
186 if (ipv4_is_multicast(ip_hdr(skb)->daddr)) {
187 /* Looped back packet, drop it! */
188 if (rt_is_output_route(skb_rtable(skb)))
189 goto drop;
190 }
191#endif
192
185 if (parse_gre_header(skb, &tpi, &csum_err) < 0) 193 if (parse_gre_header(skb, &tpi, &csum_err) < 0)
186 goto drop; 194 goto drop;
187 195
diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c
index 78a89e61925d..a82a22d8f77f 100644
--- a/net/ipv4/ip_tunnel.c
+++ b/net/ipv4/ip_tunnel.c
@@ -416,9 +416,6 @@ int ip_tunnel_rcv(struct ip_tunnel *tunnel, struct sk_buff *skb,
416 416
417#ifdef CONFIG_NET_IPGRE_BROADCAST 417#ifdef CONFIG_NET_IPGRE_BROADCAST
418 if (ipv4_is_multicast(iph->daddr)) { 418 if (ipv4_is_multicast(iph->daddr)) {
419 /* Looped back packet, drop it! */
420 if (rt_is_output_route(skb_rtable(skb)))
421 goto drop;
422 tunnel->dev->stats.multicast++; 419 tunnel->dev->stats.multicast++;
423 skb->pkt_type = PACKET_BROADCAST; 420 skb->pkt_type = PACKET_BROADCAST;
424 } 421 }
diff --git a/net/ipv4/ip_tunnel_core.c b/net/ipv4/ip_tunnel_core.c
index 6f847dd56dbc..8d69626f2206 100644
--- a/net/ipv4/ip_tunnel_core.c
+++ b/net/ipv4/ip_tunnel_core.c
@@ -108,6 +108,7 @@ int iptunnel_pull_header(struct sk_buff *skb, int hdr_len, __be16 inner_proto)
108 nf_reset(skb); 108 nf_reset(skb);
109 secpath_reset(skb); 109 secpath_reset(skb);
110 skb_clear_hash_if_not_l4(skb); 110 skb_clear_hash_if_not_l4(skb);
111 skb_dst_drop(skb);
111 skb->vlan_tci = 0; 112 skb->vlan_tci = 0;
112 skb_set_queue_mapping(skb, 0); 113 skb_set_queue_mapping(skb, 0);
113 skb->pkt_type = PACKET_HOST; 114 skb->pkt_type = PACKET_HOST;
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
index b9b3472975ba..28863570dd60 100644
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -2255,13 +2255,14 @@ int ipmr_get_route(struct net *net, struct sk_buff *skb,
2255} 2255}
2256 2256
2257static int ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb, 2257static int ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
2258 u32 portid, u32 seq, struct mfc_cache *c, int cmd) 2258 u32 portid, u32 seq, struct mfc_cache *c, int cmd,
2259 int flags)
2259{ 2260{
2260 struct nlmsghdr *nlh; 2261 struct nlmsghdr *nlh;
2261 struct rtmsg *rtm; 2262 struct rtmsg *rtm;
2262 int err; 2263 int err;
2263 2264
2264 nlh = nlmsg_put(skb, portid, seq, cmd, sizeof(*rtm), NLM_F_MULTI); 2265 nlh = nlmsg_put(skb, portid, seq, cmd, sizeof(*rtm), flags);
2265 if (nlh == NULL) 2266 if (nlh == NULL)
2266 return -EMSGSIZE; 2267 return -EMSGSIZE;
2267 2268
@@ -2329,7 +2330,7 @@ static void mroute_netlink_event(struct mr_table *mrt, struct mfc_cache *mfc,
2329 if (skb == NULL) 2330 if (skb == NULL)
2330 goto errout; 2331 goto errout;
2331 2332
2332 err = ipmr_fill_mroute(mrt, skb, 0, 0, mfc, cmd); 2333 err = ipmr_fill_mroute(mrt, skb, 0, 0, mfc, cmd, 0);
2333 if (err < 0) 2334 if (err < 0)
2334 goto errout; 2335 goto errout;
2335 2336
@@ -2368,7 +2369,8 @@ static int ipmr_rtm_dumproute(struct sk_buff *skb, struct netlink_callback *cb)
2368 if (ipmr_fill_mroute(mrt, skb, 2369 if (ipmr_fill_mroute(mrt, skb,
2369 NETLINK_CB(cb->skb).portid, 2370 NETLINK_CB(cb->skb).portid,
2370 cb->nlh->nlmsg_seq, 2371 cb->nlh->nlmsg_seq,
2371 mfc, RTM_NEWROUTE) < 0) 2372 mfc, RTM_NEWROUTE,
2373 NLM_F_MULTI) < 0)
2372 goto done; 2374 goto done;
2373next_entry: 2375next_entry:
2374 e++; 2376 e++;
@@ -2382,7 +2384,8 @@ next_entry:
2382 if (ipmr_fill_mroute(mrt, skb, 2384 if (ipmr_fill_mroute(mrt, skb,
2383 NETLINK_CB(cb->skb).portid, 2385 NETLINK_CB(cb->skb).portid,
2384 cb->nlh->nlmsg_seq, 2386 cb->nlh->nlmsg_seq,
2385 mfc, RTM_NEWROUTE) < 0) { 2387 mfc, RTM_NEWROUTE,
2388 NLM_F_MULTI) < 0) {
2386 spin_unlock_bh(&mfc_unres_lock); 2389 spin_unlock_bh(&mfc_unres_lock);
2387 goto done; 2390 goto done;
2388 } 2391 }
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 3cf976510497..1e4eac779f51 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -2628,7 +2628,7 @@ static void get_timewait4_sock(const struct inet_timewait_sock *tw,
2628{ 2628{
2629 __be32 dest, src; 2629 __be32 dest, src;
2630 __u16 destp, srcp; 2630 __u16 destp, srcp;
2631 long delta = tw->tw_ttd - jiffies; 2631 s32 delta = tw->tw_ttd - inet_tw_time_stamp();
2632 2632
2633 dest = tw->tw_daddr; 2633 dest = tw->tw_daddr;
2634 src = tw->tw_rcv_saddr; 2634 src = tw->tw_rcv_saddr;
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 344e972426df..6c7fa0853fc7 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -133,10 +133,12 @@ static int ipv6_count_addresses(struct inet6_dev *idev);
133static struct hlist_head inet6_addr_lst[IN6_ADDR_HSIZE]; 133static struct hlist_head inet6_addr_lst[IN6_ADDR_HSIZE];
134static DEFINE_SPINLOCK(addrconf_hash_lock); 134static DEFINE_SPINLOCK(addrconf_hash_lock);
135 135
136static void addrconf_verify(unsigned long); 136static void addrconf_verify(void);
137static void addrconf_verify_rtnl(void);
138static void addrconf_verify_work(struct work_struct *);
137 139
138static DEFINE_TIMER(addr_chk_timer, addrconf_verify, 0, 0); 140static struct workqueue_struct *addrconf_wq;
139static DEFINE_SPINLOCK(addrconf_verify_lock); 141static DECLARE_DELAYED_WORK(addr_chk_work, addrconf_verify_work);
140 142
141static void addrconf_join_anycast(struct inet6_ifaddr *ifp); 143static void addrconf_join_anycast(struct inet6_ifaddr *ifp);
142static void addrconf_leave_anycast(struct inet6_ifaddr *ifp); 144static void addrconf_leave_anycast(struct inet6_ifaddr *ifp);
@@ -151,7 +153,7 @@ static struct rt6_info *addrconf_get_prefix_route(const struct in6_addr *pfx,
151 u32 flags, u32 noflags); 153 u32 flags, u32 noflags);
152 154
153static void addrconf_dad_start(struct inet6_ifaddr *ifp); 155static void addrconf_dad_start(struct inet6_ifaddr *ifp);
154static void addrconf_dad_timer(unsigned long data); 156static void addrconf_dad_work(struct work_struct *w);
155static void addrconf_dad_completed(struct inet6_ifaddr *ifp); 157static void addrconf_dad_completed(struct inet6_ifaddr *ifp);
156static void addrconf_dad_run(struct inet6_dev *idev); 158static void addrconf_dad_run(struct inet6_dev *idev);
157static void addrconf_rs_timer(unsigned long data); 159static void addrconf_rs_timer(unsigned long data);
@@ -247,9 +249,9 @@ static void addrconf_del_rs_timer(struct inet6_dev *idev)
247 __in6_dev_put(idev); 249 __in6_dev_put(idev);
248} 250}
249 251
250static void addrconf_del_dad_timer(struct inet6_ifaddr *ifp) 252static void addrconf_del_dad_work(struct inet6_ifaddr *ifp)
251{ 253{
252 if (del_timer(&ifp->dad_timer)) 254 if (cancel_delayed_work(&ifp->dad_work))
253 __in6_ifa_put(ifp); 255 __in6_ifa_put(ifp);
254} 256}
255 257
@@ -261,12 +263,12 @@ static void addrconf_mod_rs_timer(struct inet6_dev *idev,
261 mod_timer(&idev->rs_timer, jiffies + when); 263 mod_timer(&idev->rs_timer, jiffies + when);
262} 264}
263 265
264static void addrconf_mod_dad_timer(struct inet6_ifaddr *ifp, 266static void addrconf_mod_dad_work(struct inet6_ifaddr *ifp,
265 unsigned long when) 267 unsigned long delay)
266{ 268{
267 if (!timer_pending(&ifp->dad_timer)) 269 if (!delayed_work_pending(&ifp->dad_work))
268 in6_ifa_hold(ifp); 270 in6_ifa_hold(ifp);
269 mod_timer(&ifp->dad_timer, jiffies + when); 271 mod_delayed_work(addrconf_wq, &ifp->dad_work, delay);
270} 272}
271 273
272static int snmp6_alloc_dev(struct inet6_dev *idev) 274static int snmp6_alloc_dev(struct inet6_dev *idev)
@@ -751,8 +753,9 @@ void inet6_ifa_finish_destroy(struct inet6_ifaddr *ifp)
751 753
752 in6_dev_put(ifp->idev); 754 in6_dev_put(ifp->idev);
753 755
754 if (del_timer(&ifp->dad_timer)) 756 if (cancel_delayed_work(&ifp->dad_work))
755 pr_notice("Timer is still running, when freeing ifa=%p\n", ifp); 757 pr_notice("delayed DAD work was pending while freeing ifa=%p\n",
758 ifp);
756 759
757 if (ifp->state != INET6_IFADDR_STATE_DEAD) { 760 if (ifp->state != INET6_IFADDR_STATE_DEAD) {
758 pr_warn("Freeing alive inet6 address %p\n", ifp); 761 pr_warn("Freeing alive inet6 address %p\n", ifp);
@@ -849,8 +852,7 @@ ipv6_add_addr(struct inet6_dev *idev, const struct in6_addr *addr,
849 852
850 spin_lock_init(&ifa->lock); 853 spin_lock_init(&ifa->lock);
851 spin_lock_init(&ifa->state_lock); 854 spin_lock_init(&ifa->state_lock);
852 setup_timer(&ifa->dad_timer, addrconf_dad_timer, 855 INIT_DELAYED_WORK(&ifa->dad_work, addrconf_dad_work);
853 (unsigned long)ifa);
854 INIT_HLIST_NODE(&ifa->addr_lst); 856 INIT_HLIST_NODE(&ifa->addr_lst);
855 ifa->scope = scope; 857 ifa->scope = scope;
856 ifa->prefix_len = pfxlen; 858 ifa->prefix_len = pfxlen;
@@ -990,6 +992,8 @@ static void ipv6_del_addr(struct inet6_ifaddr *ifp)
990 enum cleanup_prefix_rt_t action = CLEANUP_PREFIX_RT_NOP; 992 enum cleanup_prefix_rt_t action = CLEANUP_PREFIX_RT_NOP;
991 unsigned long expires; 993 unsigned long expires;
992 994
995 ASSERT_RTNL();
996
993 spin_lock_bh(&ifp->state_lock); 997 spin_lock_bh(&ifp->state_lock);
994 state = ifp->state; 998 state = ifp->state;
995 ifp->state = INET6_IFADDR_STATE_DEAD; 999 ifp->state = INET6_IFADDR_STATE_DEAD;
@@ -1021,7 +1025,7 @@ static void ipv6_del_addr(struct inet6_ifaddr *ifp)
1021 1025
1022 write_unlock_bh(&ifp->idev->lock); 1026 write_unlock_bh(&ifp->idev->lock);
1023 1027
1024 addrconf_del_dad_timer(ifp); 1028 addrconf_del_dad_work(ifp);
1025 1029
1026 ipv6_ifa_notify(RTM_DELADDR, ifp); 1030 ipv6_ifa_notify(RTM_DELADDR, ifp);
1027 1031
@@ -1604,7 +1608,7 @@ static void addrconf_dad_stop(struct inet6_ifaddr *ifp, int dad_failed)
1604{ 1608{
1605 if (ifp->flags&IFA_F_PERMANENT) { 1609 if (ifp->flags&IFA_F_PERMANENT) {
1606 spin_lock_bh(&ifp->lock); 1610 spin_lock_bh(&ifp->lock);
1607 addrconf_del_dad_timer(ifp); 1611 addrconf_del_dad_work(ifp);
1608 ifp->flags |= IFA_F_TENTATIVE; 1612 ifp->flags |= IFA_F_TENTATIVE;
1609 if (dad_failed) 1613 if (dad_failed)
1610 ifp->flags |= IFA_F_DADFAILED; 1614 ifp->flags |= IFA_F_DADFAILED;
@@ -1625,20 +1629,21 @@ static void addrconf_dad_stop(struct inet6_ifaddr *ifp, int dad_failed)
1625 spin_unlock_bh(&ifp->lock); 1629 spin_unlock_bh(&ifp->lock);
1626 } 1630 }
1627 ipv6_del_addr(ifp); 1631 ipv6_del_addr(ifp);
1628 } else 1632 } else {
1629 ipv6_del_addr(ifp); 1633 ipv6_del_addr(ifp);
1634 }
1630} 1635}
1631 1636
1632static int addrconf_dad_end(struct inet6_ifaddr *ifp) 1637static int addrconf_dad_end(struct inet6_ifaddr *ifp)
1633{ 1638{
1634 int err = -ENOENT; 1639 int err = -ENOENT;
1635 1640
1636 spin_lock(&ifp->state_lock); 1641 spin_lock_bh(&ifp->state_lock);
1637 if (ifp->state == INET6_IFADDR_STATE_DAD) { 1642 if (ifp->state == INET6_IFADDR_STATE_DAD) {
1638 ifp->state = INET6_IFADDR_STATE_POSTDAD; 1643 ifp->state = INET6_IFADDR_STATE_POSTDAD;
1639 err = 0; 1644 err = 0;
1640 } 1645 }
1641 spin_unlock(&ifp->state_lock); 1646 spin_unlock_bh(&ifp->state_lock);
1642 1647
1643 return err; 1648 return err;
1644} 1649}
@@ -1671,7 +1676,12 @@ void addrconf_dad_failure(struct inet6_ifaddr *ifp)
1671 } 1676 }
1672 } 1677 }
1673 1678
1674 addrconf_dad_stop(ifp, 1); 1679 spin_lock_bh(&ifp->state_lock);
1680 /* transition from _POSTDAD to _ERRDAD */
1681 ifp->state = INET6_IFADDR_STATE_ERRDAD;
1682 spin_unlock_bh(&ifp->state_lock);
1683
1684 addrconf_mod_dad_work(ifp, 0);
1675} 1685}
1676 1686
1677/* Join to solicited addr multicast group. */ 1687/* Join to solicited addr multicast group. */
@@ -1680,6 +1690,8 @@ void addrconf_join_solict(struct net_device *dev, const struct in6_addr *addr)
1680{ 1690{
1681 struct in6_addr maddr; 1691 struct in6_addr maddr;
1682 1692
1693 ASSERT_RTNL();
1694
1683 if (dev->flags&(IFF_LOOPBACK|IFF_NOARP)) 1695 if (dev->flags&(IFF_LOOPBACK|IFF_NOARP))
1684 return; 1696 return;
1685 1697
@@ -1691,6 +1703,8 @@ void addrconf_leave_solict(struct inet6_dev *idev, const struct in6_addr *addr)
1691{ 1703{
1692 struct in6_addr maddr; 1704 struct in6_addr maddr;
1693 1705
1706 ASSERT_RTNL();
1707
1694 if (idev->dev->flags&(IFF_LOOPBACK|IFF_NOARP)) 1708 if (idev->dev->flags&(IFF_LOOPBACK|IFF_NOARP))
1695 return; 1709 return;
1696 1710
@@ -1701,6 +1715,9 @@ void addrconf_leave_solict(struct inet6_dev *idev, const struct in6_addr *addr)
1701static void addrconf_join_anycast(struct inet6_ifaddr *ifp) 1715static void addrconf_join_anycast(struct inet6_ifaddr *ifp)
1702{ 1716{
1703 struct in6_addr addr; 1717 struct in6_addr addr;
1718
1719 ASSERT_RTNL();
1720
1704 if (ifp->prefix_len >= 127) /* RFC 6164 */ 1721 if (ifp->prefix_len >= 127) /* RFC 6164 */
1705 return; 1722 return;
1706 ipv6_addr_prefix(&addr, &ifp->addr, ifp->prefix_len); 1723 ipv6_addr_prefix(&addr, &ifp->addr, ifp->prefix_len);
@@ -1712,6 +1729,9 @@ static void addrconf_join_anycast(struct inet6_ifaddr *ifp)
1712static void addrconf_leave_anycast(struct inet6_ifaddr *ifp) 1729static void addrconf_leave_anycast(struct inet6_ifaddr *ifp)
1713{ 1730{
1714 struct in6_addr addr; 1731 struct in6_addr addr;
1732
1733 ASSERT_RTNL();
1734
1715 if (ifp->prefix_len >= 127) /* RFC 6164 */ 1735 if (ifp->prefix_len >= 127) /* RFC 6164 */
1716 return; 1736 return;
1717 ipv6_addr_prefix(&addr, &ifp->addr, ifp->prefix_len); 1737 ipv6_addr_prefix(&addr, &ifp->addr, ifp->prefix_len);
@@ -2271,11 +2291,13 @@ ok:
2271 return; 2291 return;
2272 } 2292 }
2273 2293
2274 ifp->flags |= IFA_F_MANAGETEMPADDR;
2275 update_lft = 0; 2294 update_lft = 0;
2276 create = 1; 2295 create = 1;
2296 spin_lock_bh(&ifp->lock);
2297 ifp->flags |= IFA_F_MANAGETEMPADDR;
2277 ifp->cstamp = jiffies; 2298 ifp->cstamp = jiffies;
2278 ifp->tokenized = tokenized; 2299 ifp->tokenized = tokenized;
2300 spin_unlock_bh(&ifp->lock);
2279 addrconf_dad_start(ifp); 2301 addrconf_dad_start(ifp);
2280 } 2302 }
2281 2303
@@ -2326,7 +2348,7 @@ ok:
2326 create, now); 2348 create, now);
2327 2349
2328 in6_ifa_put(ifp); 2350 in6_ifa_put(ifp);
2329 addrconf_verify(0); 2351 addrconf_verify();
2330 } 2352 }
2331 } 2353 }
2332 inet6_prefix_notify(RTM_NEWPREFIX, in6_dev, pinfo); 2354 inet6_prefix_notify(RTM_NEWPREFIX, in6_dev, pinfo);
@@ -2475,7 +2497,7 @@ static int inet6_addr_add(struct net *net, int ifindex,
2475 manage_tempaddrs(idev, ifp, valid_lft, prefered_lft, 2497 manage_tempaddrs(idev, ifp, valid_lft, prefered_lft,
2476 true, jiffies); 2498 true, jiffies);
2477 in6_ifa_put(ifp); 2499 in6_ifa_put(ifp);
2478 addrconf_verify(0); 2500 addrconf_verify_rtnl();
2479 return 0; 2501 return 0;
2480 } 2502 }
2481 2503
@@ -3011,7 +3033,7 @@ static int addrconf_ifdown(struct net_device *dev, int how)
3011 hlist_for_each_entry_rcu(ifa, h, addr_lst) { 3033 hlist_for_each_entry_rcu(ifa, h, addr_lst) {
3012 if (ifa->idev == idev) { 3034 if (ifa->idev == idev) {
3013 hlist_del_init_rcu(&ifa->addr_lst); 3035 hlist_del_init_rcu(&ifa->addr_lst);
3014 addrconf_del_dad_timer(ifa); 3036 addrconf_del_dad_work(ifa);
3015 goto restart; 3037 goto restart;
3016 } 3038 }
3017 } 3039 }
@@ -3049,7 +3071,7 @@ static int addrconf_ifdown(struct net_device *dev, int how)
3049 while (!list_empty(&idev->addr_list)) { 3071 while (!list_empty(&idev->addr_list)) {
3050 ifa = list_first_entry(&idev->addr_list, 3072 ifa = list_first_entry(&idev->addr_list,
3051 struct inet6_ifaddr, if_list); 3073 struct inet6_ifaddr, if_list);
3052 addrconf_del_dad_timer(ifa); 3074 addrconf_del_dad_work(ifa);
3053 3075
3054 list_del(&ifa->if_list); 3076 list_del(&ifa->if_list);
3055 3077
@@ -3148,10 +3170,10 @@ static void addrconf_dad_kick(struct inet6_ifaddr *ifp)
3148 rand_num = prandom_u32() % (idev->cnf.rtr_solicit_delay ? : 1); 3170 rand_num = prandom_u32() % (idev->cnf.rtr_solicit_delay ? : 1);
3149 3171
3150 ifp->dad_probes = idev->cnf.dad_transmits; 3172 ifp->dad_probes = idev->cnf.dad_transmits;
3151 addrconf_mod_dad_timer(ifp, rand_num); 3173 addrconf_mod_dad_work(ifp, rand_num);
3152} 3174}
3153 3175
3154static void addrconf_dad_start(struct inet6_ifaddr *ifp) 3176static void addrconf_dad_begin(struct inet6_ifaddr *ifp)
3155{ 3177{
3156 struct inet6_dev *idev = ifp->idev; 3178 struct inet6_dev *idev = ifp->idev;
3157 struct net_device *dev = idev->dev; 3179 struct net_device *dev = idev->dev;
@@ -3203,25 +3225,68 @@ out:
3203 read_unlock_bh(&idev->lock); 3225 read_unlock_bh(&idev->lock);
3204} 3226}
3205 3227
3206static void addrconf_dad_timer(unsigned long data) 3228static void addrconf_dad_start(struct inet6_ifaddr *ifp)
3207{ 3229{
3208 struct inet6_ifaddr *ifp = (struct inet6_ifaddr *) data; 3230 bool begin_dad = false;
3231
3232 spin_lock_bh(&ifp->state_lock);
3233 if (ifp->state != INET6_IFADDR_STATE_DEAD) {
3234 ifp->state = INET6_IFADDR_STATE_PREDAD;
3235 begin_dad = true;
3236 }
3237 spin_unlock_bh(&ifp->state_lock);
3238
3239 if (begin_dad)
3240 addrconf_mod_dad_work(ifp, 0);
3241}
3242
3243static void addrconf_dad_work(struct work_struct *w)
3244{
3245 struct inet6_ifaddr *ifp = container_of(to_delayed_work(w),
3246 struct inet6_ifaddr,
3247 dad_work);
3209 struct inet6_dev *idev = ifp->idev; 3248 struct inet6_dev *idev = ifp->idev;
3210 struct in6_addr mcaddr; 3249 struct in6_addr mcaddr;
3211 3250
3251 enum {
3252 DAD_PROCESS,
3253 DAD_BEGIN,
3254 DAD_ABORT,
3255 } action = DAD_PROCESS;
3256
3257 rtnl_lock();
3258
3259 spin_lock_bh(&ifp->state_lock);
3260 if (ifp->state == INET6_IFADDR_STATE_PREDAD) {
3261 action = DAD_BEGIN;
3262 ifp->state = INET6_IFADDR_STATE_DAD;
3263 } else if (ifp->state == INET6_IFADDR_STATE_ERRDAD) {
3264 action = DAD_ABORT;
3265 ifp->state = INET6_IFADDR_STATE_POSTDAD;
3266 }
3267 spin_unlock_bh(&ifp->state_lock);
3268
3269 if (action == DAD_BEGIN) {
3270 addrconf_dad_begin(ifp);
3271 goto out;
3272 } else if (action == DAD_ABORT) {
3273 addrconf_dad_stop(ifp, 1);
3274 goto out;
3275 }
3276
3212 if (!ifp->dad_probes && addrconf_dad_end(ifp)) 3277 if (!ifp->dad_probes && addrconf_dad_end(ifp))
3213 goto out; 3278 goto out;
3214 3279
3215 write_lock(&idev->lock); 3280 write_lock_bh(&idev->lock);
3216 if (idev->dead || !(idev->if_flags & IF_READY)) { 3281 if (idev->dead || !(idev->if_flags & IF_READY)) {
3217 write_unlock(&idev->lock); 3282 write_unlock_bh(&idev->lock);
3218 goto out; 3283 goto out;
3219 } 3284 }
3220 3285
3221 spin_lock(&ifp->lock); 3286 spin_lock(&ifp->lock);
3222 if (ifp->state == INET6_IFADDR_STATE_DEAD) { 3287 if (ifp->state == INET6_IFADDR_STATE_DEAD) {
3223 spin_unlock(&ifp->lock); 3288 spin_unlock(&ifp->lock);
3224 write_unlock(&idev->lock); 3289 write_unlock_bh(&idev->lock);
3225 goto out; 3290 goto out;
3226 } 3291 }
3227 3292
@@ -3232,7 +3297,7 @@ static void addrconf_dad_timer(unsigned long data)
3232 3297
3233 ifp->flags &= ~(IFA_F_TENTATIVE|IFA_F_OPTIMISTIC|IFA_F_DADFAILED); 3298 ifp->flags &= ~(IFA_F_TENTATIVE|IFA_F_OPTIMISTIC|IFA_F_DADFAILED);
3234 spin_unlock(&ifp->lock); 3299 spin_unlock(&ifp->lock);
3235 write_unlock(&idev->lock); 3300 write_unlock_bh(&idev->lock);
3236 3301
3237 addrconf_dad_completed(ifp); 3302 addrconf_dad_completed(ifp);
3238 3303
@@ -3240,16 +3305,17 @@ static void addrconf_dad_timer(unsigned long data)
3240 } 3305 }
3241 3306
3242 ifp->dad_probes--; 3307 ifp->dad_probes--;
3243 addrconf_mod_dad_timer(ifp, 3308 addrconf_mod_dad_work(ifp,
3244 NEIGH_VAR(ifp->idev->nd_parms, RETRANS_TIME)); 3309 NEIGH_VAR(ifp->idev->nd_parms, RETRANS_TIME));
3245 spin_unlock(&ifp->lock); 3310 spin_unlock(&ifp->lock);
3246 write_unlock(&idev->lock); 3311 write_unlock_bh(&idev->lock);
3247 3312
3248 /* send a neighbour solicitation for our addr */ 3313 /* send a neighbour solicitation for our addr */
3249 addrconf_addr_solict_mult(&ifp->addr, &mcaddr); 3314 addrconf_addr_solict_mult(&ifp->addr, &mcaddr);
3250 ndisc_send_ns(ifp->idev->dev, NULL, &ifp->addr, &mcaddr, &in6addr_any); 3315 ndisc_send_ns(ifp->idev->dev, NULL, &ifp->addr, &mcaddr, &in6addr_any);
3251out: 3316out:
3252 in6_ifa_put(ifp); 3317 in6_ifa_put(ifp);
3318 rtnl_unlock();
3253} 3319}
3254 3320
3255/* ifp->idev must be at least read locked */ 3321/* ifp->idev must be at least read locked */
@@ -3276,7 +3342,7 @@ static void addrconf_dad_completed(struct inet6_ifaddr *ifp)
3276 struct in6_addr lladdr; 3342 struct in6_addr lladdr;
3277 bool send_rs, send_mld; 3343 bool send_rs, send_mld;
3278 3344
3279 addrconf_del_dad_timer(ifp); 3345 addrconf_del_dad_work(ifp);
3280 3346
3281 /* 3347 /*
3282 * Configure the address for reception. Now it is valid. 3348 * Configure the address for reception. Now it is valid.
@@ -3517,23 +3583,23 @@ int ipv6_chk_home_addr(struct net *net, const struct in6_addr *addr)
3517 * Periodic address status verification 3583 * Periodic address status verification
3518 */ 3584 */
3519 3585
3520static void addrconf_verify(unsigned long foo) 3586static void addrconf_verify_rtnl(void)
3521{ 3587{
3522 unsigned long now, next, next_sec, next_sched; 3588 unsigned long now, next, next_sec, next_sched;
3523 struct inet6_ifaddr *ifp; 3589 struct inet6_ifaddr *ifp;
3524 int i; 3590 int i;
3525 3591
3592 ASSERT_RTNL();
3593
3526 rcu_read_lock_bh(); 3594 rcu_read_lock_bh();
3527 spin_lock(&addrconf_verify_lock);
3528 now = jiffies; 3595 now = jiffies;
3529 next = round_jiffies_up(now + ADDR_CHECK_FREQUENCY); 3596 next = round_jiffies_up(now + ADDR_CHECK_FREQUENCY);
3530 3597
3531 del_timer(&addr_chk_timer); 3598 cancel_delayed_work(&addr_chk_work);
3532 3599
3533 for (i = 0; i < IN6_ADDR_HSIZE; i++) { 3600 for (i = 0; i < IN6_ADDR_HSIZE; i++) {
3534restart: 3601restart:
3535 hlist_for_each_entry_rcu_bh(ifp, 3602 hlist_for_each_entry_rcu_bh(ifp, &inet6_addr_lst[i], addr_lst) {
3536 &inet6_addr_lst[i], addr_lst) {
3537 unsigned long age; 3603 unsigned long age;
3538 3604
3539 /* When setting preferred_lft to a value not zero or 3605 /* When setting preferred_lft to a value not zero or
@@ -3628,13 +3694,22 @@ restart:
3628 3694
3629 ADBG(KERN_DEBUG "now = %lu, schedule = %lu, rounded schedule = %lu => %lu\n", 3695 ADBG(KERN_DEBUG "now = %lu, schedule = %lu, rounded schedule = %lu => %lu\n",
3630 now, next, next_sec, next_sched); 3696 now, next, next_sec, next_sched);
3631 3697 mod_delayed_work(addrconf_wq, &addr_chk_work, next_sched - now);
3632 addr_chk_timer.expires = next_sched;
3633 add_timer(&addr_chk_timer);
3634 spin_unlock(&addrconf_verify_lock);
3635 rcu_read_unlock_bh(); 3698 rcu_read_unlock_bh();
3636} 3699}
3637 3700
3701static void addrconf_verify_work(struct work_struct *w)
3702{
3703 rtnl_lock();
3704 addrconf_verify_rtnl();
3705 rtnl_unlock();
3706}
3707
3708static void addrconf_verify(void)
3709{
3710 mod_delayed_work(addrconf_wq, &addr_chk_work, 0);
3711}
3712
3638static struct in6_addr *extract_addr(struct nlattr *addr, struct nlattr *local, 3713static struct in6_addr *extract_addr(struct nlattr *addr, struct nlattr *local,
3639 struct in6_addr **peer_pfx) 3714 struct in6_addr **peer_pfx)
3640{ 3715{
@@ -3691,6 +3766,8 @@ static int inet6_addr_modify(struct inet6_ifaddr *ifp, u32 ifa_flags,
3691 bool was_managetempaddr; 3766 bool was_managetempaddr;
3692 bool had_prefixroute; 3767 bool had_prefixroute;
3693 3768
3769 ASSERT_RTNL();
3770
3694 if (!valid_lft || (prefered_lft > valid_lft)) 3771 if (!valid_lft || (prefered_lft > valid_lft))
3695 return -EINVAL; 3772 return -EINVAL;
3696 3773
@@ -3756,7 +3833,7 @@ static int inet6_addr_modify(struct inet6_ifaddr *ifp, u32 ifa_flags,
3756 !was_managetempaddr, jiffies); 3833 !was_managetempaddr, jiffies);
3757 } 3834 }
3758 3835
3759 addrconf_verify(0); 3836 addrconf_verify_rtnl();
3760 3837
3761 return 0; 3838 return 0;
3762} 3839}
@@ -4386,6 +4463,8 @@ static int inet6_set_iftoken(struct inet6_dev *idev, struct in6_addr *token)
4386 bool update_rs = false; 4463 bool update_rs = false;
4387 struct in6_addr ll_addr; 4464 struct in6_addr ll_addr;
4388 4465
4466 ASSERT_RTNL();
4467
4389 if (token == NULL) 4468 if (token == NULL)
4390 return -EINVAL; 4469 return -EINVAL;
4391 if (ipv6_addr_any(token)) 4470 if (ipv6_addr_any(token))
@@ -4434,7 +4513,7 @@ static int inet6_set_iftoken(struct inet6_dev *idev, struct in6_addr *token)
4434 } 4513 }
4435 4514
4436 write_unlock_bh(&idev->lock); 4515 write_unlock_bh(&idev->lock);
4437 addrconf_verify(0); 4516 addrconf_verify_rtnl();
4438 return 0; 4517 return 0;
4439} 4518}
4440 4519
@@ -4636,6 +4715,9 @@ static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp)
4636{ 4715{
4637 struct net *net = dev_net(ifp->idev->dev); 4716 struct net *net = dev_net(ifp->idev->dev);
4638 4717
4718 if (event)
4719 ASSERT_RTNL();
4720
4639 inet6_ifa_notify(event ? : RTM_NEWADDR, ifp); 4721 inet6_ifa_notify(event ? : RTM_NEWADDR, ifp);
4640 4722
4641 switch (event) { 4723 switch (event) {
@@ -5244,6 +5326,12 @@ int __init addrconf_init(void)
5244 if (err < 0) 5326 if (err < 0)
5245 goto out_addrlabel; 5327 goto out_addrlabel;
5246 5328
5329 addrconf_wq = create_workqueue("ipv6_addrconf");
5330 if (!addrconf_wq) {
5331 err = -ENOMEM;
5332 goto out_nowq;
5333 }
5334
5247 /* The addrconf netdev notifier requires that loopback_dev 5335 /* The addrconf netdev notifier requires that loopback_dev
5248 * has it's ipv6 private information allocated and setup 5336 * has it's ipv6 private information allocated and setup
5249 * before it can bring up and give link-local addresses 5337 * before it can bring up and give link-local addresses
@@ -5274,7 +5362,7 @@ int __init addrconf_init(void)
5274 5362
5275 register_netdevice_notifier(&ipv6_dev_notf); 5363 register_netdevice_notifier(&ipv6_dev_notf);
5276 5364
5277 addrconf_verify(0); 5365 addrconf_verify();
5278 5366
5279 rtnl_af_register(&inet6_ops); 5367 rtnl_af_register(&inet6_ops);
5280 5368
@@ -5302,6 +5390,8 @@ errout:
5302 rtnl_af_unregister(&inet6_ops); 5390 rtnl_af_unregister(&inet6_ops);
5303 unregister_netdevice_notifier(&ipv6_dev_notf); 5391 unregister_netdevice_notifier(&ipv6_dev_notf);
5304errlo: 5392errlo:
5393 destroy_workqueue(addrconf_wq);
5394out_nowq:
5305 unregister_pernet_subsys(&addrconf_ops); 5395 unregister_pernet_subsys(&addrconf_ops);
5306out_addrlabel: 5396out_addrlabel:
5307 ipv6_addr_label_cleanup(); 5397 ipv6_addr_label_cleanup();
@@ -5337,7 +5427,8 @@ void addrconf_cleanup(void)
5337 for (i = 0; i < IN6_ADDR_HSIZE; i++) 5427 for (i = 0; i < IN6_ADDR_HSIZE; i++)
5338 WARN_ON(!hlist_empty(&inet6_addr_lst[i])); 5428 WARN_ON(!hlist_empty(&inet6_addr_lst[i]));
5339 spin_unlock_bh(&addrconf_hash_lock); 5429 spin_unlock_bh(&addrconf_hash_lock);
5340 5430 cancel_delayed_work(&addr_chk_work);
5341 del_timer(&addr_chk_timer);
5342 rtnl_unlock(); 5431 rtnl_unlock();
5432
5433 destroy_workqueue(addrconf_wq);
5343} 5434}
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index 16f91a2e7888..64d6073731d3 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -1101,21 +1101,19 @@ static void ip6_append_data_mtu(unsigned int *mtu,
1101 unsigned int fragheaderlen, 1101 unsigned int fragheaderlen,
1102 struct sk_buff *skb, 1102 struct sk_buff *skb,
1103 struct rt6_info *rt, 1103 struct rt6_info *rt,
1104 bool pmtuprobe) 1104 unsigned int orig_mtu)
1105{ 1105{
1106 if (!(rt->dst.flags & DST_XFRM_TUNNEL)) { 1106 if (!(rt->dst.flags & DST_XFRM_TUNNEL)) {
1107 if (skb == NULL) { 1107 if (skb == NULL) {
1108 /* first fragment, reserve header_len */ 1108 /* first fragment, reserve header_len */
1109 *mtu = *mtu - rt->dst.header_len; 1109 *mtu = orig_mtu - rt->dst.header_len;
1110 1110
1111 } else { 1111 } else {
1112 /* 1112 /*
1113 * this fragment is not first, the headers 1113 * this fragment is not first, the headers
1114 * space is regarded as data space. 1114 * space is regarded as data space.
1115 */ 1115 */
1116 *mtu = min(*mtu, pmtuprobe ? 1116 *mtu = orig_mtu;
1117 rt->dst.dev->mtu :
1118 dst_mtu(rt->dst.path));
1119 } 1117 }
1120 *maxfraglen = ((*mtu - fragheaderlen) & ~7) 1118 *maxfraglen = ((*mtu - fragheaderlen) & ~7)
1121 + fragheaderlen - sizeof(struct frag_hdr); 1119 + fragheaderlen - sizeof(struct frag_hdr);
@@ -1132,7 +1130,7 @@ int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
1132 struct ipv6_pinfo *np = inet6_sk(sk); 1130 struct ipv6_pinfo *np = inet6_sk(sk);
1133 struct inet_cork *cork; 1131 struct inet_cork *cork;
1134 struct sk_buff *skb, *skb_prev = NULL; 1132 struct sk_buff *skb, *skb_prev = NULL;
1135 unsigned int maxfraglen, fragheaderlen, mtu; 1133 unsigned int maxfraglen, fragheaderlen, mtu, orig_mtu;
1136 int exthdrlen; 1134 int exthdrlen;
1137 int dst_exthdrlen; 1135 int dst_exthdrlen;
1138 int hh_len; 1136 int hh_len;
@@ -1214,6 +1212,7 @@ int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
1214 dst_exthdrlen = 0; 1212 dst_exthdrlen = 0;
1215 mtu = cork->fragsize; 1213 mtu = cork->fragsize;
1216 } 1214 }
1215 orig_mtu = mtu;
1217 1216
1218 hh_len = LL_RESERVED_SPACE(rt->dst.dev); 1217 hh_len = LL_RESERVED_SPACE(rt->dst.dev);
1219 1218
@@ -1311,8 +1310,7 @@ alloc_new_skb:
1311 if (skb == NULL || skb_prev == NULL) 1310 if (skb == NULL || skb_prev == NULL)
1312 ip6_append_data_mtu(&mtu, &maxfraglen, 1311 ip6_append_data_mtu(&mtu, &maxfraglen,
1313 fragheaderlen, skb, rt, 1312 fragheaderlen, skb, rt,
1314 np->pmtudisc >= 1313 orig_mtu);
1315 IPV6_PMTUDISC_PROBE);
1316 1314
1317 skb_prev = skb; 1315 skb_prev = skb;
1318 1316
diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
index 0eb4038a4d63..8737400af0a0 100644
--- a/net/ipv6/ip6mr.c
+++ b/net/ipv6/ip6mr.c
@@ -2349,13 +2349,14 @@ int ip6mr_get_route(struct net *net,
2349} 2349}
2350 2350
2351static int ip6mr_fill_mroute(struct mr6_table *mrt, struct sk_buff *skb, 2351static int ip6mr_fill_mroute(struct mr6_table *mrt, struct sk_buff *skb,
2352 u32 portid, u32 seq, struct mfc6_cache *c, int cmd) 2352 u32 portid, u32 seq, struct mfc6_cache *c, int cmd,
2353 int flags)
2353{ 2354{
2354 struct nlmsghdr *nlh; 2355 struct nlmsghdr *nlh;
2355 struct rtmsg *rtm; 2356 struct rtmsg *rtm;
2356 int err; 2357 int err;
2357 2358
2358 nlh = nlmsg_put(skb, portid, seq, cmd, sizeof(*rtm), NLM_F_MULTI); 2359 nlh = nlmsg_put(skb, portid, seq, cmd, sizeof(*rtm), flags);
2359 if (nlh == NULL) 2360 if (nlh == NULL)
2360 return -EMSGSIZE; 2361 return -EMSGSIZE;
2361 2362
@@ -2423,7 +2424,7 @@ static void mr6_netlink_event(struct mr6_table *mrt, struct mfc6_cache *mfc,
2423 if (skb == NULL) 2424 if (skb == NULL)
2424 goto errout; 2425 goto errout;
2425 2426
2426 err = ip6mr_fill_mroute(mrt, skb, 0, 0, mfc, cmd); 2427 err = ip6mr_fill_mroute(mrt, skb, 0, 0, mfc, cmd, 0);
2427 if (err < 0) 2428 if (err < 0)
2428 goto errout; 2429 goto errout;
2429 2430
@@ -2462,7 +2463,8 @@ static int ip6mr_rtm_dumproute(struct sk_buff *skb, struct netlink_callback *cb)
2462 if (ip6mr_fill_mroute(mrt, skb, 2463 if (ip6mr_fill_mroute(mrt, skb,
2463 NETLINK_CB(cb->skb).portid, 2464 NETLINK_CB(cb->skb).portid,
2464 cb->nlh->nlmsg_seq, 2465 cb->nlh->nlmsg_seq,
2465 mfc, RTM_NEWROUTE) < 0) 2466 mfc, RTM_NEWROUTE,
2467 NLM_F_MULTI) < 0)
2466 goto done; 2468 goto done;
2467next_entry: 2469next_entry:
2468 e++; 2470 e++;
@@ -2476,7 +2478,8 @@ next_entry:
2476 if (ip6mr_fill_mroute(mrt, skb, 2478 if (ip6mr_fill_mroute(mrt, skb,
2477 NETLINK_CB(cb->skb).portid, 2479 NETLINK_CB(cb->skb).portid,
2478 cb->nlh->nlmsg_seq, 2480 cb->nlh->nlmsg_seq,
2479 mfc, RTM_NEWROUTE) < 0) { 2481 mfc, RTM_NEWROUTE,
2482 NLM_F_MULTI) < 0) {
2480 spin_unlock_bh(&mfc_unres_lock); 2483 spin_unlock_bh(&mfc_unres_lock);
2481 goto done; 2484 goto done;
2482 } 2485 }
diff --git a/net/key/af_key.c b/net/key/af_key.c
index 1a04c1329362..79326978517a 100644
--- a/net/key/af_key.c
+++ b/net/key/af_key.c
@@ -433,12 +433,13 @@ static inline int verify_sec_ctx_len(const void *p)
433 return 0; 433 return 0;
434} 434}
435 435
436static inline struct xfrm_user_sec_ctx *pfkey_sadb2xfrm_user_sec_ctx(const struct sadb_x_sec_ctx *sec_ctx) 436static inline struct xfrm_user_sec_ctx *pfkey_sadb2xfrm_user_sec_ctx(const struct sadb_x_sec_ctx *sec_ctx,
437 gfp_t gfp)
437{ 438{
438 struct xfrm_user_sec_ctx *uctx = NULL; 439 struct xfrm_user_sec_ctx *uctx = NULL;
439 int ctx_size = sec_ctx->sadb_x_ctx_len; 440 int ctx_size = sec_ctx->sadb_x_ctx_len;
440 441
441 uctx = kmalloc((sizeof(*uctx)+ctx_size), GFP_KERNEL); 442 uctx = kmalloc((sizeof(*uctx)+ctx_size), gfp);
442 443
443 if (!uctx) 444 if (!uctx)
444 return NULL; 445 return NULL;
@@ -1124,7 +1125,7 @@ static struct xfrm_state * pfkey_msg2xfrm_state(struct net *net,
1124 1125
1125 sec_ctx = ext_hdrs[SADB_X_EXT_SEC_CTX - 1]; 1126 sec_ctx = ext_hdrs[SADB_X_EXT_SEC_CTX - 1];
1126 if (sec_ctx != NULL) { 1127 if (sec_ctx != NULL) {
1127 struct xfrm_user_sec_ctx *uctx = pfkey_sadb2xfrm_user_sec_ctx(sec_ctx); 1128 struct xfrm_user_sec_ctx *uctx = pfkey_sadb2xfrm_user_sec_ctx(sec_ctx, GFP_KERNEL);
1128 1129
1129 if (!uctx) 1130 if (!uctx)
1130 goto out; 1131 goto out;
@@ -2231,14 +2232,14 @@ static int pfkey_spdadd(struct sock *sk, struct sk_buff *skb, const struct sadb_
2231 2232
2232 sec_ctx = ext_hdrs[SADB_X_EXT_SEC_CTX - 1]; 2233 sec_ctx = ext_hdrs[SADB_X_EXT_SEC_CTX - 1];
2233 if (sec_ctx != NULL) { 2234 if (sec_ctx != NULL) {
2234 struct xfrm_user_sec_ctx *uctx = pfkey_sadb2xfrm_user_sec_ctx(sec_ctx); 2235 struct xfrm_user_sec_ctx *uctx = pfkey_sadb2xfrm_user_sec_ctx(sec_ctx, GFP_KERNEL);
2235 2236
2236 if (!uctx) { 2237 if (!uctx) {
2237 err = -ENOBUFS; 2238 err = -ENOBUFS;
2238 goto out; 2239 goto out;
2239 } 2240 }
2240 2241
2241 err = security_xfrm_policy_alloc(&xp->security, uctx); 2242 err = security_xfrm_policy_alloc(&xp->security, uctx, GFP_KERNEL);
2242 kfree(uctx); 2243 kfree(uctx);
2243 2244
2244 if (err) 2245 if (err)
@@ -2335,12 +2336,12 @@ static int pfkey_spddelete(struct sock *sk, struct sk_buff *skb, const struct sa
2335 2336
2336 sec_ctx = ext_hdrs[SADB_X_EXT_SEC_CTX - 1]; 2337 sec_ctx = ext_hdrs[SADB_X_EXT_SEC_CTX - 1];
2337 if (sec_ctx != NULL) { 2338 if (sec_ctx != NULL) {
2338 struct xfrm_user_sec_ctx *uctx = pfkey_sadb2xfrm_user_sec_ctx(sec_ctx); 2339 struct xfrm_user_sec_ctx *uctx = pfkey_sadb2xfrm_user_sec_ctx(sec_ctx, GFP_KERNEL);
2339 2340
2340 if (!uctx) 2341 if (!uctx)
2341 return -ENOMEM; 2342 return -ENOMEM;
2342 2343
2343 err = security_xfrm_policy_alloc(&pol_ctx, uctx); 2344 err = security_xfrm_policy_alloc(&pol_ctx, uctx, GFP_KERNEL);
2344 kfree(uctx); 2345 kfree(uctx);
2345 if (err) 2346 if (err)
2346 return err; 2347 return err;
@@ -3239,8 +3240,8 @@ static struct xfrm_policy *pfkey_compile_policy(struct sock *sk, int opt,
3239 } 3240 }
3240 if ((*dir = verify_sec_ctx_len(p))) 3241 if ((*dir = verify_sec_ctx_len(p)))
3241 goto out; 3242 goto out;
3242 uctx = pfkey_sadb2xfrm_user_sec_ctx(sec_ctx); 3243 uctx = pfkey_sadb2xfrm_user_sec_ctx(sec_ctx, GFP_ATOMIC);
3243 *dir = security_xfrm_policy_alloc(&xp->security, uctx); 3244 *dir = security_xfrm_policy_alloc(&xp->security, uctx, GFP_ATOMIC);
3244 kfree(uctx); 3245 kfree(uctx);
3245 3246
3246 if (*dir) 3247 if (*dir)
diff --git a/net/netfilter/nfnetlink_queue_core.c b/net/netfilter/nfnetlink_queue_core.c
index f072fe803510..108120f216b1 100644
--- a/net/netfilter/nfnetlink_queue_core.c
+++ b/net/netfilter/nfnetlink_queue_core.c
@@ -354,13 +354,16 @@ nfqnl_build_packet_message(struct net *net, struct nfqnl_instance *queue,
354 354
355 skb = nfnetlink_alloc_skb(net, size, queue->peer_portid, 355 skb = nfnetlink_alloc_skb(net, size, queue->peer_portid,
356 GFP_ATOMIC); 356 GFP_ATOMIC);
357 if (!skb) 357 if (!skb) {
358 skb_tx_error(entskb);
358 return NULL; 359 return NULL;
360 }
359 361
360 nlh = nlmsg_put(skb, 0, 0, 362 nlh = nlmsg_put(skb, 0, 0,
361 NFNL_SUBSYS_QUEUE << 8 | NFQNL_MSG_PACKET, 363 NFNL_SUBSYS_QUEUE << 8 | NFQNL_MSG_PACKET,
362 sizeof(struct nfgenmsg), 0); 364 sizeof(struct nfgenmsg), 0);
363 if (!nlh) { 365 if (!nlh) {
366 skb_tx_error(entskb);
364 kfree_skb(skb); 367 kfree_skb(skb);
365 return NULL; 368 return NULL;
366 } 369 }
@@ -488,13 +491,15 @@ nfqnl_build_packet_message(struct net *net, struct nfqnl_instance *queue,
488 nla->nla_type = NFQA_PAYLOAD; 491 nla->nla_type = NFQA_PAYLOAD;
489 nla->nla_len = nla_attr_size(data_len); 492 nla->nla_len = nla_attr_size(data_len);
490 493
491 skb_zerocopy(skb, entskb, data_len, hlen); 494 if (skb_zerocopy(skb, entskb, data_len, hlen))
495 goto nla_put_failure;
492 } 496 }
493 497
494 nlh->nlmsg_len = skb->len; 498 nlh->nlmsg_len = skb->len;
495 return skb; 499 return skb;
496 500
497nla_put_failure: 501nla_put_failure:
502 skb_tx_error(entskb);
498 kfree_skb(skb); 503 kfree_skb(skb);
499 net_err_ratelimited("nf_queue: error creating packet message\n"); 504 net_err_ratelimited("nf_queue: error creating packet message\n");
500 return NULL; 505 return NULL;
diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c
index e9a48baf8551..270b77dfac30 100644
--- a/net/openvswitch/datapath.c
+++ b/net/openvswitch/datapath.c
@@ -464,7 +464,9 @@ static int queue_userspace_packet(struct datapath *dp, struct sk_buff *skb,
464 } 464 }
465 nla->nla_len = nla_attr_size(skb->len); 465 nla->nla_len = nla_attr_size(skb->len);
466 466
467 skb_zerocopy(user_skb, skb, skb->len, hlen); 467 err = skb_zerocopy(user_skb, skb, skb->len, hlen);
468 if (err)
469 goto out;
468 470
469 /* Pad OVS_PACKET_ATTR_PACKET if linear copy was performed */ 471 /* Pad OVS_PACKET_ATTR_PACKET if linear copy was performed */
470 if (!(dp->user_features & OVS_DP_F_UNALIGNED)) { 472 if (!(dp->user_features & OVS_DP_F_UNALIGNED)) {
@@ -478,6 +480,8 @@ static int queue_userspace_packet(struct datapath *dp, struct sk_buff *skb,
478 480
479 err = genlmsg_unicast(ovs_dp_get_net(dp), user_skb, upcall_info->portid); 481 err = genlmsg_unicast(ovs_dp_get_net(dp), user_skb, upcall_info->portid);
480out: 482out:
483 if (err)
484 skb_tx_error(skb);
481 kfree_skb(nskb); 485 kfree_skb(nskb);
482 return err; 486 return err;
483} 487}
@@ -1174,7 +1178,7 @@ static void ovs_dp_reset_user_features(struct sk_buff *skb, struct genl_info *in
1174 struct datapath *dp; 1178 struct datapath *dp;
1175 1179
1176 dp = lookup_datapath(sock_net(skb->sk), info->userhdr, info->attrs); 1180 dp = lookup_datapath(sock_net(skb->sk), info->userhdr, info->attrs);
1177 if (!dp) 1181 if (IS_ERR(dp))
1178 return; 1182 return;
1179 1183
1180 WARN(dp->user_features, "Dropping previously announced user features\n"); 1184 WARN(dp->user_features, "Dropping previously announced user features\n");
@@ -1762,11 +1766,12 @@ static int ovs_vport_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
1762 int bucket = cb->args[0], skip = cb->args[1]; 1766 int bucket = cb->args[0], skip = cb->args[1];
1763 int i, j = 0; 1767 int i, j = 0;
1764 1768
1769 rcu_read_lock();
1765 dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex); 1770 dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
1766 if (!dp) 1771 if (!dp) {
1772 rcu_read_unlock();
1767 return -ENODEV; 1773 return -ENODEV;
1768 1774 }
1769 rcu_read_lock();
1770 for (i = bucket; i < DP_VPORT_HASH_BUCKETS; i++) { 1775 for (i = bucket; i < DP_VPORT_HASH_BUCKETS; i++) {
1771 struct vport *vport; 1776 struct vport *vport;
1772 1777
diff --git a/net/openvswitch/flow.c b/net/openvswitch/flow.c
index 16f4b46161d4..2998989e76db 100644
--- a/net/openvswitch/flow.c
+++ b/net/openvswitch/flow.c
@@ -73,6 +73,7 @@ void ovs_flow_stats_update(struct sw_flow *flow, struct sk_buff *skb)
73 73
74 if ((flow->key.eth.type == htons(ETH_P_IP) || 74 if ((flow->key.eth.type == htons(ETH_P_IP) ||
75 flow->key.eth.type == htons(ETH_P_IPV6)) && 75 flow->key.eth.type == htons(ETH_P_IPV6)) &&
76 flow->key.ip.frag != OVS_FRAG_TYPE_LATER &&
76 flow->key.ip.proto == IPPROTO_TCP && 77 flow->key.ip.proto == IPPROTO_TCP &&
77 likely(skb->len >= skb_transport_offset(skb) + sizeof(struct tcphdr))) { 78 likely(skb->len >= skb_transport_offset(skb) + sizeof(struct tcphdr))) {
78 tcp_flags = TCP_FLAGS_BE16(tcp_hdr(skb)); 79 tcp_flags = TCP_FLAGS_BE16(tcp_hdr(skb));
@@ -91,7 +92,7 @@ static void stats_read(struct flow_stats *stats,
91 unsigned long *used, __be16 *tcp_flags) 92 unsigned long *used, __be16 *tcp_flags)
92{ 93{
93 spin_lock(&stats->lock); 94 spin_lock(&stats->lock);
94 if (time_after(stats->used, *used)) 95 if (!*used || time_after(stats->used, *used))
95 *used = stats->used; 96 *used = stats->used;
96 *tcp_flags |= stats->tcp_flags; 97 *tcp_flags |= stats->tcp_flags;
97 ovs_stats->n_packets += stats->packet_count; 98 ovs_stats->n_packets += stats->packet_count;
@@ -102,30 +103,24 @@ static void stats_read(struct flow_stats *stats,
102void ovs_flow_stats_get(struct sw_flow *flow, struct ovs_flow_stats *ovs_stats, 103void ovs_flow_stats_get(struct sw_flow *flow, struct ovs_flow_stats *ovs_stats,
103 unsigned long *used, __be16 *tcp_flags) 104 unsigned long *used, __be16 *tcp_flags)
104{ 105{
105 int cpu, cur_cpu; 106 int cpu;
106 107
107 *used = 0; 108 *used = 0;
108 *tcp_flags = 0; 109 *tcp_flags = 0;
109 memset(ovs_stats, 0, sizeof(*ovs_stats)); 110 memset(ovs_stats, 0, sizeof(*ovs_stats));
110 111
112 local_bh_disable();
111 if (!flow->stats.is_percpu) { 113 if (!flow->stats.is_percpu) {
112 stats_read(flow->stats.stat, ovs_stats, used, tcp_flags); 114 stats_read(flow->stats.stat, ovs_stats, used, tcp_flags);
113 } else { 115 } else {
114 cur_cpu = get_cpu();
115 for_each_possible_cpu(cpu) { 116 for_each_possible_cpu(cpu) {
116 struct flow_stats *stats; 117 struct flow_stats *stats;
117 118
118 if (cpu == cur_cpu)
119 local_bh_disable();
120
121 stats = per_cpu_ptr(flow->stats.cpu_stats, cpu); 119 stats = per_cpu_ptr(flow->stats.cpu_stats, cpu);
122 stats_read(stats, ovs_stats, used, tcp_flags); 120 stats_read(stats, ovs_stats, used, tcp_flags);
123
124 if (cpu == cur_cpu)
125 local_bh_enable();
126 } 121 }
127 put_cpu();
128 } 122 }
123 local_bh_enable();
129} 124}
130 125
131static void stats_reset(struct flow_stats *stats) 126static void stats_reset(struct flow_stats *stats)
@@ -140,25 +135,17 @@ static void stats_reset(struct flow_stats *stats)
140 135
141void ovs_flow_stats_clear(struct sw_flow *flow) 136void ovs_flow_stats_clear(struct sw_flow *flow)
142{ 137{
143 int cpu, cur_cpu; 138 int cpu;
144 139
140 local_bh_disable();
145 if (!flow->stats.is_percpu) { 141 if (!flow->stats.is_percpu) {
146 stats_reset(flow->stats.stat); 142 stats_reset(flow->stats.stat);
147 } else { 143 } else {
148 cur_cpu = get_cpu();
149
150 for_each_possible_cpu(cpu) { 144 for_each_possible_cpu(cpu) {
151
152 if (cpu == cur_cpu)
153 local_bh_disable();
154
155 stats_reset(per_cpu_ptr(flow->stats.cpu_stats, cpu)); 145 stats_reset(per_cpu_ptr(flow->stats.cpu_stats, cpu));
156
157 if (cpu == cur_cpu)
158 local_bh_enable();
159 } 146 }
160 put_cpu();
161 } 147 }
148 local_bh_enable();
162} 149}
163 150
164static int check_header(struct sk_buff *skb, int len) 151static int check_header(struct sk_buff *skb, int len)
diff --git a/net/tipc/subscr.c b/net/tipc/subscr.c
index 11c9ae00837d..642437231ad5 100644
--- a/net/tipc/subscr.c
+++ b/net/tipc/subscr.c
@@ -263,9 +263,9 @@ static void subscr_cancel(struct tipc_subscr *s,
263 * 263 *
264 * Called with subscriber lock held. 264 * Called with subscriber lock held.
265 */ 265 */
266static struct tipc_subscription *subscr_subscribe(struct tipc_subscr *s, 266static int subscr_subscribe(struct tipc_subscr *s,
267 struct tipc_subscriber *subscriber) 267 struct tipc_subscriber *subscriber,
268{ 268 struct tipc_subscription **sub_p) {
269 struct tipc_subscription *sub; 269 struct tipc_subscription *sub;
270 int swap; 270 int swap;
271 271
@@ -276,23 +276,21 @@ static struct tipc_subscription *subscr_subscribe(struct tipc_subscr *s,
276 if (s->filter & htohl(TIPC_SUB_CANCEL, swap)) { 276 if (s->filter & htohl(TIPC_SUB_CANCEL, swap)) {
277 s->filter &= ~htohl(TIPC_SUB_CANCEL, swap); 277 s->filter &= ~htohl(TIPC_SUB_CANCEL, swap);
278 subscr_cancel(s, subscriber); 278 subscr_cancel(s, subscriber);
279 return NULL; 279 return 0;
280 } 280 }
281 281
282 /* Refuse subscription if global limit exceeded */ 282 /* Refuse subscription if global limit exceeded */
283 if (atomic_read(&subscription_count) >= TIPC_MAX_SUBSCRIPTIONS) { 283 if (atomic_read(&subscription_count) >= TIPC_MAX_SUBSCRIPTIONS) {
284 pr_warn("Subscription rejected, limit reached (%u)\n", 284 pr_warn("Subscription rejected, limit reached (%u)\n",
285 TIPC_MAX_SUBSCRIPTIONS); 285 TIPC_MAX_SUBSCRIPTIONS);
286 subscr_terminate(subscriber); 286 return -EINVAL;
287 return NULL;
288 } 287 }
289 288
290 /* Allocate subscription object */ 289 /* Allocate subscription object */
291 sub = kmalloc(sizeof(*sub), GFP_ATOMIC); 290 sub = kmalloc(sizeof(*sub), GFP_ATOMIC);
292 if (!sub) { 291 if (!sub) {
293 pr_warn("Subscription rejected, no memory\n"); 292 pr_warn("Subscription rejected, no memory\n");
294 subscr_terminate(subscriber); 293 return -ENOMEM;
295 return NULL;
296 } 294 }
297 295
298 /* Initialize subscription object */ 296 /* Initialize subscription object */
@@ -306,8 +304,7 @@ static struct tipc_subscription *subscr_subscribe(struct tipc_subscr *s,
306 (sub->seq.lower > sub->seq.upper)) { 304 (sub->seq.lower > sub->seq.upper)) {
307 pr_warn("Subscription rejected, illegal request\n"); 305 pr_warn("Subscription rejected, illegal request\n");
308 kfree(sub); 306 kfree(sub);
309 subscr_terminate(subscriber); 307 return -EINVAL;
310 return NULL;
311 } 308 }
312 INIT_LIST_HEAD(&sub->nameseq_list); 309 INIT_LIST_HEAD(&sub->nameseq_list);
313 list_add(&sub->subscription_list, &subscriber->subscription_list); 310 list_add(&sub->subscription_list, &subscriber->subscription_list);
@@ -320,8 +317,8 @@ static struct tipc_subscription *subscr_subscribe(struct tipc_subscr *s,
320 (Handler)subscr_timeout, (unsigned long)sub); 317 (Handler)subscr_timeout, (unsigned long)sub);
321 k_start_timer(&sub->timer, sub->timeout); 318 k_start_timer(&sub->timer, sub->timeout);
322 } 319 }
323 320 *sub_p = sub;
324 return sub; 321 return 0;
325} 322}
326 323
327/* Handle one termination request for the subscriber */ 324/* Handle one termination request for the subscriber */
@@ -335,10 +332,14 @@ static void subscr_conn_msg_event(int conid, struct sockaddr_tipc *addr,
335 void *usr_data, void *buf, size_t len) 332 void *usr_data, void *buf, size_t len)
336{ 333{
337 struct tipc_subscriber *subscriber = usr_data; 334 struct tipc_subscriber *subscriber = usr_data;
338 struct tipc_subscription *sub; 335 struct tipc_subscription *sub = NULL;
339 336
340 spin_lock_bh(&subscriber->lock); 337 spin_lock_bh(&subscriber->lock);
341 sub = subscr_subscribe((struct tipc_subscr *)buf, subscriber); 338 if (subscr_subscribe((struct tipc_subscr *)buf, subscriber, &sub) < 0) {
339 spin_unlock_bh(&subscriber->lock);
340 subscr_terminate(subscriber);
341 return;
342 }
342 if (sub) 343 if (sub)
343 tipc_nametbl_subscribe(sub); 344 tipc_nametbl_subscribe(sub);
344 spin_unlock_bh(&subscriber->lock); 345 spin_unlock_bh(&subscriber->lock);
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index ce6ec6c2f4de..94404f19f9de 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -1787,8 +1787,11 @@ static int unix_dgram_recvmsg(struct kiocb *iocb, struct socket *sock,
1787 goto out; 1787 goto out;
1788 1788
1789 err = mutex_lock_interruptible(&u->readlock); 1789 err = mutex_lock_interruptible(&u->readlock);
1790 if (err) { 1790 if (unlikely(err)) {
1791 err = sock_intr_errno(sock_rcvtimeo(sk, noblock)); 1791 /* recvmsg() in non blocking mode is supposed to return -EAGAIN
1792 * sk_rcvtimeo is not honored by mutex_lock_interruptible()
1793 */
1794 err = noblock ? -EAGAIN : -ERESTARTSYS;
1792 goto out; 1795 goto out;
1793 } 1796 }
1794 1797
@@ -1913,6 +1916,7 @@ static int unix_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
1913 struct unix_sock *u = unix_sk(sk); 1916 struct unix_sock *u = unix_sk(sk);
1914 DECLARE_SOCKADDR(struct sockaddr_un *, sunaddr, msg->msg_name); 1917 DECLARE_SOCKADDR(struct sockaddr_un *, sunaddr, msg->msg_name);
1915 int copied = 0; 1918 int copied = 0;
1919 int noblock = flags & MSG_DONTWAIT;
1916 int check_creds = 0; 1920 int check_creds = 0;
1917 int target; 1921 int target;
1918 int err = 0; 1922 int err = 0;
@@ -1928,7 +1932,7 @@ static int unix_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
1928 goto out; 1932 goto out;
1929 1933
1930 target = sock_rcvlowat(sk, flags&MSG_WAITALL, size); 1934 target = sock_rcvlowat(sk, flags&MSG_WAITALL, size);
1931 timeo = sock_rcvtimeo(sk, flags&MSG_DONTWAIT); 1935 timeo = sock_rcvtimeo(sk, noblock);
1932 1936
1933 /* Lock the socket to prevent queue disordering 1937 /* Lock the socket to prevent queue disordering
1934 * while sleeps in memcpy_tomsg 1938 * while sleeps in memcpy_tomsg
@@ -1940,8 +1944,11 @@ static int unix_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
1940 } 1944 }
1941 1945
1942 err = mutex_lock_interruptible(&u->readlock); 1946 err = mutex_lock_interruptible(&u->readlock);
1943 if (err) { 1947 if (unlikely(err)) {
1944 err = sock_intr_errno(timeo); 1948 /* recvmsg() in non blocking mode is supposed to return -EAGAIN
1949 * sk_rcvtimeo is not honored by mutex_lock_interruptible()
1950 */
1951 err = noblock ? -EAGAIN : -ERESTARTSYS;
1945 goto out; 1952 goto out;
1946 } 1953 }
1947 1954
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
index c274179d60a2..2f7ddc3a59b4 100644
--- a/net/xfrm/xfrm_user.c
+++ b/net/xfrm/xfrm_user.c
@@ -1221,7 +1221,7 @@ static int copy_from_user_sec_ctx(struct xfrm_policy *pol, struct nlattr **attrs
1221 return 0; 1221 return 0;
1222 1222
1223 uctx = nla_data(rt); 1223 uctx = nla_data(rt);
1224 return security_xfrm_policy_alloc(&pol->security, uctx); 1224 return security_xfrm_policy_alloc(&pol->security, uctx, GFP_KERNEL);
1225} 1225}
1226 1226
1227static void copy_templates(struct xfrm_policy *xp, struct xfrm_user_tmpl *ut, 1227static void copy_templates(struct xfrm_policy *xp, struct xfrm_user_tmpl *ut,
@@ -1626,7 +1626,7 @@ static int xfrm_get_policy(struct sk_buff *skb, struct nlmsghdr *nlh,
1626 if (rt) { 1626 if (rt) {
1627 struct xfrm_user_sec_ctx *uctx = nla_data(rt); 1627 struct xfrm_user_sec_ctx *uctx = nla_data(rt);
1628 1628
1629 err = security_xfrm_policy_alloc(&ctx, uctx); 1629 err = security_xfrm_policy_alloc(&ctx, uctx, GFP_KERNEL);
1630 if (err) 1630 if (err)
1631 return err; 1631 return err;
1632 } 1632 }
@@ -1928,7 +1928,7 @@ static int xfrm_add_pol_expire(struct sk_buff *skb, struct nlmsghdr *nlh,
1928 if (rt) { 1928 if (rt) {
1929 struct xfrm_user_sec_ctx *uctx = nla_data(rt); 1929 struct xfrm_user_sec_ctx *uctx = nla_data(rt);
1930 1930
1931 err = security_xfrm_policy_alloc(&ctx, uctx); 1931 err = security_xfrm_policy_alloc(&ctx, uctx, GFP_KERNEL);
1932 if (err) 1932 if (err)
1933 return err; 1933 return err;
1934 } 1934 }
diff --git a/security/capability.c b/security/capability.c
index 8b4f24ae4338..21e2b9cae685 100644
--- a/security/capability.c
+++ b/security/capability.c
@@ -757,7 +757,8 @@ static void cap_skb_owned_by(struct sk_buff *skb, struct sock *sk)
757 757
758#ifdef CONFIG_SECURITY_NETWORK_XFRM 758#ifdef CONFIG_SECURITY_NETWORK_XFRM
759static int cap_xfrm_policy_alloc_security(struct xfrm_sec_ctx **ctxp, 759static int cap_xfrm_policy_alloc_security(struct xfrm_sec_ctx **ctxp,
760 struct xfrm_user_sec_ctx *sec_ctx) 760 struct xfrm_user_sec_ctx *sec_ctx,
761 gfp_t gfp)
761{ 762{
762 return 0; 763 return 0;
763} 764}
diff --git a/security/security.c b/security/security.c
index 15b6928592ef..919cad93ac82 100644
--- a/security/security.c
+++ b/security/security.c
@@ -1317,9 +1317,11 @@ void security_skb_owned_by(struct sk_buff *skb, struct sock *sk)
1317 1317
1318#ifdef CONFIG_SECURITY_NETWORK_XFRM 1318#ifdef CONFIG_SECURITY_NETWORK_XFRM
1319 1319
1320int security_xfrm_policy_alloc(struct xfrm_sec_ctx **ctxp, struct xfrm_user_sec_ctx *sec_ctx) 1320int security_xfrm_policy_alloc(struct xfrm_sec_ctx **ctxp,
1321 struct xfrm_user_sec_ctx *sec_ctx,
1322 gfp_t gfp)
1321{ 1323{
1322 return security_ops->xfrm_policy_alloc_security(ctxp, sec_ctx); 1324 return security_ops->xfrm_policy_alloc_security(ctxp, sec_ctx, gfp);
1323} 1325}
1324EXPORT_SYMBOL(security_xfrm_policy_alloc); 1326EXPORT_SYMBOL(security_xfrm_policy_alloc);
1325 1327
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
index 4b34847208cc..b332e2cc0954 100644
--- a/security/selinux/hooks.c
+++ b/security/selinux/hooks.c
@@ -668,7 +668,7 @@ static int selinux_set_mnt_opts(struct super_block *sb,
668 if (flags[i] == SBLABEL_MNT) 668 if (flags[i] == SBLABEL_MNT)
669 continue; 669 continue;
670 rc = security_context_to_sid(mount_options[i], 670 rc = security_context_to_sid(mount_options[i],
671 strlen(mount_options[i]), &sid); 671 strlen(mount_options[i]), &sid, GFP_KERNEL);
672 if (rc) { 672 if (rc) {
673 printk(KERN_WARNING "SELinux: security_context_to_sid" 673 printk(KERN_WARNING "SELinux: security_context_to_sid"
674 "(%s) failed for (dev %s, type %s) errno=%d\n", 674 "(%s) failed for (dev %s, type %s) errno=%d\n",
@@ -2489,7 +2489,8 @@ static int selinux_sb_remount(struct super_block *sb, void *data)
2489 if (flags[i] == SBLABEL_MNT) 2489 if (flags[i] == SBLABEL_MNT)
2490 continue; 2490 continue;
2491 len = strlen(mount_options[i]); 2491 len = strlen(mount_options[i]);
2492 rc = security_context_to_sid(mount_options[i], len, &sid); 2492 rc = security_context_to_sid(mount_options[i], len, &sid,
2493 GFP_KERNEL);
2493 if (rc) { 2494 if (rc) {
2494 printk(KERN_WARNING "SELinux: security_context_to_sid" 2495 printk(KERN_WARNING "SELinux: security_context_to_sid"
2495 "(%s) failed for (dev %s, type %s) errno=%d\n", 2496 "(%s) failed for (dev %s, type %s) errno=%d\n",
@@ -2893,7 +2894,7 @@ static int selinux_inode_setxattr(struct dentry *dentry, const char *name,
2893 if (rc) 2894 if (rc)
2894 return rc; 2895 return rc;
2895 2896
2896 rc = security_context_to_sid(value, size, &newsid); 2897 rc = security_context_to_sid(value, size, &newsid, GFP_KERNEL);
2897 if (rc == -EINVAL) { 2898 if (rc == -EINVAL) {
2898 if (!capable(CAP_MAC_ADMIN)) { 2899 if (!capable(CAP_MAC_ADMIN)) {
2899 struct audit_buffer *ab; 2900 struct audit_buffer *ab;
@@ -3050,7 +3051,7 @@ static int selinux_inode_setsecurity(struct inode *inode, const char *name,
3050 if (!value || !size) 3051 if (!value || !size)
3051 return -EACCES; 3052 return -EACCES;
3052 3053
3053 rc = security_context_to_sid((void *)value, size, &newsid); 3054 rc = security_context_to_sid((void *)value, size, &newsid, GFP_KERNEL);
3054 if (rc) 3055 if (rc)
3055 return rc; 3056 return rc;
3056 3057
@@ -5529,7 +5530,7 @@ static int selinux_setprocattr(struct task_struct *p,
5529 str[size-1] = 0; 5530 str[size-1] = 0;
5530 size--; 5531 size--;
5531 } 5532 }
5532 error = security_context_to_sid(value, size, &sid); 5533 error = security_context_to_sid(value, size, &sid, GFP_KERNEL);
5533 if (error == -EINVAL && !strcmp(name, "fscreate")) { 5534 if (error == -EINVAL && !strcmp(name, "fscreate")) {
5534 if (!capable(CAP_MAC_ADMIN)) { 5535 if (!capable(CAP_MAC_ADMIN)) {
5535 struct audit_buffer *ab; 5536 struct audit_buffer *ab;
@@ -5638,7 +5639,7 @@ static int selinux_secid_to_secctx(u32 secid, char **secdata, u32 *seclen)
5638 5639
5639static int selinux_secctx_to_secid(const char *secdata, u32 seclen, u32 *secid) 5640static int selinux_secctx_to_secid(const char *secdata, u32 seclen, u32 *secid)
5640{ 5641{
5641 return security_context_to_sid(secdata, seclen, secid); 5642 return security_context_to_sid(secdata, seclen, secid, GFP_KERNEL);
5642} 5643}
5643 5644
5644static void selinux_release_secctx(char *secdata, u32 seclen) 5645static void selinux_release_secctx(char *secdata, u32 seclen)
diff --git a/security/selinux/include/security.h b/security/selinux/include/security.h
index 8ed8daf7f1ee..ce7852cf526b 100644
--- a/security/selinux/include/security.h
+++ b/security/selinux/include/security.h
@@ -134,7 +134,7 @@ int security_sid_to_context(u32 sid, char **scontext,
134int security_sid_to_context_force(u32 sid, char **scontext, u32 *scontext_len); 134int security_sid_to_context_force(u32 sid, char **scontext, u32 *scontext_len);
135 135
136int security_context_to_sid(const char *scontext, u32 scontext_len, 136int security_context_to_sid(const char *scontext, u32 scontext_len,
137 u32 *out_sid); 137 u32 *out_sid, gfp_t gfp);
138 138
139int security_context_to_sid_default(const char *scontext, u32 scontext_len, 139int security_context_to_sid_default(const char *scontext, u32 scontext_len,
140 u32 *out_sid, u32 def_sid, gfp_t gfp_flags); 140 u32 *out_sid, u32 def_sid, gfp_t gfp_flags);
diff --git a/security/selinux/include/xfrm.h b/security/selinux/include/xfrm.h
index 48c3cc94c168..9f0584710c85 100644
--- a/security/selinux/include/xfrm.h
+++ b/security/selinux/include/xfrm.h
@@ -10,7 +10,8 @@
10#include <net/flow.h> 10#include <net/flow.h>
11 11
12int selinux_xfrm_policy_alloc(struct xfrm_sec_ctx **ctxp, 12int selinux_xfrm_policy_alloc(struct xfrm_sec_ctx **ctxp,
13 struct xfrm_user_sec_ctx *uctx); 13 struct xfrm_user_sec_ctx *uctx,
14 gfp_t gfp);
14int selinux_xfrm_policy_clone(struct xfrm_sec_ctx *old_ctx, 15int selinux_xfrm_policy_clone(struct xfrm_sec_ctx *old_ctx,
15 struct xfrm_sec_ctx **new_ctxp); 16 struct xfrm_sec_ctx **new_ctxp);
16void selinux_xfrm_policy_free(struct xfrm_sec_ctx *ctx); 17void selinux_xfrm_policy_free(struct xfrm_sec_ctx *ctx);
diff --git a/security/selinux/selinuxfs.c b/security/selinux/selinuxfs.c
index 5122affe06a8..d60c0ee66387 100644
--- a/security/selinux/selinuxfs.c
+++ b/security/selinux/selinuxfs.c
@@ -576,7 +576,7 @@ static ssize_t sel_write_context(struct file *file, char *buf, size_t size)
576 if (length) 576 if (length)
577 goto out; 577 goto out;
578 578
579 length = security_context_to_sid(buf, size, &sid); 579 length = security_context_to_sid(buf, size, &sid, GFP_KERNEL);
580 if (length) 580 if (length)
581 goto out; 581 goto out;
582 582
@@ -731,11 +731,13 @@ static ssize_t sel_write_access(struct file *file, char *buf, size_t size)
731 if (sscanf(buf, "%s %s %hu", scon, tcon, &tclass) != 3) 731 if (sscanf(buf, "%s %s %hu", scon, tcon, &tclass) != 3)
732 goto out; 732 goto out;
733 733
734 length = security_context_to_sid(scon, strlen(scon) + 1, &ssid); 734 length = security_context_to_sid(scon, strlen(scon) + 1, &ssid,
735 GFP_KERNEL);
735 if (length) 736 if (length)
736 goto out; 737 goto out;
737 738
738 length = security_context_to_sid(tcon, strlen(tcon) + 1, &tsid); 739 length = security_context_to_sid(tcon, strlen(tcon) + 1, &tsid,
740 GFP_KERNEL);
739 if (length) 741 if (length)
740 goto out; 742 goto out;
741 743
@@ -817,11 +819,13 @@ static ssize_t sel_write_create(struct file *file, char *buf, size_t size)
817 objname = namebuf; 819 objname = namebuf;
818 } 820 }
819 821
820 length = security_context_to_sid(scon, strlen(scon) + 1, &ssid); 822 length = security_context_to_sid(scon, strlen(scon) + 1, &ssid,
823 GFP_KERNEL);
821 if (length) 824 if (length)
822 goto out; 825 goto out;
823 826
824 length = security_context_to_sid(tcon, strlen(tcon) + 1, &tsid); 827 length = security_context_to_sid(tcon, strlen(tcon) + 1, &tsid,
828 GFP_KERNEL);
825 if (length) 829 if (length)
826 goto out; 830 goto out;
827 831
@@ -878,11 +882,13 @@ static ssize_t sel_write_relabel(struct file *file, char *buf, size_t size)
878 if (sscanf(buf, "%s %s %hu", scon, tcon, &tclass) != 3) 882 if (sscanf(buf, "%s %s %hu", scon, tcon, &tclass) != 3)
879 goto out; 883 goto out;
880 884
881 length = security_context_to_sid(scon, strlen(scon) + 1, &ssid); 885 length = security_context_to_sid(scon, strlen(scon) + 1, &ssid,
886 GFP_KERNEL);
882 if (length) 887 if (length)
883 goto out; 888 goto out;
884 889
885 length = security_context_to_sid(tcon, strlen(tcon) + 1, &tsid); 890 length = security_context_to_sid(tcon, strlen(tcon) + 1, &tsid,
891 GFP_KERNEL);
886 if (length) 892 if (length)
887 goto out; 893 goto out;
888 894
@@ -934,7 +940,7 @@ static ssize_t sel_write_user(struct file *file, char *buf, size_t size)
934 if (sscanf(buf, "%s %s", con, user) != 2) 940 if (sscanf(buf, "%s %s", con, user) != 2)
935 goto out; 941 goto out;
936 942
937 length = security_context_to_sid(con, strlen(con) + 1, &sid); 943 length = security_context_to_sid(con, strlen(con) + 1, &sid, GFP_KERNEL);
938 if (length) 944 if (length)
939 goto out; 945 goto out;
940 946
@@ -994,11 +1000,13 @@ static ssize_t sel_write_member(struct file *file, char *buf, size_t size)
994 if (sscanf(buf, "%s %s %hu", scon, tcon, &tclass) != 3) 1000 if (sscanf(buf, "%s %s %hu", scon, tcon, &tclass) != 3)
995 goto out; 1001 goto out;
996 1002
997 length = security_context_to_sid(scon, strlen(scon) + 1, &ssid); 1003 length = security_context_to_sid(scon, strlen(scon) + 1, &ssid,
1004 GFP_KERNEL);
998 if (length) 1005 if (length)
999 goto out; 1006 goto out;
1000 1007
1001 length = security_context_to_sid(tcon, strlen(tcon) + 1, &tsid); 1008 length = security_context_to_sid(tcon, strlen(tcon) + 1, &tsid,
1009 GFP_KERNEL);
1002 if (length) 1010 if (length)
1003 goto out; 1011 goto out;
1004 1012
diff --git a/security/selinux/ss/services.c b/security/selinux/ss/services.c
index 5d0144ee8ed6..4bca49414a40 100644
--- a/security/selinux/ss/services.c
+++ b/security/selinux/ss/services.c
@@ -1289,16 +1289,18 @@ out:
1289 * @scontext: security context 1289 * @scontext: security context
1290 * @scontext_len: length in bytes 1290 * @scontext_len: length in bytes
1291 * @sid: security identifier, SID 1291 * @sid: security identifier, SID
1292 * @gfp: context for the allocation
1292 * 1293 *
1293 * Obtains a SID associated with the security context that 1294 * Obtains a SID associated with the security context that
1294 * has the string representation specified by @scontext. 1295 * has the string representation specified by @scontext.
1295 * Returns -%EINVAL if the context is invalid, -%ENOMEM if insufficient 1296 * Returns -%EINVAL if the context is invalid, -%ENOMEM if insufficient
1296 * memory is available, or 0 on success. 1297 * memory is available, or 0 on success.
1297 */ 1298 */
1298int security_context_to_sid(const char *scontext, u32 scontext_len, u32 *sid) 1299int security_context_to_sid(const char *scontext, u32 scontext_len, u32 *sid,
1300 gfp_t gfp)
1299{ 1301{
1300 return security_context_to_sid_core(scontext, scontext_len, 1302 return security_context_to_sid_core(scontext, scontext_len,
1301 sid, SECSID_NULL, GFP_KERNEL, 0); 1303 sid, SECSID_NULL, gfp, 0);
1302} 1304}
1303 1305
1304/** 1306/**
diff --git a/security/selinux/xfrm.c b/security/selinux/xfrm.c
index 0462cb3ff0a7..98b042630a9e 100644
--- a/security/selinux/xfrm.c
+++ b/security/selinux/xfrm.c
@@ -78,7 +78,8 @@ static inline int selinux_authorizable_xfrm(struct xfrm_state *x)
78 * xfrm_user_sec_ctx context. 78 * xfrm_user_sec_ctx context.
79 */ 79 */
80static int selinux_xfrm_alloc_user(struct xfrm_sec_ctx **ctxp, 80static int selinux_xfrm_alloc_user(struct xfrm_sec_ctx **ctxp,
81 struct xfrm_user_sec_ctx *uctx) 81 struct xfrm_user_sec_ctx *uctx,
82 gfp_t gfp)
82{ 83{
83 int rc; 84 int rc;
84 const struct task_security_struct *tsec = current_security(); 85 const struct task_security_struct *tsec = current_security();
@@ -94,7 +95,7 @@ static int selinux_xfrm_alloc_user(struct xfrm_sec_ctx **ctxp,
94 if (str_len >= PAGE_SIZE) 95 if (str_len >= PAGE_SIZE)
95 return -ENOMEM; 96 return -ENOMEM;
96 97
97 ctx = kmalloc(sizeof(*ctx) + str_len + 1, GFP_KERNEL); 98 ctx = kmalloc(sizeof(*ctx) + str_len + 1, gfp);
98 if (!ctx) 99 if (!ctx)
99 return -ENOMEM; 100 return -ENOMEM;
100 101
@@ -103,7 +104,7 @@ static int selinux_xfrm_alloc_user(struct xfrm_sec_ctx **ctxp,
103 ctx->ctx_len = str_len; 104 ctx->ctx_len = str_len;
104 memcpy(ctx->ctx_str, &uctx[1], str_len); 105 memcpy(ctx->ctx_str, &uctx[1], str_len);
105 ctx->ctx_str[str_len] = '\0'; 106 ctx->ctx_str[str_len] = '\0';
106 rc = security_context_to_sid(ctx->ctx_str, str_len, &ctx->ctx_sid); 107 rc = security_context_to_sid(ctx->ctx_str, str_len, &ctx->ctx_sid, gfp);
107 if (rc) 108 if (rc)
108 goto err; 109 goto err;
109 110
@@ -282,9 +283,10 @@ int selinux_xfrm_skb_sid(struct sk_buff *skb, u32 *sid)
282 * LSM hook implementation that allocs and transfers uctx spec to xfrm_policy. 283 * LSM hook implementation that allocs and transfers uctx spec to xfrm_policy.
283 */ 284 */
284int selinux_xfrm_policy_alloc(struct xfrm_sec_ctx **ctxp, 285int selinux_xfrm_policy_alloc(struct xfrm_sec_ctx **ctxp,
285 struct xfrm_user_sec_ctx *uctx) 286 struct xfrm_user_sec_ctx *uctx,
287 gfp_t gfp)
286{ 288{
287 return selinux_xfrm_alloc_user(ctxp, uctx); 289 return selinux_xfrm_alloc_user(ctxp, uctx, gfp);
288} 290}
289 291
290/* 292/*
@@ -332,7 +334,7 @@ int selinux_xfrm_policy_delete(struct xfrm_sec_ctx *ctx)
332int selinux_xfrm_state_alloc(struct xfrm_state *x, 334int selinux_xfrm_state_alloc(struct xfrm_state *x,
333 struct xfrm_user_sec_ctx *uctx) 335 struct xfrm_user_sec_ctx *uctx)
334{ 336{
335 return selinux_xfrm_alloc_user(&x->security, uctx); 337 return selinux_xfrm_alloc_user(&x->security, uctx, GFP_KERNEL);
336} 338}
337 339
338/* 340/*
diff --git a/sound/core/compress_offload.c b/sound/core/compress_offload.c
index 7a20897d33db..7403f348ed14 100644
--- a/sound/core/compress_offload.c
+++ b/sound/core/compress_offload.c
@@ -133,7 +133,7 @@ static int snd_compr_open(struct inode *inode, struct file *f)
133 kfree(data); 133 kfree(data);
134 } 134 }
135 snd_card_unref(compr->card); 135 snd_card_unref(compr->card);
136 return 0; 136 return ret;
137} 137}
138 138
139static int snd_compr_free(struct inode *inode, struct file *f) 139static int snd_compr_free(struct inode *inode, struct file *f)
diff --git a/sound/pci/oxygen/xonar_dg.c b/sound/pci/oxygen/xonar_dg.c
index ed6f199f8a38..4cf3200e988b 100644
--- a/sound/pci/oxygen/xonar_dg.c
+++ b/sound/pci/oxygen/xonar_dg.c
@@ -238,11 +238,21 @@ void set_cs4245_adc_params(struct oxygen *chip,
238 cs4245_write_spi(chip, CS4245_MCLK_FREQ); 238 cs4245_write_spi(chip, CS4245_MCLK_FREQ);
239} 239}
240 240
241static inline unsigned int shift_bits(unsigned int value,
242 unsigned int shift_from,
243 unsigned int shift_to,
244 unsigned int mask)
245{
246 if (shift_from < shift_to)
247 return (value << (shift_to - shift_from)) & mask;
248 else
249 return (value >> (shift_from - shift_to)) & mask;
250}
251
241unsigned int adjust_dg_dac_routing(struct oxygen *chip, 252unsigned int adjust_dg_dac_routing(struct oxygen *chip,
242 unsigned int play_routing) 253 unsigned int play_routing)
243{ 254{
244 struct dg *data = chip->model_data; 255 struct dg *data = chip->model_data;
245 unsigned int routing = 0;
246 256
247 switch (data->output_sel) { 257 switch (data->output_sel) {
248 case PLAYBACK_DST_HP: 258 case PLAYBACK_DST_HP:
@@ -252,15 +262,23 @@ unsigned int adjust_dg_dac_routing(struct oxygen *chip,
252 OXYGEN_PLAY_MUTE67, OXYGEN_PLAY_MUTE_MASK); 262 OXYGEN_PLAY_MUTE67, OXYGEN_PLAY_MUTE_MASK);
253 break; 263 break;
254 case PLAYBACK_DST_MULTICH: 264 case PLAYBACK_DST_MULTICH:
255 routing = (0 << OXYGEN_PLAY_DAC0_SOURCE_SHIFT) |
256 (2 << OXYGEN_PLAY_DAC1_SOURCE_SHIFT) |
257 (1 << OXYGEN_PLAY_DAC2_SOURCE_SHIFT) |
258 (0 << OXYGEN_PLAY_DAC3_SOURCE_SHIFT);
259 oxygen_write8_masked(chip, OXYGEN_PLAY_ROUTING, 265 oxygen_write8_masked(chip, OXYGEN_PLAY_ROUTING,
260 OXYGEN_PLAY_MUTE01, OXYGEN_PLAY_MUTE_MASK); 266 OXYGEN_PLAY_MUTE01, OXYGEN_PLAY_MUTE_MASK);
261 break; 267 break;
262 } 268 }
263 return routing; 269 return (play_routing & OXYGEN_PLAY_DAC0_SOURCE_MASK) |
270 shift_bits(play_routing,
271 OXYGEN_PLAY_DAC2_SOURCE_SHIFT,
272 OXYGEN_PLAY_DAC1_SOURCE_SHIFT,
273 OXYGEN_PLAY_DAC1_SOURCE_MASK) |
274 shift_bits(play_routing,
275 OXYGEN_PLAY_DAC1_SOURCE_SHIFT,
276 OXYGEN_PLAY_DAC2_SOURCE_SHIFT,
277 OXYGEN_PLAY_DAC2_SOURCE_MASK) |
278 shift_bits(play_routing,
279 OXYGEN_PLAY_DAC0_SOURCE_SHIFT,
280 OXYGEN_PLAY_DAC3_SOURCE_SHIFT,
281 OXYGEN_PLAY_DAC3_SOURCE_MASK);
264} 282}
265 283
266void dump_cs4245_registers(struct oxygen *chip, 284void dump_cs4245_registers(struct oxygen *chip,
diff --git a/tools/perf/bench/numa.c b/tools/perf/bench/numa.c
index d4c83c60b9b2..97d86d828190 100644
--- a/tools/perf/bench/numa.c
+++ b/tools/perf/bench/numa.c
@@ -1593,6 +1593,7 @@ static void init_params(struct params *p, const char *name, int argc, const char
1593 p->data_rand_walk = true; 1593 p->data_rand_walk = true;
1594 p->nr_loops = -1; 1594 p->nr_loops = -1;
1595 p->init_random = true; 1595 p->init_random = true;
1596 p->run_all = argc == 1;
1596} 1597}
1597 1598
1598static int run_bench_numa(const char *name, const char **argv) 1599static int run_bench_numa(const char *name, const char **argv)
diff --git a/tools/perf/builtin-bench.c b/tools/perf/builtin-bench.c
index e47f90cc7b98..8a987d252780 100644
--- a/tools/perf/builtin-bench.c
+++ b/tools/perf/builtin-bench.c
@@ -76,7 +76,7 @@ static struct collection collections[] = {
76 76
77/* Iterate over all benchmarks within a collection: */ 77/* Iterate over all benchmarks within a collection: */
78#define for_each_bench(coll, bench) \ 78#define for_each_bench(coll, bench) \
79 for (bench = coll->benchmarks; bench->name; bench++) 79 for (bench = coll->benchmarks; bench && bench->name; bench++)
80 80
81static void dump_benchmarks(struct collection *coll) 81static void dump_benchmarks(struct collection *coll)
82{ 82{