-rw-r--r--  Documentation/kernel-parameters.txt | 2
-rw-r--r--  MAINTAINERS | 8
-rw-r--r--  Makefile | 2
-rw-r--r--  arch/arc/lib/strchr-700.S | 10
-rw-r--r--  arch/arm/boot/dts/at91sam9n12ek.dts | 4
-rw-r--r--  arch/arm/boot/dts/at91sam9x5ek.dtsi | 5
-rw-r--r--  arch/arm/boot/dts/tegra20-seaboard.dts | 2
-rw-r--r--  arch/arm/boot/dts/tegra20-trimslice.dts | 2
-rw-r--r--  arch/arm/boot/dts/tegra20-whistler.dts | 4
-rw-r--r--  arch/arm/kernel/fiq.c | 3
-rw-r--r--  arch/arm/kernel/machine_kexec.c | 1
-rw-r--r--  arch/arm/kvm/coproc.c | 26
-rw-r--r--  arch/arm/kvm/coproc.h | 3
-rw-r--r--  arch/arm/kvm/coproc_a15.c | 6
-rw-r--r--  arch/arm/kvm/mmio.c | 3
-rw-r--r--  arch/arm/kvm/mmu.c | 36
-rw-r--r--  arch/arm/mach-at91/at91sam9x5.c | 2
-rw-r--r--  arch/arm/mach-davinci/board-dm355-leopard.c | 1
-rw-r--r--  arch/arm/mach-davinci/board-dm644x-evm.c | 1
-rw-r--r--  arch/arm/mach-davinci/board-dm646x-evm.c | 1
-rw-r--r--  arch/arm/mach-davinci/board-neuros-osd2.c | 1
-rw-r--r--  arch/arm/mach-omap2/board-n8x0.c | 4
-rw-r--r--  arch/arm/mach-omap2/board-rx51.c | 2
-rw-r--r--  arch/arm/mach-omap2/usb-musb.c | 5
-rw-r--r--  arch/arm/mach-prima2/common.c | 2
-rw-r--r--  arch/arm/mm/Kconfig | 9
-rw-r--r--  arch/arm/plat-samsung/init.c | 5
-rw-r--r--  arch/arm/xen/enlighten.c | 1
-rw-r--r--  arch/arm64/include/asm/kvm_asm.h | 17
-rw-r--r--  arch/arm64/include/asm/kvm_host.h | 2
-rw-r--r--  arch/arm64/kernel/perf_event.c | 10
-rw-r--r--  arch/arm64/kvm/hyp.S | 13
-rw-r--r--  arch/arm64/kvm/sys_regs.c | 3
-rw-r--r--  arch/mips/math-emu/cp1emu.c | 26
-rw-r--r--  arch/powerpc/Kconfig | 1
-rw-r--r--  arch/powerpc/include/asm/page.h | 10
-rw-r--r--  arch/powerpc/kernel/lparcfg.c | 22
-rw-r--r--  arch/x86/kernel/sys_x86_64.c | 2
-rw-r--r--  arch/x86/mm/mmap.c | 8
-rw-r--r--  arch/x86/xen/setup.c | 22
-rw-r--r--  arch/x86/xen/smp.c | 11
-rw-r--r--  drivers/acpi/video.c | 11
-rw-r--r--  drivers/ata/libata-pmp.c | 12
-rw-r--r--  drivers/ata/sata_fsl.c | 5
-rw-r--r--  drivers/ata/sata_highbank.c | 4
-rw-r--r--  drivers/base/memory.c | 2
-rw-r--r--  drivers/base/regmap/regcache-rbtree.c | 2
-rw-r--r--  drivers/dma/Kconfig | 2
-rw-r--r--  drivers/gpu/drm/gma500/psb_intel_sdvo.c | 3
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h | 4
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.c | 12
-rw-r--r--  drivers/gpu/drm/nouveau/core/core/mm.c | 4
-rw-r--r--  drivers/gpu/drm/nouveau/core/include/subdev/mc.h | 7
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/fb/ramnv49.c | 12
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/fb/ramnv4e.c | 4
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/ltcg/nvc0.c | 34
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/mc/base.c | 6
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/mc/nv04.c | 3
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/mc/nv44.c | 3
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/mc/nv50.c | 3
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/mc/nv98.c | 3
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/mc/nvc0.c | 3
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv04/crtc.c | 58
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv04/disp.h | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_display.c | 3
-rw-r--r--  drivers/gpu/drm/nouveau/nv40_pm.c | 2
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c | 58
-rw-r--r--  drivers/iio/light/adjd_s311.c | 3
-rw-r--r--  drivers/input/joystick/xpad.c | 1
-rw-r--r--  drivers/input/mouse/elantech.c | 44
-rw-r--r--  drivers/input/mouse/elantech.h | 1
-rw-r--r--  drivers/input/serio/Kconfig | 3
-rw-r--r--  drivers/input/tablet/wacom_wac.c | 10
-rw-r--r--  drivers/irqchip/irq-sirfsoc.c | 18
-rw-r--r--  drivers/isdn/mISDN/dsp_core.c | 4
-rw-r--r--  drivers/md/dm-cache-policy-mq.c | 16
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x.h | 2
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c | 9
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c | 49
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.h | 3
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c | 54
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c | 119
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c | 31
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h | 3
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_main.c | 6
-rw-r--r--  drivers/net/ethernet/freescale/fec_main.c | 3
-rw-r--r--  drivers/net/ethernet/jme.c | 2
-rw-r--r--  drivers/net/ethernet/qlogic/netxen/netxen_nic.h | 1
-rw-r--r--  drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c | 2
-rw-r--r--  drivers/net/ethernet/realtek/r8169.c | 2
-rw-r--r--  drivers/net/ethernet/sfc/filter.c | 2
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c | 20
-rw-r--r--  drivers/net/ethernet/toshiba/ps3_gelic_net.c | 3
-rw-r--r--  drivers/net/ethernet/toshiba/ps3_gelic_net.h | 1
-rw-r--r--  drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c | 1
-rw-r--r--  drivers/net/irda/via-ircc.c | 6
-rw-r--r--  drivers/net/macvtap.c | 18
-rw-r--r--  drivers/net/phy/realtek.c | 4
-rw-r--r--  drivers/net/usb/cdc_mbim.c | 4
-rw-r--r--  drivers/net/usb/hso.c | 15
-rw-r--r--  drivers/net/wireless/ath/ath9k/htc_drv_txrx.c | 10
-rw-r--r--  drivers/net/wireless/ath/ath9k/init.c | 3
-rw-r--r--  drivers/net/wireless/ath/ath9k/main.c | 3
-rw-r--r--  drivers/net/wireless/ath/carl9170/main.c | 3
-rw-r--r--  drivers/net/wireless/hostap/hostap_ioctl.c | 4
-rw-r--r--  drivers/net/wireless/iwlegacy/4965-mac.c | 2
-rw-r--r--  drivers/net/wireless/iwlwifi/dvm/mac80211.c | 5
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-prph.h | 2
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/time-event.c | 33
-rw-r--r--  drivers/net/wireless/iwlwifi/pcie/rx.c | 8
-rw-r--r--  drivers/net/wireless/iwlwifi/pcie/trans.c | 10
-rw-r--r--  drivers/net/wireless/rt2x00/rt2800lib.c | 3
-rw-r--r--  drivers/net/wireless/zd1201.c | 4
-rw-r--r--  drivers/of/fdt.c | 2
-rw-r--r--  drivers/pinctrl/pinctrl-sunxi.c | 66
-rw-r--r--  drivers/pinctrl/pinctrl-sunxi.h | 2
-rw-r--r--  drivers/platform/olpc/olpc-ec.c | 2
-rw-r--r--  drivers/platform/x86/hp-wmi.c | 16
-rw-r--r--  drivers/platform/x86/sony-laptop.c | 8
-rw-r--r--  drivers/s390/scsi/zfcp_erp.c | 29
-rw-r--r--  drivers/s390/scsi/zfcp_qdio.c | 8
-rw-r--r--  drivers/s390/scsi/zfcp_sysfs.c | 14
-rw-r--r--  drivers/scsi/Kconfig | 1
-rw-r--r--  drivers/staging/comedi/drivers.c | 2
-rw-r--r--  drivers/tty/hvc/hvsi_lib.c | 4
-rw-r--r--  drivers/usb/host/ohci-pci.c | 7
-rw-r--r--  drivers/usb/phy/phy-fsl-usb.h | 2
-rw-r--r--  drivers/usb/phy/phy-fsm-usb.c | 2
-rw-r--r--  drivers/xen/events.c | 13
-rw-r--r--  fs/bfs/inode.c | 2
-rw-r--r--  fs/bio.c | 20
-rw-r--r--  fs/dcache.c | 68
-rw-r--r--  fs/efs/inode.c | 2
-rw-r--r--  fs/hugetlbfs/inode.c | 8
-rw-r--r--  fs/jfs/jfs_dtree.c | 31
-rw-r--r--  fs/namei.c | 16
-rw-r--r--  fs/namespace.c | 2
-rw-r--r--  fs/nilfs2/segbuf.c | 5
-rw-r--r--  fs/ocfs2/super.c | 2
-rw-r--r--  fs/proc/fd.c | 2
-rw-r--r--  include/linux/dcache.h | 20
-rw-r--r--  include/linux/inetdevice.h | 34
-rw-r--r--  include/linux/ipv6.h | 1
-rw-r--r--  include/linux/lockref.h | 71
-rw-r--r--  include/linux/mm_types.h | 1
-rw-r--r--  include/linux/nsproxy.h | 6
-rw-r--r--  include/linux/regmap.h | 1
-rw-r--r--  include/linux/sched.h | 1
-rw-r--r--  include/linux/wait.h | 57
-rw-r--r--  include/net/busy_poll.h | 1
-rw-r--r--  include/net/genetlink.h | 20
-rw-r--r--  include/net/ip6_route.h | 2
-rw-r--r--  include/net/mac80211.h | 1
-rw-r--r--  include/net/route.h | 8
-rw-r--r--  include/net/xfrm.h | 6
-rw-r--r--  include/uapi/linux/cm4000_cs.h | 1
-rw-r--r--  include/uapi/linux/ip.h | 34
-rw-r--r--  init/Kconfig | 2
-rw-r--r--  ipc/msg.c | 5
-rw-r--r--  kernel/cgroup.c | 19
-rw-r--r--  kernel/cpuset.c | 14
-rw-r--r--  kernel/fork.c | 5
-rw-r--r--  kernel/nsproxy.c | 27
-rw-r--r--  kernel/pid_namespace.c | 4
-rw-r--r--  kernel/time/timer_list.c | 41
-rw-r--r--  kernel/workqueue.c | 9
-rw-r--r--  lib/lz4/lz4_compress.c | 4
-rw-r--r--  lib/lz4/lz4_decompress.c | 6
-rw-r--r--  lib/lz4/lz4hc_compress.c | 4
-rw-r--r--  mm/memcontrol.c | 1
-rw-r--r--  mm/mremap.c | 21
-rw-r--r--  mm/shmem.c | 8
-rw-r--r--  mm/slab.h | 2
-rw-r--r--  net/batman-adv/unicast.c | 10
-rw-r--r--  net/bridge/br_device.c | 2
-rw-r--r--  net/bridge/br_fdb.c | 10
-rw-r--r--  net/bridge/br_input.c | 2
-rw-r--r--  net/bridge/br_mdb.c | 14
-rw-r--r--  net/bridge/br_multicast.c | 261
-rw-r--r--  net/bridge/br_netlink.c | 4
-rw-r--r--  net/bridge/br_private.h | 57
-rw-r--r--  net/bridge/br_vlan.c | 4
-rw-r--r--  net/core/flow_dissector.c | 11
-rw-r--r--  net/core/scm.c | 2
-rw-r--r--  net/ipv4/ip_output.c | 8
-rw-r--r--  net/ipv4/ipip.c | 5
-rw-r--r--  net/ipv4/raw.c | 3
-rw-r--r--  net/ipv4/tcp.c | 7
-rw-r--r--  net/ipv4/tcp_input.c | 9
-rw-r--r--  net/ipv4/tcp_output.c | 4
-rw-r--r--  net/ipv4/xfrm4_output.c | 16
-rw-r--r--  net/ipv4/xfrm4_state.c | 1
-rw-r--r--  net/ipv6/addrconf.c | 10
-rw-r--r--  net/ipv6/ip6_gre.c | 5
-rw-r--r--  net/ipv6/ip6_output.c | 3
-rw-r--r--  net/ipv6/ip6_tunnel.c | 6
-rw-r--r--  net/ipv6/ndisc.c | 4
-rw-r--r--  net/ipv6/raw.c | 1
-rw-r--r--  net/ipv6/reassembly.c | 5
-rw-r--r--  net/ipv6/route.c | 21
-rw-r--r--  net/ipv6/sit.c | 11
-rw-r--r--  net/ipv6/xfrm6_output.c | 21
-rw-r--r--  net/ipv6/xfrm6_state.c | 1
-rw-r--r--  net/mac80211/ibss.c | 34
-rw-r--r--  net/mac80211/rc80211_minstrel_ht.c | 3
-rw-r--r--  net/netlink/genetlink.c | 74
-rw-r--r--  net/packet/af_packet.c | 2
-rw-r--r--  net/sunrpc/xdr.c | 9
-rw-r--r--  net/tipc/socket.c | 4
-rw-r--r--  net/wireless/nl80211.c | 22
-rw-r--r--  net/wireless/sme.c | 10
-rw-r--r--  net/xfrm/xfrm_output.c | 21
-rw-r--r--  net/xfrm/xfrm_policy.c | 9
-rw-r--r--  net/xfrm/xfrm_state.c | 7
-rw-r--r--  sound/isa/opti9xx/opti92x-ad1848.c | 8
-rw-r--r--  sound/pci/hda/patch_hdmi.c | 3
-rw-r--r--  sound/pci/hda/patch_realtek.c | 1
217 files changed, 1800 insertions(+), 807 deletions(-)
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index 15356aca938c..7f9d4f53882c 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -2953,7 +2953,7 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
2953 improve throughput, but will also increase the 2953 improve throughput, but will also increase the
2954 amount of memory reserved for use by the client. 2954 amount of memory reserved for use by the client.
2955 2955
2956 swapaccount[=0|1] 2956 swapaccount=[0|1]
2957 [KNL] Enable accounting of swap in memory resource 2957 [KNL] Enable accounting of swap in memory resource
2958 controller if no parameter or 1 is given or disable 2958 controller if no parameter or 1 is given or disable
2959 it if 0 is given (See Documentation/cgroups/memory.txt) 2959 it if 0 is given (See Documentation/cgroups/memory.txt)
diff --git a/MAINTAINERS b/MAINTAINERS
index 229c66b12cc2..b140c8123098 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -5884,7 +5884,7 @@ F: drivers/i2c/busses/i2c-omap.c
5884F: include/linux/i2c-omap.h 5884F: include/linux/i2c-omap.h
5885 5885
5886OMAP DEVICE TREE SUPPORT 5886OMAP DEVICE TREE SUPPORT
5887M: Benoît Cousson <b-cousson@ti.com> 5887M: Benoît Cousson <bcousson@baylibre.com>
5888M: Tony Lindgren <tony@atomide.com> 5888M: Tony Lindgren <tony@atomide.com>
5889L: linux-omap@vger.kernel.org 5889L: linux-omap@vger.kernel.org
5890L: devicetree@vger.kernel.org 5890L: devicetree@vger.kernel.org
@@ -5964,14 +5964,14 @@ S: Maintained
5964F: drivers/char/hw_random/omap-rng.c 5964F: drivers/char/hw_random/omap-rng.c
5965 5965
5966OMAP HWMOD SUPPORT 5966OMAP HWMOD SUPPORT
5967M: Benoît Cousson <b-cousson@ti.com> 5967M: Benoît Cousson <bcousson@baylibre.com>
5968M: Paul Walmsley <paul@pwsan.com> 5968M: Paul Walmsley <paul@pwsan.com>
5969L: linux-omap@vger.kernel.org 5969L: linux-omap@vger.kernel.org
5970S: Maintained 5970S: Maintained
5971F: arch/arm/mach-omap2/omap_hwmod.* 5971F: arch/arm/mach-omap2/omap_hwmod.*
5972 5972
5973OMAP HWMOD DATA FOR OMAP4-BASED DEVICES 5973OMAP HWMOD DATA FOR OMAP4-BASED DEVICES
5974M: Benoît Cousson <b-cousson@ti.com> 5974M: Benoît Cousson <bcousson@baylibre.com>
5975L: linux-omap@vger.kernel.org 5975L: linux-omap@vger.kernel.org
5976S: Maintained 5976S: Maintained
5977F: arch/arm/mach-omap2/omap_hwmod_44xx_data.c 5977F: arch/arm/mach-omap2/omap_hwmod_44xx_data.c
@@ -6066,7 +6066,7 @@ M: Rob Herring <rob.herring@calxeda.com>
6066M: Pawel Moll <pawel.moll@arm.com> 6066M: Pawel Moll <pawel.moll@arm.com>
6067M: Mark Rutland <mark.rutland@arm.com> 6067M: Mark Rutland <mark.rutland@arm.com>
6068M: Stephen Warren <swarren@wwwdotorg.org> 6068M: Stephen Warren <swarren@wwwdotorg.org>
6069M: Ian Campbell <ian.campbell@citrix.com> 6069M: Ian Campbell <ijc+devicetree@hellion.org.uk>
6070L: devicetree@vger.kernel.org 6070L: devicetree@vger.kernel.org
6071S: Maintained 6071S: Maintained
6072F: Documentation/devicetree/ 6072F: Documentation/devicetree/
diff --git a/Makefile b/Makefile
index a5a55f4547c6..369882e4fc77 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
1VERSION = 3 1VERSION = 3
2PATCHLEVEL = 11 2PATCHLEVEL = 11
3SUBLEVEL = 0 3SUBLEVEL = 0
4EXTRAVERSION = -rc6 4EXTRAVERSION = -rc7
5NAME = Linux for Workgroups 5NAME = Linux for Workgroups
6 6
7# *DOCUMENTATION* 7# *DOCUMENTATION*
diff --git a/arch/arc/lib/strchr-700.S b/arch/arc/lib/strchr-700.S
index 99c10475d477..9c548c7cf001 100644
--- a/arch/arc/lib/strchr-700.S
+++ b/arch/arc/lib/strchr-700.S
@@ -39,9 +39,18 @@ ARC_ENTRY strchr
39 ld.a r2,[r0,4] 39 ld.a r2,[r0,4]
40 sub r12,r6,r7 40 sub r12,r6,r7
41 bic r12,r12,r6 41 bic r12,r12,r6
42#ifdef __LITTLE_ENDIAN__
42 and r7,r12,r4 43 and r7,r12,r4
43 breq r7,0,.Loop ; For speed, we want this branch to be unaligned. 44 breq r7,0,.Loop ; For speed, we want this branch to be unaligned.
44 b .Lfound_char ; Likewise this one. 45 b .Lfound_char ; Likewise this one.
46#else
47 and r12,r12,r4
48 breq r12,0,.Loop ; For speed, we want this branch to be unaligned.
49 lsr_s r12,r12,7
50 bic r2,r7,r6
51 b.d .Lfound_char_b
52 and_s r2,r2,r12
53#endif
45; /* We require this code address to be unaligned for speed... */ 54; /* We require this code address to be unaligned for speed... */
46.Laligned: 55.Laligned:
47 ld_s r2,[r0] 56 ld_s r2,[r0]
@@ -95,6 +104,7 @@ ARC_ENTRY strchr
95 lsr r7,r7,7 104 lsr r7,r7,7
96 105
97 bic r2,r7,r6 106 bic r2,r7,r6
107.Lfound_char_b:
98 norm r2,r2 108 norm r2,r2
99 sub_s r0,r0,4 109 sub_s r0,r0,4
100 asr_s r2,r2,3 110 asr_s r2,r2,3
diff --git a/arch/arm/boot/dts/at91sam9n12ek.dts b/arch/arm/boot/dts/at91sam9n12ek.dts
index d59b70c6a6a0..3d77dbe406f4 100644
--- a/arch/arm/boot/dts/at91sam9n12ek.dts
+++ b/arch/arm/boot/dts/at91sam9n12ek.dts
@@ -14,11 +14,11 @@
14 compatible = "atmel,at91sam9n12ek", "atmel,at91sam9n12", "atmel,at91sam9"; 14 compatible = "atmel,at91sam9n12ek", "atmel,at91sam9n12", "atmel,at91sam9";
15 15
16 chosen { 16 chosen {
17 bootargs = "mem=128M console=ttyS0,115200 root=/dev/mtdblock1 rw rootfstype=jffs2"; 17 bootargs = "console=ttyS0,115200 root=/dev/mtdblock1 rw rootfstype=jffs2";
18 }; 18 };
19 19
20 memory { 20 memory {
21 reg = <0x20000000 0x10000000>; 21 reg = <0x20000000 0x8000000>;
22 }; 22 };
23 23
24 clocks { 24 clocks {
diff --git a/arch/arm/boot/dts/at91sam9x5ek.dtsi b/arch/arm/boot/dts/at91sam9x5ek.dtsi
index b753855b2058..49e3c45818c2 100644
--- a/arch/arm/boot/dts/at91sam9x5ek.dtsi
+++ b/arch/arm/boot/dts/at91sam9x5ek.dtsi
@@ -94,8 +94,9 @@
94 94
95 usb0: ohci@00600000 { 95 usb0: ohci@00600000 {
96 status = "okay"; 96 status = "okay";
97 num-ports = <2>; 97 num-ports = <3>;
98 atmel,vbus-gpio = <&pioD 19 GPIO_ACTIVE_LOW 98 atmel,vbus-gpio = <0 /* &pioD 18 GPIO_ACTIVE_LOW *//* Activate to have access to port A */
99 &pioD 19 GPIO_ACTIVE_LOW
99 &pioD 20 GPIO_ACTIVE_LOW 100 &pioD 20 GPIO_ACTIVE_LOW
100 >; 101 >;
101 }; 102 };
diff --git a/arch/arm/boot/dts/tegra20-seaboard.dts b/arch/arm/boot/dts/tegra20-seaboard.dts
index 365760b33a26..40e6fb280333 100644
--- a/arch/arm/boot/dts/tegra20-seaboard.dts
+++ b/arch/arm/boot/dts/tegra20-seaboard.dts
@@ -830,6 +830,8 @@
830 regulator-max-microvolt = <5000000>; 830 regulator-max-microvolt = <5000000>;
831 enable-active-high; 831 enable-active-high;
832 gpio = <&gpio 24 0>; /* PD0 */ 832 gpio = <&gpio 24 0>; /* PD0 */
833 regulator-always-on;
834 regulator-boot-on;
833 }; 835 };
834 }; 836 };
835 837
diff --git a/arch/arm/boot/dts/tegra20-trimslice.dts b/arch/arm/boot/dts/tegra20-trimslice.dts
index ed4b901b0227..37c93d3c4812 100644
--- a/arch/arm/boot/dts/tegra20-trimslice.dts
+++ b/arch/arm/boot/dts/tegra20-trimslice.dts
@@ -412,6 +412,8 @@
412 regulator-max-microvolt = <5000000>; 412 regulator-max-microvolt = <5000000>;
413 enable-active-high; 413 enable-active-high;
414 gpio = <&gpio 170 0>; /* PV2 */ 414 gpio = <&gpio 170 0>; /* PV2 */
415 regulator-always-on;
416 regulator-boot-on;
415 }; 417 };
416 }; 418 };
417 419
diff --git a/arch/arm/boot/dts/tegra20-whistler.dts b/arch/arm/boot/dts/tegra20-whistler.dts
index ab67c94db280..a3d0ebad78a1 100644
--- a/arch/arm/boot/dts/tegra20-whistler.dts
+++ b/arch/arm/boot/dts/tegra20-whistler.dts
@@ -588,6 +588,8 @@
588 regulator-max-microvolt = <5000000>; 588 regulator-max-microvolt = <5000000>;
589 enable-active-high; 589 enable-active-high;
590 gpio = <&tca6416 0 0>; /* GPIO_PMU0 */ 590 gpio = <&tca6416 0 0>; /* GPIO_PMU0 */
591 regulator-always-on;
592 regulator-boot-on;
591 }; 593 };
592 594
593 vbus3_reg: regulator@3 { 595 vbus3_reg: regulator@3 {
@@ -598,6 +600,8 @@
598 regulator-max-microvolt = <5000000>; 600 regulator-max-microvolt = <5000000>;
599 enable-active-high; 601 enable-active-high;
600 gpio = <&tca6416 1 0>; /* GPIO_PMU1 */ 602 gpio = <&tca6416 1 0>; /* GPIO_PMU1 */
603 regulator-always-on;
604 regulator-boot-on;
601 }; 605 };
602 }; 606 };
603 607
diff --git a/arch/arm/kernel/fiq.c b/arch/arm/kernel/fiq.c
index fc7920288a3d..918875d96d5d 100644
--- a/arch/arm/kernel/fiq.c
+++ b/arch/arm/kernel/fiq.c
@@ -89,7 +89,8 @@ void set_fiq_handler(void *start, unsigned int length)
89 89
90 memcpy(base + offset, start, length); 90 memcpy(base + offset, start, length);
91 if (!cache_is_vipt_nonaliasing()) 91 if (!cache_is_vipt_nonaliasing())
92 flush_icache_range(base + offset, offset + length); 92 flush_icache_range((unsigned long)base + offset, offset +
93 length);
93 flush_icache_range(0xffff0000 + offset, 0xffff0000 + offset + length); 94 flush_icache_range(0xffff0000 + offset, 0xffff0000 + offset + length);
94} 95}
95 96
diff --git a/arch/arm/kernel/machine_kexec.c b/arch/arm/kernel/machine_kexec.c
index d7c82df69243..57221e349a7c 100644
--- a/arch/arm/kernel/machine_kexec.c
+++ b/arch/arm/kernel/machine_kexec.c
@@ -82,6 +82,7 @@ void machine_crash_nonpanic_core(void *unused)
82 crash_save_cpu(&regs, smp_processor_id()); 82 crash_save_cpu(&regs, smp_processor_id());
83 flush_cache_all(); 83 flush_cache_all();
84 84
85 set_cpu_online(smp_processor_id(), false);
85 atomic_dec(&waiting_for_crash_ipi); 86 atomic_dec(&waiting_for_crash_ipi);
86 while (1) 87 while (1)
87 cpu_relax(); 88 cpu_relax();
diff --git a/arch/arm/kvm/coproc.c b/arch/arm/kvm/coproc.c
index 4a5199070430..db9cf692d4dd 100644
--- a/arch/arm/kvm/coproc.c
+++ b/arch/arm/kvm/coproc.c
@@ -146,7 +146,11 @@ static bool pm_fake(struct kvm_vcpu *vcpu,
146#define access_pmintenclr pm_fake 146#define access_pmintenclr pm_fake
147 147
148/* Architected CP15 registers. 148/* Architected CP15 registers.
149 * Important: Must be sorted ascending by CRn, CRM, Op1, Op2 149 * CRn denotes the primary register number, but is copied to the CRm in the
150 * user space API for 64-bit register access in line with the terminology used
151 * in the ARM ARM.
152 * Important: Must be sorted ascending by CRn, CRM, Op1, Op2 and with 64-bit
153 * registers preceding 32-bit ones.
150 */ 154 */
151static const struct coproc_reg cp15_regs[] = { 155static const struct coproc_reg cp15_regs[] = {
152 /* CSSELR: swapped by interrupt.S. */ 156 /* CSSELR: swapped by interrupt.S. */
@@ -154,8 +158,8 @@ static const struct coproc_reg cp15_regs[] = {
154 NULL, reset_unknown, c0_CSSELR }, 158 NULL, reset_unknown, c0_CSSELR },
155 159
156 /* TTBR0/TTBR1: swapped by interrupt.S. */ 160 /* TTBR0/TTBR1: swapped by interrupt.S. */
157 { CRm( 2), Op1( 0), is64, NULL, reset_unknown64, c2_TTBR0 }, 161 { CRm64( 2), Op1( 0), is64, NULL, reset_unknown64, c2_TTBR0 },
158 { CRm( 2), Op1( 1), is64, NULL, reset_unknown64, c2_TTBR1 }, 162 { CRm64( 2), Op1( 1), is64, NULL, reset_unknown64, c2_TTBR1 },
159 163
160 /* TTBCR: swapped by interrupt.S. */ 164 /* TTBCR: swapped by interrupt.S. */
161 { CRn( 2), CRm( 0), Op1( 0), Op2( 2), is32, 165 { CRn( 2), CRm( 0), Op1( 0), Op2( 2), is32,
@@ -182,7 +186,7 @@ static const struct coproc_reg cp15_regs[] = {
182 NULL, reset_unknown, c6_IFAR }, 186 NULL, reset_unknown, c6_IFAR },
183 187
184 /* PAR swapped by interrupt.S */ 188 /* PAR swapped by interrupt.S */
185 { CRn( 7), Op1( 0), is64, NULL, reset_unknown64, c7_PAR }, 189 { CRm64( 7), Op1( 0), is64, NULL, reset_unknown64, c7_PAR },
186 190
187 /* 191 /*
188 * DC{C,I,CI}SW operations: 192 * DC{C,I,CI}SW operations:
@@ -399,12 +403,13 @@ static bool index_to_params(u64 id, struct coproc_params *params)
399 | KVM_REG_ARM_OPC1_MASK)) 403 | KVM_REG_ARM_OPC1_MASK))
400 return false; 404 return false;
401 params->is_64bit = true; 405 params->is_64bit = true;
402 params->CRm = ((id & KVM_REG_ARM_CRM_MASK) 406 /* CRm to CRn: see cp15_to_index for details */
407 params->CRn = ((id & KVM_REG_ARM_CRM_MASK)
403 >> KVM_REG_ARM_CRM_SHIFT); 408 >> KVM_REG_ARM_CRM_SHIFT);
404 params->Op1 = ((id & KVM_REG_ARM_OPC1_MASK) 409 params->Op1 = ((id & KVM_REG_ARM_OPC1_MASK)
405 >> KVM_REG_ARM_OPC1_SHIFT); 410 >> KVM_REG_ARM_OPC1_SHIFT);
406 params->Op2 = 0; 411 params->Op2 = 0;
407 params->CRn = 0; 412 params->CRm = 0;
408 return true; 413 return true;
409 default: 414 default:
410 return false; 415 return false;
@@ -898,7 +903,14 @@ static u64 cp15_to_index(const struct coproc_reg *reg)
898 if (reg->is_64) { 903 if (reg->is_64) {
899 val |= KVM_REG_SIZE_U64; 904 val |= KVM_REG_SIZE_U64;
900 val |= (reg->Op1 << KVM_REG_ARM_OPC1_SHIFT); 905 val |= (reg->Op1 << KVM_REG_ARM_OPC1_SHIFT);
901 val |= (reg->CRm << KVM_REG_ARM_CRM_SHIFT); 906 /*
907 * CRn always denotes the primary coproc. reg. nr. for the
908 * in-kernel representation, but the user space API uses the
909 * CRm for the encoding, because it is modelled after the
910 * MRRC/MCRR instructions: see the ARM ARM rev. c page
911 * B3-1445
912 */
913 val |= (reg->CRn << KVM_REG_ARM_CRM_SHIFT);
902 } else { 914 } else {
903 val |= KVM_REG_SIZE_U32; 915 val |= KVM_REG_SIZE_U32;
904 val |= (reg->Op1 << KVM_REG_ARM_OPC1_SHIFT); 916 val |= (reg->Op1 << KVM_REG_ARM_OPC1_SHIFT);
diff --git a/arch/arm/kvm/coproc.h b/arch/arm/kvm/coproc.h
index b7301d3e4799..0461d5c8d3de 100644
--- a/arch/arm/kvm/coproc.h
+++ b/arch/arm/kvm/coproc.h
@@ -135,6 +135,8 @@ static inline int cmp_reg(const struct coproc_reg *i1,
135 return -1; 135 return -1;
136 if (i1->CRn != i2->CRn) 136 if (i1->CRn != i2->CRn)
137 return i1->CRn - i2->CRn; 137 return i1->CRn - i2->CRn;
138 if (i1->is_64 != i2->is_64)
139 return i2->is_64 - i1->is_64;
138 if (i1->CRm != i2->CRm) 140 if (i1->CRm != i2->CRm)
139 return i1->CRm - i2->CRm; 141 return i1->CRm - i2->CRm;
140 if (i1->Op1 != i2->Op1) 142 if (i1->Op1 != i2->Op1)
@@ -145,6 +147,7 @@ static inline int cmp_reg(const struct coproc_reg *i1,
145 147
146#define CRn(_x) .CRn = _x 148#define CRn(_x) .CRn = _x
147#define CRm(_x) .CRm = _x 149#define CRm(_x) .CRm = _x
150#define CRm64(_x) .CRn = _x, .CRm = 0
148#define Op1(_x) .Op1 = _x 151#define Op1(_x) .Op1 = _x
149#define Op2(_x) .Op2 = _x 152#define Op2(_x) .Op2 = _x
150#define is64 .is_64 = true 153#define is64 .is_64 = true
diff --git a/arch/arm/kvm/coproc_a15.c b/arch/arm/kvm/coproc_a15.c
index 685063a6d0cf..cf93472b9dd6 100644
--- a/arch/arm/kvm/coproc_a15.c
+++ b/arch/arm/kvm/coproc_a15.c
@@ -114,7 +114,11 @@ static bool access_l2ectlr(struct kvm_vcpu *vcpu,
114 114
115/* 115/*
116 * A15-specific CP15 registers. 116 * A15-specific CP15 registers.
117 * Important: Must be sorted ascending by CRn, CRM, Op1, Op2 117 * CRn denotes the primary register number, but is copied to the CRm in the
118 * user space API for 64-bit register access in line with the terminology used
119 * in the ARM ARM.
120 * Important: Must be sorted ascending by CRn, CRM, Op1, Op2 and with 64-bit
121 * registers preceding 32-bit ones.
118 */ 122 */
119static const struct coproc_reg a15_regs[] = { 123static const struct coproc_reg a15_regs[] = {
120 /* MPIDR: we use VMPIDR for guest access. */ 124 /* MPIDR: we use VMPIDR for guest access. */
diff --git a/arch/arm/kvm/mmio.c b/arch/arm/kvm/mmio.c
index b8e06b7a2833..0c25d9487d53 100644
--- a/arch/arm/kvm/mmio.c
+++ b/arch/arm/kvm/mmio.c
@@ -63,7 +63,8 @@ int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run)
63static int decode_hsr(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa, 63static int decode_hsr(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
64 struct kvm_exit_mmio *mmio) 64 struct kvm_exit_mmio *mmio)
65{ 65{
66 unsigned long rt, len; 66 unsigned long rt;
67 int len;
67 bool is_write, sign_extend; 68 bool is_write, sign_extend;
68 69
69 if (kvm_vcpu_dabt_isextabt(vcpu)) { 70 if (kvm_vcpu_dabt_isextabt(vcpu)) {
diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c
index ca6bea4859b4..0988d9e04dd4 100644
--- a/arch/arm/kvm/mmu.c
+++ b/arch/arm/kvm/mmu.c
@@ -85,6 +85,12 @@ static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc)
85 return p; 85 return p;
86} 86}
87 87
88static bool page_empty(void *ptr)
89{
90 struct page *ptr_page = virt_to_page(ptr);
91 return page_count(ptr_page) == 1;
92}
93
88static void clear_pud_entry(struct kvm *kvm, pud_t *pud, phys_addr_t addr) 94static void clear_pud_entry(struct kvm *kvm, pud_t *pud, phys_addr_t addr)
89{ 95{
90 pmd_t *pmd_table = pmd_offset(pud, 0); 96 pmd_t *pmd_table = pmd_offset(pud, 0);
@@ -103,12 +109,6 @@ static void clear_pmd_entry(struct kvm *kvm, pmd_t *pmd, phys_addr_t addr)
103 put_page(virt_to_page(pmd)); 109 put_page(virt_to_page(pmd));
104} 110}
105 111
106static bool pmd_empty(pmd_t *pmd)
107{
108 struct page *pmd_page = virt_to_page(pmd);
109 return page_count(pmd_page) == 1;
110}
111
112static void clear_pte_entry(struct kvm *kvm, pte_t *pte, phys_addr_t addr) 112static void clear_pte_entry(struct kvm *kvm, pte_t *pte, phys_addr_t addr)
113{ 113{
114 if (pte_present(*pte)) { 114 if (pte_present(*pte)) {
@@ -118,12 +118,6 @@ static void clear_pte_entry(struct kvm *kvm, pte_t *pte, phys_addr_t addr)
118 } 118 }
119} 119}
120 120
121static bool pte_empty(pte_t *pte)
122{
123 struct page *pte_page = virt_to_page(pte);
124 return page_count(pte_page) == 1;
125}
126
127static void unmap_range(struct kvm *kvm, pgd_t *pgdp, 121static void unmap_range(struct kvm *kvm, pgd_t *pgdp,
128 unsigned long long start, u64 size) 122 unsigned long long start, u64 size)
129{ 123{
@@ -132,37 +126,37 @@ static void unmap_range(struct kvm *kvm, pgd_t *pgdp,
132 pmd_t *pmd; 126 pmd_t *pmd;
133 pte_t *pte; 127 pte_t *pte;
134 unsigned long long addr = start, end = start + size; 128 unsigned long long addr = start, end = start + size;
135 u64 range; 129 u64 next;
136 130
137 while (addr < end) { 131 while (addr < end) {
138 pgd = pgdp + pgd_index(addr); 132 pgd = pgdp + pgd_index(addr);
139 pud = pud_offset(pgd, addr); 133 pud = pud_offset(pgd, addr);
140 if (pud_none(*pud)) { 134 if (pud_none(*pud)) {
141 addr += PUD_SIZE; 135 addr = pud_addr_end(addr, end);
142 continue; 136 continue;
143 } 137 }
144 138
145 pmd = pmd_offset(pud, addr); 139 pmd = pmd_offset(pud, addr);
146 if (pmd_none(*pmd)) { 140 if (pmd_none(*pmd)) {
147 addr += PMD_SIZE; 141 addr = pmd_addr_end(addr, end);
148 continue; 142 continue;
149 } 143 }
150 144
151 pte = pte_offset_kernel(pmd, addr); 145 pte = pte_offset_kernel(pmd, addr);
152 clear_pte_entry(kvm, pte, addr); 146 clear_pte_entry(kvm, pte, addr);
153 range = PAGE_SIZE; 147 next = addr + PAGE_SIZE;
154 148
155 /* If we emptied the pte, walk back up the ladder */ 149 /* If we emptied the pte, walk back up the ladder */
156 if (pte_empty(pte)) { 150 if (page_empty(pte)) {
157 clear_pmd_entry(kvm, pmd, addr); 151 clear_pmd_entry(kvm, pmd, addr);
158 range = PMD_SIZE; 152 next = pmd_addr_end(addr, end);
159 if (pmd_empty(pmd)) { 153 if (page_empty(pmd) && !page_empty(pud)) {
160 clear_pud_entry(kvm, pud, addr); 154 clear_pud_entry(kvm, pud, addr);
161 range = PUD_SIZE; 155 next = pud_addr_end(addr, end);
162 } 156 }
163 } 157 }
164 158
165 addr += range; 159 addr = next;
166 } 160 }
167} 161}
168 162
diff --git a/arch/arm/mach-at91/at91sam9x5.c b/arch/arm/mach-at91/at91sam9x5.c
index 2abee6626aac..916e5a142917 100644
--- a/arch/arm/mach-at91/at91sam9x5.c
+++ b/arch/arm/mach-at91/at91sam9x5.c
@@ -227,6 +227,8 @@ static struct clk_lookup periph_clocks_lookups[] = {
227 CLKDEV_CON_DEV_ID("usart", "f8020000.serial", &usart1_clk), 227 CLKDEV_CON_DEV_ID("usart", "f8020000.serial", &usart1_clk),
228 CLKDEV_CON_DEV_ID("usart", "f8024000.serial", &usart2_clk), 228 CLKDEV_CON_DEV_ID("usart", "f8024000.serial", &usart2_clk),
229 CLKDEV_CON_DEV_ID("usart", "f8028000.serial", &usart3_clk), 229 CLKDEV_CON_DEV_ID("usart", "f8028000.serial", &usart3_clk),
230 CLKDEV_CON_DEV_ID("usart", "f8040000.serial", &uart0_clk),
231 CLKDEV_CON_DEV_ID("usart", "f8044000.serial", &uart1_clk),
230 CLKDEV_CON_DEV_ID("t0_clk", "f8008000.timer", &tcb0_clk), 232 CLKDEV_CON_DEV_ID("t0_clk", "f8008000.timer", &tcb0_clk),
231 CLKDEV_CON_DEV_ID("t0_clk", "f800c000.timer", &tcb0_clk), 233 CLKDEV_CON_DEV_ID("t0_clk", "f800c000.timer", &tcb0_clk),
232 CLKDEV_CON_DEV_ID("mci_clk", "f0008000.mmc", &mmc0_clk), 234 CLKDEV_CON_DEV_ID("mci_clk", "f0008000.mmc", &mmc0_clk),
diff --git a/arch/arm/mach-davinci/board-dm355-leopard.c b/arch/arm/mach-davinci/board-dm355-leopard.c
index dff4ddc5ef81..139e42da25f0 100644
--- a/arch/arm/mach-davinci/board-dm355-leopard.c
+++ b/arch/arm/mach-davinci/board-dm355-leopard.c
@@ -75,6 +75,7 @@ static struct davinci_nand_pdata davinci_nand_data = {
75 .parts = davinci_nand_partitions, 75 .parts = davinci_nand_partitions,
76 .nr_parts = ARRAY_SIZE(davinci_nand_partitions), 76 .nr_parts = ARRAY_SIZE(davinci_nand_partitions),
77 .ecc_mode = NAND_ECC_HW_SYNDROME, 77 .ecc_mode = NAND_ECC_HW_SYNDROME,
78 .ecc_bits = 4,
78 .bbt_options = NAND_BBT_USE_FLASH, 79 .bbt_options = NAND_BBT_USE_FLASH,
79}; 80};
80 81
diff --git a/arch/arm/mach-davinci/board-dm644x-evm.c b/arch/arm/mach-davinci/board-dm644x-evm.c
index a33686a6fbb2..fa4bfaf952d8 100644
--- a/arch/arm/mach-davinci/board-dm644x-evm.c
+++ b/arch/arm/mach-davinci/board-dm644x-evm.c
@@ -153,6 +153,7 @@ static struct davinci_nand_pdata davinci_evm_nandflash_data = {
153 .parts = davinci_evm_nandflash_partition, 153 .parts = davinci_evm_nandflash_partition,
154 .nr_parts = ARRAY_SIZE(davinci_evm_nandflash_partition), 154 .nr_parts = ARRAY_SIZE(davinci_evm_nandflash_partition),
155 .ecc_mode = NAND_ECC_HW, 155 .ecc_mode = NAND_ECC_HW,
156 .ecc_bits = 1,
156 .bbt_options = NAND_BBT_USE_FLASH, 157 .bbt_options = NAND_BBT_USE_FLASH,
157 .timing = &davinci_evm_nandflash_timing, 158 .timing = &davinci_evm_nandflash_timing,
158}; 159};
diff --git a/arch/arm/mach-davinci/board-dm646x-evm.c b/arch/arm/mach-davinci/board-dm646x-evm.c
index fbb8e5ab1dc1..0c005e876cac 100644
--- a/arch/arm/mach-davinci/board-dm646x-evm.c
+++ b/arch/arm/mach-davinci/board-dm646x-evm.c
@@ -90,6 +90,7 @@ static struct davinci_nand_pdata davinci_nand_data = {
90 .parts = davinci_nand_partitions, 90 .parts = davinci_nand_partitions,
91 .nr_parts = ARRAY_SIZE(davinci_nand_partitions), 91 .nr_parts = ARRAY_SIZE(davinci_nand_partitions),
92 .ecc_mode = NAND_ECC_HW, 92 .ecc_mode = NAND_ECC_HW,
93 .ecc_bits = 1,
93 .options = 0, 94 .options = 0,
94}; 95};
95 96
diff --git a/arch/arm/mach-davinci/board-neuros-osd2.c b/arch/arm/mach-davinci/board-neuros-osd2.c
index 2bc112adf565..808233b60e3d 100644
--- a/arch/arm/mach-davinci/board-neuros-osd2.c
+++ b/arch/arm/mach-davinci/board-neuros-osd2.c
@@ -88,6 +88,7 @@ static struct davinci_nand_pdata davinci_ntosd2_nandflash_data = {
88 .parts = davinci_ntosd2_nandflash_partition, 88 .parts = davinci_ntosd2_nandflash_partition,
89 .nr_parts = ARRAY_SIZE(davinci_ntosd2_nandflash_partition), 89 .nr_parts = ARRAY_SIZE(davinci_ntosd2_nandflash_partition),
90 .ecc_mode = NAND_ECC_HW, 90 .ecc_mode = NAND_ECC_HW,
91 .ecc_bits = 1,
91 .bbt_options = NAND_BBT_USE_FLASH, 92 .bbt_options = NAND_BBT_USE_FLASH,
92}; 93};
93 94
diff --git a/arch/arm/mach-omap2/board-n8x0.c b/arch/arm/mach-omap2/board-n8x0.c
index f6eeb87e4e95..827d15009a86 100644
--- a/arch/arm/mach-omap2/board-n8x0.c
+++ b/arch/arm/mach-omap2/board-n8x0.c
@@ -122,11 +122,7 @@ static struct musb_hdrc_config musb_config = {
122}; 122};
123 123
124static struct musb_hdrc_platform_data tusb_data = { 124static struct musb_hdrc_platform_data tusb_data = {
125#ifdef CONFIG_USB_GADGET_MUSB_HDRC
126 .mode = MUSB_OTG, 125 .mode = MUSB_OTG,
127#else
128 .mode = MUSB_HOST,
129#endif
130 .set_power = tusb_set_power, 126 .set_power = tusb_set_power,
131 .min_power = 25, /* x2 = 50 mA drawn from VBUS as peripheral */ 127 .min_power = 25, /* x2 = 50 mA drawn from VBUS as peripheral */
132 .power = 100, /* Max 100 mA VBUS for host mode */ 128 .power = 100, /* Max 100 mA VBUS for host mode */
diff --git a/arch/arm/mach-omap2/board-rx51.c b/arch/arm/mach-omap2/board-rx51.c
index d2ea68ea678a..7735105561d8 100644
--- a/arch/arm/mach-omap2/board-rx51.c
+++ b/arch/arm/mach-omap2/board-rx51.c
@@ -85,7 +85,7 @@ static struct omap_board_mux board_mux[] __initdata = {
85 85
86static struct omap_musb_board_data musb_board_data = { 86static struct omap_musb_board_data musb_board_data = {
87 .interface_type = MUSB_INTERFACE_ULPI, 87 .interface_type = MUSB_INTERFACE_ULPI,
88 .mode = MUSB_PERIPHERAL, 88 .mode = MUSB_OTG,
89 .power = 0, 89 .power = 0,
90}; 90};
91 91
diff --git a/arch/arm/mach-omap2/usb-musb.c b/arch/arm/mach-omap2/usb-musb.c
index 8c4de2708cf2..bc897231bd10 100644
--- a/arch/arm/mach-omap2/usb-musb.c
+++ b/arch/arm/mach-omap2/usb-musb.c
@@ -38,11 +38,8 @@ static struct musb_hdrc_config musb_config = {
38}; 38};
39 39
40static struct musb_hdrc_platform_data musb_plat = { 40static struct musb_hdrc_platform_data musb_plat = {
41#ifdef CONFIG_USB_GADGET_MUSB_HDRC
42 .mode = MUSB_OTG, 41 .mode = MUSB_OTG,
43#else 42
44 .mode = MUSB_HOST,
45#endif
46 /* .clock is set dynamically */ 43 /* .clock is set dynamically */
47 .config = &musb_config, 44 .config = &musb_config,
48 45
diff --git a/arch/arm/mach-prima2/common.c b/arch/arm/mach-prima2/common.c
index 2c70f74fed5d..e110b6d4ae8c 100644
--- a/arch/arm/mach-prima2/common.c
+++ b/arch/arm/mach-prima2/common.c
@@ -42,7 +42,6 @@ static const char *atlas6_dt_match[] __initdata = {
42 42
43DT_MACHINE_START(ATLAS6_DT, "Generic ATLAS6 (Flattened Device Tree)") 43DT_MACHINE_START(ATLAS6_DT, "Generic ATLAS6 (Flattened Device Tree)")
44 /* Maintainer: Barry Song <baohua.song@csr.com> */ 44 /* Maintainer: Barry Song <baohua.song@csr.com> */
45 .nr_irqs = 128,
46 .map_io = sirfsoc_map_io, 45 .map_io = sirfsoc_map_io,
47 .init_time = sirfsoc_init_time, 46 .init_time = sirfsoc_init_time,
48 .init_late = sirfsoc_init_late, 47 .init_late = sirfsoc_init_late,
@@ -59,7 +58,6 @@ static const char *prima2_dt_match[] __initdata = {
59 58
60DT_MACHINE_START(PRIMA2_DT, "Generic PRIMA2 (Flattened Device Tree)") 59DT_MACHINE_START(PRIMA2_DT, "Generic PRIMA2 (Flattened Device Tree)")
61 /* Maintainer: Barry Song <baohua.song@csr.com> */ 60 /* Maintainer: Barry Song <baohua.song@csr.com> */
62 .nr_irqs = 128,
63 .map_io = sirfsoc_map_io, 61 .map_io = sirfsoc_map_io,
64 .init_time = sirfsoc_init_time, 62 .init_time = sirfsoc_init_time,
65 .dma_zone_size = SZ_256M, 63 .dma_zone_size = SZ_256M,
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index db5c2cab8fda..cd2c88e7a8f7 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -809,15 +809,18 @@ config KUSER_HELPERS
809 the CPU type fitted to the system. This permits binaries to be 809 the CPU type fitted to the system. This permits binaries to be
810 run on ARMv4 through to ARMv7 without modification. 810 run on ARMv4 through to ARMv7 without modification.
811 811
812 See Documentation/arm/kernel_user_helpers.txt for details.
813
812 However, the fixed address nature of these helpers can be used 814 However, the fixed address nature of these helpers can be used
813 by ROP (return orientated programming) authors when creating 815 by ROP (return orientated programming) authors when creating
814 exploits. 816 exploits.
815 817
816 If all of the binaries and libraries which run on your platform 818 If all of the binaries and libraries which run on your platform
817 are built specifically for your platform, and make no use of 819 are built specifically for your platform, and make no use of
818 these helpers, then you can turn this option off. However, 820 these helpers, then you can turn this option off to hinder
819 when such an binary or library is run, it will receive a SIGILL 821 such exploits. However, in that case, if a binary or library
820 signal, which will terminate the program. 822 relying on those helpers is run, it will receive a SIGILL signal,
823 which will terminate the program.
821 824
822 Say N here only if you are absolutely certain that you do not 825 Say N here only if you are absolutely certain that you do not
823 need these helpers; otherwise, the safe option is to say Y. 826 need these helpers; otherwise, the safe option is to say Y.
diff --git a/arch/arm/plat-samsung/init.c b/arch/arm/plat-samsung/init.c
index 3e5c4619caa5..50a3ea0037db 100644
--- a/arch/arm/plat-samsung/init.c
+++ b/arch/arm/plat-samsung/init.c
@@ -55,12 +55,13 @@ void __init s3c_init_cpu(unsigned long idcode,
55 55
56 printk("CPU %s (id 0x%08lx)\n", cpu->name, idcode); 56 printk("CPU %s (id 0x%08lx)\n", cpu->name, idcode);
57 57
58 if (cpu->map_io == NULL || cpu->init == NULL) { 58 if (cpu->init == NULL) {
59 printk(KERN_ERR "CPU %s support not enabled\n", cpu->name); 59 printk(KERN_ERR "CPU %s support not enabled\n", cpu->name);
60 panic("Unsupported Samsung CPU"); 60 panic("Unsupported Samsung CPU");
61 } 61 }
62 62
63 cpu->map_io(); 63 if (cpu->map_io)
64 cpu->map_io();
64} 65}
65 66
66/* s3c24xx_init_clocks 67/* s3c24xx_init_clocks
diff --git a/arch/arm/xen/enlighten.c b/arch/arm/xen/enlighten.c
index c9770ba5c7df..8a6295c86209 100644
--- a/arch/arm/xen/enlighten.c
+++ b/arch/arm/xen/enlighten.c
@@ -170,6 +170,7 @@ static void __init xen_percpu_init(void *unused)
170 per_cpu(xen_vcpu, cpu) = vcpup; 170 per_cpu(xen_vcpu, cpu) = vcpup;
171 171
172 enable_percpu_irq(xen_events_irq, 0); 172 enable_percpu_irq(xen_events_irq, 0);
173 put_cpu();
173} 174}
174 175
175static void xen_restart(enum reboot_mode reboot_mode, const char *cmd) 176static void xen_restart(enum reboot_mode reboot_mode, const char *cmd)
diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h
index c92de4163eba..b25763bc0ec4 100644
--- a/arch/arm64/include/asm/kvm_asm.h
+++ b/arch/arm64/include/asm/kvm_asm.h
@@ -42,14 +42,15 @@
42#define TPIDR_EL1 18 /* Thread ID, Privileged */ 42#define TPIDR_EL1 18 /* Thread ID, Privileged */
43#define AMAIR_EL1 19 /* Aux Memory Attribute Indirection Register */ 43#define AMAIR_EL1 19 /* Aux Memory Attribute Indirection Register */
44#define CNTKCTL_EL1 20 /* Timer Control Register (EL1) */ 44#define CNTKCTL_EL1 20 /* Timer Control Register (EL1) */
45#define PAR_EL1 21 /* Physical Address Register */
45/* 32bit specific registers. Keep them at the end of the range */ 46/* 32bit specific registers. Keep them at the end of the range */
46#define DACR32_EL2 21 /* Domain Access Control Register */ 47#define DACR32_EL2 22 /* Domain Access Control Register */
47#define IFSR32_EL2 22 /* Instruction Fault Status Register */ 48#define IFSR32_EL2 23 /* Instruction Fault Status Register */
48#define FPEXC32_EL2 23 /* Floating-Point Exception Control Register */ 49#define FPEXC32_EL2 24 /* Floating-Point Exception Control Register */
49#define DBGVCR32_EL2 24 /* Debug Vector Catch Register */ 50#define DBGVCR32_EL2 25 /* Debug Vector Catch Register */
50#define TEECR32_EL1 25 /* ThumbEE Configuration Register */ 51#define TEECR32_EL1 26 /* ThumbEE Configuration Register */
51#define TEEHBR32_EL1 26 /* ThumbEE Handler Base Register */ 52#define TEEHBR32_EL1 27 /* ThumbEE Handler Base Register */
52#define NR_SYS_REGS 27 53#define NR_SYS_REGS 28
53 54
54/* 32bit mapping */ 55/* 32bit mapping */
55#define c0_MPIDR (MPIDR_EL1 * 2) /* MultiProcessor ID Register */ 56#define c0_MPIDR (MPIDR_EL1 * 2) /* MultiProcessor ID Register */
@@ -69,6 +70,8 @@
69#define c5_AIFSR (AFSR1_EL1 * 2) /* Auxiliary Instr Fault Status R */ 70#define c5_AIFSR (AFSR1_EL1 * 2) /* Auxiliary Instr Fault Status R */
70#define c6_DFAR (FAR_EL1 * 2) /* Data Fault Address Register */ 71#define c6_DFAR (FAR_EL1 * 2) /* Data Fault Address Register */
71#define c6_IFAR (c6_DFAR + 1) /* Instruction Fault Address Register */ 72#define c6_IFAR (c6_DFAR + 1) /* Instruction Fault Address Register */
73#define c7_PAR (PAR_EL1 * 2) /* Physical Address Register */
74#define c7_PAR_high (c7_PAR + 1) /* PAR top 32 bits */
72#define c10_PRRR (MAIR_EL1 * 2) /* Primary Region Remap Register */ 75#define c10_PRRR (MAIR_EL1 * 2) /* Primary Region Remap Register */
73#define c10_NMRR (c10_PRRR + 1) /* Normal Memory Remap Register */ 76#define c10_NMRR (c10_PRRR + 1) /* Normal Memory Remap Register */
74#define c12_VBAR (VBAR_EL1 * 2) /* Vector Base Address Register */ 77#define c12_VBAR (VBAR_EL1 * 2) /* Vector Base Address Register */
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index 644d73956864..0859a4ddd1e7 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -129,7 +129,7 @@ struct kvm_vcpu_arch {
129 struct kvm_mmu_memory_cache mmu_page_cache; 129 struct kvm_mmu_memory_cache mmu_page_cache;
130 130
131 /* Target CPU and feature flags */ 131 /* Target CPU and feature flags */
132 u32 target; 132 int target;
133 DECLARE_BITMAP(features, KVM_VCPU_MAX_FEATURES); 133 DECLARE_BITMAP(features, KVM_VCPU_MAX_FEATURES);
134 134
135 /* Detect first run of a vcpu */ 135 /* Detect first run of a vcpu */
diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c
index 9ba33c40cdf8..12e6ccb88691 100644
--- a/arch/arm64/kernel/perf_event.c
+++ b/arch/arm64/kernel/perf_event.c
@@ -107,7 +107,12 @@ armpmu_map_cache_event(const unsigned (*cache_map)
107static int 107static int
108armpmu_map_event(const unsigned (*event_map)[PERF_COUNT_HW_MAX], u64 config) 108armpmu_map_event(const unsigned (*event_map)[PERF_COUNT_HW_MAX], u64 config)
109{ 109{
110 int mapping = (*event_map)[config]; 110 int mapping;
111
112 if (config >= PERF_COUNT_HW_MAX)
113 return -EINVAL;
114
115 mapping = (*event_map)[config];
111 return mapping == HW_OP_UNSUPPORTED ? -ENOENT : mapping; 116 return mapping == HW_OP_UNSUPPORTED ? -ENOENT : mapping;
112} 117}
113 118
@@ -317,6 +322,9 @@ validate_event(struct pmu_hw_events *hw_events,
317 struct hw_perf_event fake_event = event->hw; 322 struct hw_perf_event fake_event = event->hw;
318 struct pmu *leader_pmu = event->group_leader->pmu; 323 struct pmu *leader_pmu = event->group_leader->pmu;
319 324
325 if (is_software_event(event))
326 return 1;
327
320 if (event->pmu != leader_pmu || event->state <= PERF_EVENT_STATE_OFF) 328 if (event->pmu != leader_pmu || event->state <= PERF_EVENT_STATE_OFF)
321 return 1; 329 return 1;
322 330
diff --git a/arch/arm64/kvm/hyp.S b/arch/arm64/kvm/hyp.S
index ff985e3d8b72..1ac0bbbdddb2 100644
--- a/arch/arm64/kvm/hyp.S
+++ b/arch/arm64/kvm/hyp.S
@@ -214,6 +214,7 @@ __kvm_hyp_code_start:
214 mrs x21, tpidr_el1 214 mrs x21, tpidr_el1
215 mrs x22, amair_el1 215 mrs x22, amair_el1
216 mrs x23, cntkctl_el1 216 mrs x23, cntkctl_el1
217 mrs x24, par_el1
217 218
218 stp x4, x5, [x3] 219 stp x4, x5, [x3]
219 stp x6, x7, [x3, #16] 220 stp x6, x7, [x3, #16]
@@ -225,6 +226,7 @@ __kvm_hyp_code_start:
225 stp x18, x19, [x3, #112] 226 stp x18, x19, [x3, #112]
226 stp x20, x21, [x3, #128] 227 stp x20, x21, [x3, #128]
227 stp x22, x23, [x3, #144] 228 stp x22, x23, [x3, #144]
229 str x24, [x3, #160]
228.endm 230.endm
229 231
230.macro restore_sysregs 232.macro restore_sysregs
@@ -243,6 +245,7 @@ __kvm_hyp_code_start:
243 ldp x18, x19, [x3, #112] 245 ldp x18, x19, [x3, #112]
244 ldp x20, x21, [x3, #128] 246 ldp x20, x21, [x3, #128]
245 ldp x22, x23, [x3, #144] 247 ldp x22, x23, [x3, #144]
248 ldr x24, [x3, #160]
246 249
247 msr vmpidr_el2, x4 250 msr vmpidr_el2, x4
248 msr csselr_el1, x5 251 msr csselr_el1, x5
@@ -264,6 +267,7 @@ __kvm_hyp_code_start:
264 msr tpidr_el1, x21 267 msr tpidr_el1, x21
265 msr amair_el1, x22 268 msr amair_el1, x22
266 msr cntkctl_el1, x23 269 msr cntkctl_el1, x23
270 msr par_el1, x24
267.endm 271.endm
268 272
269.macro skip_32bit_state tmp, target 273.macro skip_32bit_state tmp, target
@@ -600,6 +604,8 @@ END(__kvm_vcpu_run)
600 604
601// void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa); 605// void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa);
602ENTRY(__kvm_tlb_flush_vmid_ipa) 606ENTRY(__kvm_tlb_flush_vmid_ipa)
607 dsb ishst
608
603 kern_hyp_va x0 609 kern_hyp_va x0
604 ldr x2, [x0, #KVM_VTTBR] 610 ldr x2, [x0, #KVM_VTTBR]
605 msr vttbr_el2, x2 611 msr vttbr_el2, x2
@@ -621,6 +627,7 @@ ENTRY(__kvm_tlb_flush_vmid_ipa)
621ENDPROC(__kvm_tlb_flush_vmid_ipa) 627ENDPROC(__kvm_tlb_flush_vmid_ipa)
622 628
623ENTRY(__kvm_flush_vm_context) 629ENTRY(__kvm_flush_vm_context)
630 dsb ishst
624 tlbi alle1is 631 tlbi alle1is
625 ic ialluis 632 ic ialluis
626 dsb sy 633 dsb sy
@@ -753,6 +760,10 @@ el1_trap:
753 */ 760 */
754 tbnz x1, #7, 1f // S1PTW is set 761 tbnz x1, #7, 1f // S1PTW is set
755 762
763 /* Preserve PAR_EL1 */
764 mrs x3, par_el1
765 push x3, xzr
766
756 /* 767 /*
757 * Permission fault, HPFAR_EL2 is invalid. 768 * Permission fault, HPFAR_EL2 is invalid.
758 * Resolve the IPA the hard way using the guest VA. 769 * Resolve the IPA the hard way using the guest VA.
@@ -766,6 +777,8 @@ el1_trap:
766 777
767 /* Read result */ 778 /* Read result */
768 mrs x3, par_el1 779 mrs x3, par_el1
780 pop x0, xzr // Restore PAR_EL1 from the stack
781 msr par_el1, x0
769 tbnz x3, #0, 3f // Bail out if we failed the translation 782 tbnz x3, #0, 3f // Bail out if we failed the translation
770 ubfx x3, x3, #12, #36 // Extract IPA 783 ubfx x3, x3, #12, #36 // Extract IPA
771 lsl x3, x3, #4 // and present it like HPFAR 784 lsl x3, x3, #4 // and present it like HPFAR
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index 94923609753b..02e9d09e1d80 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -211,6 +211,9 @@ static const struct sys_reg_desc sys_reg_descs[] = {
211 /* FAR_EL1 */ 211 /* FAR_EL1 */
212 { Op0(0b11), Op1(0b000), CRn(0b0110), CRm(0b0000), Op2(0b000), 212 { Op0(0b11), Op1(0b000), CRn(0b0110), CRm(0b0000), Op2(0b000),
213 NULL, reset_unknown, FAR_EL1 }, 213 NULL, reset_unknown, FAR_EL1 },
214 /* PAR_EL1 */
215 { Op0(0b11), Op1(0b000), CRn(0b0111), CRm(0b0100), Op2(0b000),
216 NULL, reset_unknown, PAR_EL1 },
214 217
215 /* PMINTENSET_EL1 */ 218 /* PMINTENSET_EL1 */
216 { Op0(0b11), Op1(0b000), CRn(0b1001), CRm(0b1110), Op2(0b001), 219 { Op0(0b11), Op1(0b000), CRn(0b1001), CRm(0b1110), Op2(0b001),
diff --git a/arch/mips/math-emu/cp1emu.c b/arch/mips/math-emu/cp1emu.c
index e773659ccf9f..46048d24328c 100644
--- a/arch/mips/math-emu/cp1emu.c
+++ b/arch/mips/math-emu/cp1emu.c
@@ -803,6 +803,32 @@ static int isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn,
803 dec_insn.next_pc_inc; 803 dec_insn.next_pc_inc;
804 return 1; 804 return 1;
805 break; 805 break;
806#ifdef CONFIG_CPU_CAVIUM_OCTEON
807 case lwc2_op: /* This is bbit0 on Octeon */
808 if ((regs->regs[insn.i_format.rs] & (1ull<<insn.i_format.rt)) == 0)
809 *contpc = regs->cp0_epc + 4 + (insn.i_format.simmediate << 2);
810 else
811 *contpc = regs->cp0_epc + 8;
812 return 1;
813 case ldc2_op: /* This is bbit032 on Octeon */
814 if ((regs->regs[insn.i_format.rs] & (1ull<<(insn.i_format.rt + 32))) == 0)
815 *contpc = regs->cp0_epc + 4 + (insn.i_format.simmediate << 2);
816 else
817 *contpc = regs->cp0_epc + 8;
818 return 1;
819 case swc2_op: /* This is bbit1 on Octeon */
820 if (regs->regs[insn.i_format.rs] & (1ull<<insn.i_format.rt))
821 *contpc = regs->cp0_epc + 4 + (insn.i_format.simmediate << 2);
822 else
823 *contpc = regs->cp0_epc + 8;
824 return 1;
825 case sdc2_op: /* This is bbit132 on Octeon */
826 if (regs->regs[insn.i_format.rs] & (1ull<<(insn.i_format.rt + 32)))
827 *contpc = regs->cp0_epc + 4 + (insn.i_format.simmediate << 2);
828 else
829 *contpc = regs->cp0_epc + 8;
830 return 1;
831#endif
806 case cop0_op: 832 case cop0_op:
807 case cop1_op: 833 case cop1_op:
808 case cop2_op: 834 case cop2_op:
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index dbd9d3c991e8..9cf59816d3e9 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -979,6 +979,7 @@ config RELOCATABLE
979 must live at a different physical address than the primary 979 must live at a different physical address than the primary
980 kernel. 980 kernel.
981 981
982# This value must have zeroes in the bottom 60 bits otherwise lots will break
982config PAGE_OFFSET 983config PAGE_OFFSET
983 hex 984 hex
984 default "0xc000000000000000" 985 default "0xc000000000000000"
diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h
index 988c812aab5b..b9f426212d3a 100644
--- a/arch/powerpc/include/asm/page.h
+++ b/arch/powerpc/include/asm/page.h
@@ -211,9 +211,19 @@ extern long long virt_phys_offset;
211#define __va(x) ((void *)(unsigned long)((phys_addr_t)(x) + VIRT_PHYS_OFFSET)) 211#define __va(x) ((void *)(unsigned long)((phys_addr_t)(x) + VIRT_PHYS_OFFSET))
212#define __pa(x) ((unsigned long)(x) - VIRT_PHYS_OFFSET) 212#define __pa(x) ((unsigned long)(x) - VIRT_PHYS_OFFSET)
213#else 213#else
214#ifdef CONFIG_PPC64
215/*
216 * gcc miscompiles (unsigned long)(&static_var) - PAGE_OFFSET
217 * with -mcmodel=medium, so we use & and | instead of - and + on 64-bit.
218 */
219#define __va(x) ((void *)(unsigned long)((phys_addr_t)(x) | PAGE_OFFSET))
220#define __pa(x) ((unsigned long)(x) & 0x0fffffffffffffffUL)
221
222#else /* 32-bit, non book E */
214#define __va(x) ((void *)(unsigned long)((phys_addr_t)(x) + PAGE_OFFSET - MEMORY_START)) 223#define __va(x) ((void *)(unsigned long)((phys_addr_t)(x) + PAGE_OFFSET - MEMORY_START))
215#define __pa(x) ((unsigned long)(x) - PAGE_OFFSET + MEMORY_START) 224#define __pa(x) ((unsigned long)(x) - PAGE_OFFSET + MEMORY_START)
216#endif 225#endif
226#endif
217 227
218/* 228/*
219 * Unfortunately the PLT is in the BSS in the PPC32 ELF ABI, 229 * Unfortunately the PLT is in the BSS in the PPC32 ELF ABI,
diff --git a/arch/powerpc/kernel/lparcfg.c b/arch/powerpc/kernel/lparcfg.c
index d92f3871e9cf..e2a0a162299b 100644
--- a/arch/powerpc/kernel/lparcfg.c
+++ b/arch/powerpc/kernel/lparcfg.c
@@ -35,7 +35,13 @@
35#include <asm/vdso_datapage.h> 35#include <asm/vdso_datapage.h>
36#include <asm/vio.h> 36#include <asm/vio.h>
37#include <asm/mmu.h> 37#include <asm/mmu.h>
38#include <asm/machdep.h>
38 39
40
41/*
42 * This isn't a module but we expose that to userspace
43 * via /proc so leave the definitions here
44 */
39#define MODULE_VERS "1.9" 45#define MODULE_VERS "1.9"
40#define MODULE_NAME "lparcfg" 46#define MODULE_NAME "lparcfg"
41 47
@@ -418,7 +424,8 @@ static void parse_em_data(struct seq_file *m)
418{ 424{
419 unsigned long retbuf[PLPAR_HCALL_BUFSIZE]; 425 unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
420 426
421 if (plpar_hcall(H_GET_EM_PARMS, retbuf) == H_SUCCESS) 427 if (firmware_has_feature(FW_FEATURE_LPAR) &&
428 plpar_hcall(H_GET_EM_PARMS, retbuf) == H_SUCCESS)
422 seq_printf(m, "power_mode_data=%016lx\n", retbuf[0]); 429 seq_printf(m, "power_mode_data=%016lx\n", retbuf[0]);
423} 430}
424 431
@@ -677,7 +684,6 @@ static int lparcfg_open(struct inode *inode, struct file *file)
677} 684}
678 685
679static const struct file_operations lparcfg_fops = { 686static const struct file_operations lparcfg_fops = {
680 .owner = THIS_MODULE,
681 .read = seq_read, 687 .read = seq_read,
682 .write = lparcfg_write, 688 .write = lparcfg_write,
683 .open = lparcfg_open, 689 .open = lparcfg_open,
@@ -699,14 +705,4 @@ static int __init lparcfg_init(void)
699 } 705 }
700 return 0; 706 return 0;
701} 707}
702 708machine_device_initcall(pseries, lparcfg_init);
703static void __exit lparcfg_cleanup(void)
704{
705 remove_proc_subtree("powerpc/lparcfg", NULL);
706}
707
708module_init(lparcfg_init);
709module_exit(lparcfg_cleanup);
710MODULE_DESCRIPTION("Interface for LPAR configuration data");
711MODULE_AUTHOR("Dave Engebretsen");
712MODULE_LICENSE("GPL");
diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c
index 48f8375e4c6b..30277e27431a 100644
--- a/arch/x86/kernel/sys_x86_64.c
+++ b/arch/x86/kernel/sys_x86_64.c
@@ -101,7 +101,7 @@ static void find_start_end(unsigned long flags, unsigned long *begin,
101 *begin = new_begin; 101 *begin = new_begin;
102 } 102 }
103 } else { 103 } else {
104 *begin = mmap_legacy_base(); 104 *begin = current->mm->mmap_legacy_base;
105 *end = TASK_SIZE; 105 *end = TASK_SIZE;
106 } 106 }
107} 107}
diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c
index f63778cb2363..25e7e1372bb2 100644
--- a/arch/x86/mm/mmap.c
+++ b/arch/x86/mm/mmap.c
@@ -98,7 +98,7 @@ static unsigned long mmap_base(void)
98 * Bottom-up (legacy) layout on X86_32 did not support randomization, X86_64 98 * Bottom-up (legacy) layout on X86_32 did not support randomization, X86_64
99 * does, but not when emulating X86_32 99 * does, but not when emulating X86_32
100 */ 100 */
101unsigned long mmap_legacy_base(void) 101static unsigned long mmap_legacy_base(void)
102{ 102{
103 if (mmap_is_ia32()) 103 if (mmap_is_ia32())
104 return TASK_UNMAPPED_BASE; 104 return TASK_UNMAPPED_BASE;
@@ -112,11 +112,13 @@ unsigned long mmap_legacy_base(void)
112 */ 112 */
113void arch_pick_mmap_layout(struct mm_struct *mm) 113void arch_pick_mmap_layout(struct mm_struct *mm)
114{ 114{
115 mm->mmap_legacy_base = mmap_legacy_base();
116 mm->mmap_base = mmap_base();
117
115 if (mmap_is_legacy()) { 118 if (mmap_is_legacy()) {
116 mm->mmap_base = mmap_legacy_base(); 119 mm->mmap_base = mm->mmap_legacy_base;
117 mm->get_unmapped_area = arch_get_unmapped_area; 120 mm->get_unmapped_area = arch_get_unmapped_area;
118 } else { 121 } else {
119 mm->mmap_base = mmap_base();
120 mm->get_unmapped_area = arch_get_unmapped_area_topdown; 122 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
121 } 123 }
122} 124}
diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c
index 056d11faef21..8f3eea6b80c5 100644
--- a/arch/x86/xen/setup.c
+++ b/arch/x86/xen/setup.c
@@ -313,6 +313,17 @@ static void xen_align_and_add_e820_region(u64 start, u64 size, int type)
313 e820_add_region(start, end - start, type); 313 e820_add_region(start, end - start, type);
314} 314}
315 315
316void xen_ignore_unusable(struct e820entry *list, size_t map_size)
317{
318 struct e820entry *entry;
319 unsigned int i;
320
321 for (i = 0, entry = list; i < map_size; i++, entry++) {
322 if (entry->type == E820_UNUSABLE)
323 entry->type = E820_RAM;
324 }
325}
326
316/** 327/**
317 * machine_specific_memory_setup - Hook for machine specific memory setup. 328 * machine_specific_memory_setup - Hook for machine specific memory setup.
318 **/ 329 **/
@@ -353,6 +364,17 @@ char * __init xen_memory_setup(void)
353 } 364 }
354 BUG_ON(rc); 365 BUG_ON(rc);
355 366
367 /*
368 * Xen won't allow a 1:1 mapping to be created to UNUSABLE
369 * regions, so if we're using the machine memory map leave the
370 * region as RAM as it is in the pseudo-physical map.
371 *
372 * UNUSABLE regions in domUs are not handled and will need
373 * a patch in the future.
374 */
375 if (xen_initial_domain())
376 xen_ignore_unusable(map, memmap.nr_entries);
377
356 /* Make sure the Xen-supplied memory map is well-ordered. */ 378 /* Make sure the Xen-supplied memory map is well-ordered. */
357 sanitize_e820_map(map, memmap.nr_entries, &memmap.nr_entries); 379 sanitize_e820_map(map, memmap.nr_entries, &memmap.nr_entries);
358 380
diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
index ca92754eb846..b81c88e51daa 100644
--- a/arch/x86/xen/smp.c
+++ b/arch/x86/xen/smp.c
@@ -694,8 +694,15 @@ static void __init xen_hvm_smp_prepare_cpus(unsigned int max_cpus)
694static int xen_hvm_cpu_up(unsigned int cpu, struct task_struct *tidle) 694static int xen_hvm_cpu_up(unsigned int cpu, struct task_struct *tidle)
695{ 695{
696 int rc; 696 int rc;
697 rc = native_cpu_up(cpu, tidle); 697 /*
698 WARN_ON (xen_smp_intr_init(cpu)); 698 * xen_smp_intr_init() needs to run before native_cpu_up()
699 * so that IPI vectors are set up on the booting CPU before
700 * it is marked online in native_cpu_up().
701 */
702 rc = xen_smp_intr_init(cpu);
703 WARN_ON(rc);
704 if (!rc)
705 rc = native_cpu_up(cpu, tidle);
699 return rc; 706 return rc;
700} 707}
701 708
diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c
index e1284b8dc6ee..3270d3c8ba4e 100644
--- a/drivers/acpi/video.c
+++ b/drivers/acpi/video.c
@@ -908,9 +908,6 @@ static void acpi_video_device_find_cap(struct acpi_video_device *device)
908 device->cap._DDC = 1; 908 device->cap._DDC = 1;
909 } 909 }
910 910
911 if (acpi_video_init_brightness(device))
912 return;
913
914 if (acpi_video_backlight_support()) { 911 if (acpi_video_backlight_support()) {
915 struct backlight_properties props; 912 struct backlight_properties props;
916 struct pci_dev *pdev; 913 struct pci_dev *pdev;
@@ -920,6 +917,9 @@ static void acpi_video_device_find_cap(struct acpi_video_device *device)
920 static int count = 0; 917 static int count = 0;
921 char *name; 918 char *name;
922 919
920 result = acpi_video_init_brightness(device);
921 if (result)
922 return;
923 name = kasprintf(GFP_KERNEL, "acpi_video%d", count); 923 name = kasprintf(GFP_KERNEL, "acpi_video%d", count);
924 if (!name) 924 if (!name)
925 return; 925 return;
@@ -979,11 +979,6 @@ static void acpi_video_device_find_cap(struct acpi_video_device *device)
979 if (result) 979 if (result)
980 printk(KERN_ERR PREFIX "Create sysfs link\n"); 980 printk(KERN_ERR PREFIX "Create sysfs link\n");
981 981
982 } else {
983 /* Remove the brightness object. */
984 kfree(device->brightness->levels);
985 kfree(device->brightness);
986 device->brightness = NULL;
987 } 982 }
988} 983}
989 984
diff --git a/drivers/ata/libata-pmp.c b/drivers/ata/libata-pmp.c
index 1c41722bb7e2..20fd337a5731 100644
--- a/drivers/ata/libata-pmp.c
+++ b/drivers/ata/libata-pmp.c
@@ -289,24 +289,24 @@ static int sata_pmp_configure(struct ata_device *dev, int print_info)
289 289
290 /* Disable sending Early R_OK. 290 /* Disable sending Early R_OK.
291 * With "cached read" HDD testing and multiple ports busy on a SATA 291 * With "cached read" HDD testing and multiple ports busy on a SATA
292 * host controller, 3726 PMP will very rarely drop a deferred 292 * host controller, 3x26 PMP will very rarely drop a deferred
293 * R_OK that was intended for the host. Symptom will be all 293 * R_OK that was intended for the host. Symptom will be all
294 * 5 drives under test will timeout, get reset, and recover. 294 * 5 drives under test will timeout, get reset, and recover.
295 */ 295 */
296 if (vendor == 0x1095 && devid == 0x3726) { 296 if (vendor == 0x1095 && (devid == 0x3726 || devid == 0x3826)) {
297 u32 reg; 297 u32 reg;
298 298
299 err_mask = sata_pmp_read(&ap->link, PMP_GSCR_SII_POL, &reg); 299 err_mask = sata_pmp_read(&ap->link, PMP_GSCR_SII_POL, &reg);
300 if (err_mask) { 300 if (err_mask) {
301 rc = -EIO; 301 rc = -EIO;
302 reason = "failed to read Sil3726 Private Register"; 302 reason = "failed to read Sil3x26 Private Register";
303 goto fail; 303 goto fail;
304 } 304 }
305 reg &= ~0x1; 305 reg &= ~0x1;
306 err_mask = sata_pmp_write(&ap->link, PMP_GSCR_SII_POL, reg); 306 err_mask = sata_pmp_write(&ap->link, PMP_GSCR_SII_POL, reg);
307 if (err_mask) { 307 if (err_mask) {
308 rc = -EIO; 308 rc = -EIO;
309 reason = "failed to write Sil3726 Private Register"; 309 reason = "failed to write Sil3x26 Private Register";
310 goto fail; 310 goto fail;
311 } 311 }
312 } 312 }
@@ -383,8 +383,8 @@ static void sata_pmp_quirks(struct ata_port *ap)
383 u16 devid = sata_pmp_gscr_devid(gscr); 383 u16 devid = sata_pmp_gscr_devid(gscr);
384 struct ata_link *link; 384 struct ata_link *link;
385 385
386 if (vendor == 0x1095 && devid == 0x3726) { 386 if (vendor == 0x1095 && (devid == 0x3726 || devid == 0x3826)) {
387 /* sil3726 quirks */ 387 /* sil3x26 quirks */
388 ata_for_each_link(link, ap, EDGE) { 388 ata_for_each_link(link, ap, EDGE) {
389 /* link reports offline after LPM */ 389 /* link reports offline after LPM */
390 link->flags |= ATA_LFLAG_NO_LPM; 390 link->flags |= ATA_LFLAG_NO_LPM;
diff --git a/drivers/ata/sata_fsl.c b/drivers/ata/sata_fsl.c
index 19720a0a4a65..851bd3f43ac6 100644
--- a/drivers/ata/sata_fsl.c
+++ b/drivers/ata/sata_fsl.c
@@ -293,6 +293,7 @@ static void fsl_sata_set_irq_coalescing(struct ata_host *host,
293{ 293{
294 struct sata_fsl_host_priv *host_priv = host->private_data; 294 struct sata_fsl_host_priv *host_priv = host->private_data;
295 void __iomem *hcr_base = host_priv->hcr_base; 295 void __iomem *hcr_base = host_priv->hcr_base;
296 unsigned long flags;
296 297
297 if (count > ICC_MAX_INT_COUNT_THRESHOLD) 298 if (count > ICC_MAX_INT_COUNT_THRESHOLD)
298 count = ICC_MAX_INT_COUNT_THRESHOLD; 299 count = ICC_MAX_INT_COUNT_THRESHOLD;
@@ -305,12 +306,12 @@ static void fsl_sata_set_irq_coalescing(struct ata_host *host,
305 (count > ICC_MIN_INT_COUNT_THRESHOLD)) 306 (count > ICC_MIN_INT_COUNT_THRESHOLD))
306 ticks = ICC_SAFE_INT_TICKS; 307 ticks = ICC_SAFE_INT_TICKS;
307 308
308 spin_lock(&host->lock); 309 spin_lock_irqsave(&host->lock, flags);
309 iowrite32((count << 24 | ticks), hcr_base + ICC); 310 iowrite32((count << 24 | ticks), hcr_base + ICC);
310 311
311 intr_coalescing_count = count; 312 intr_coalescing_count = count;
312 intr_coalescing_ticks = ticks; 313 intr_coalescing_ticks = ticks;
313 spin_unlock(&host->lock); 314 spin_unlock_irqrestore(&host->lock, flags);
314 315
315 DPRINTK("interrupt coalescing, count = 0x%x, ticks = %x\n", 316 DPRINTK("interrupt coalescing, count = 0x%x, ticks = %x\n",
316 intr_coalescing_count, intr_coalescing_ticks); 317 intr_coalescing_count, intr_coalescing_ticks);
diff --git a/drivers/ata/sata_highbank.c b/drivers/ata/sata_highbank.c
index d047d92a456f..e9a4f46d962e 100644
--- a/drivers/ata/sata_highbank.c
+++ b/drivers/ata/sata_highbank.c
@@ -86,11 +86,11 @@ struct ecx_plat_data {
86 86
87#define SGPIO_SIGNALS 3 87#define SGPIO_SIGNALS 3
88#define ECX_ACTIVITY_BITS 0x300000 88#define ECX_ACTIVITY_BITS 0x300000
89#define ECX_ACTIVITY_SHIFT 2 89#define ECX_ACTIVITY_SHIFT 0
90#define ECX_LOCATE_BITS 0x80000 90#define ECX_LOCATE_BITS 0x80000
91#define ECX_LOCATE_SHIFT 1 91#define ECX_LOCATE_SHIFT 1
92#define ECX_FAULT_BITS 0x400000 92#define ECX_FAULT_BITS 0x400000
93#define ECX_FAULT_SHIFT 0 93#define ECX_FAULT_SHIFT 2
94static inline int sgpio_bit_shift(struct ecx_plat_data *pdata, u32 port, 94static inline int sgpio_bit_shift(struct ecx_plat_data *pdata, u32 port,
95 u32 shift) 95 u32 shift)
96{ 96{
diff --git a/drivers/base/memory.c b/drivers/base/memory.c
index 2b7813ec6d02..ec386ee9cb22 100644
--- a/drivers/base/memory.c
+++ b/drivers/base/memory.c
@@ -141,6 +141,8 @@ static ssize_t show_mem_removable(struct device *dev,
141 container_of(dev, struct memory_block, dev); 141 container_of(dev, struct memory_block, dev);
142 142
143 for (i = 0; i < sections_per_block; i++) { 143 for (i = 0; i < sections_per_block; i++) {
144 if (!present_section_nr(mem->start_section_nr + i))
145 continue;
144 pfn = section_nr_to_pfn(mem->start_section_nr + i); 146 pfn = section_nr_to_pfn(mem->start_section_nr + i);
145 ret &= is_mem_section_removable(pfn, PAGES_PER_SECTION); 147 ret &= is_mem_section_removable(pfn, PAGES_PER_SECTION);
146 } 148 }
diff --git a/drivers/base/regmap/regcache-rbtree.c b/drivers/base/regmap/regcache-rbtree.c
index 5c1435c4e210..0fccc99881fd 100644
--- a/drivers/base/regmap/regcache-rbtree.c
+++ b/drivers/base/regmap/regcache-rbtree.c
@@ -332,7 +332,7 @@ regcache_rbtree_node_alloc(struct regmap *map, unsigned int reg)
332 } 332 }
333 333
334 if (!rbnode->blklen) { 334 if (!rbnode->blklen) {
335 rbnode->blklen = sizeof(*rbnode); 335 rbnode->blklen = 1;
336 rbnode->base_reg = reg; 336 rbnode->base_reg = reg;
337 } 337 }
338 338
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 6825957c97fb..643d7c7a0d8e 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -194,7 +194,7 @@ config SIRF_DMA
194 Enable support for the CSR SiRFprimaII DMA engine. 194 Enable support for the CSR SiRFprimaII DMA engine.
195 195
196config TI_EDMA 196config TI_EDMA
197 tristate "TI EDMA support" 197 bool "TI EDMA support"
198 depends on ARCH_DAVINCI || ARCH_OMAP 198 depends on ARCH_DAVINCI || ARCH_OMAP
199 select DMA_ENGINE 199 select DMA_ENGINE
200 select DMA_VIRTUAL_CHANNELS 200 select DMA_VIRTUAL_CHANNELS
diff --git a/drivers/gpu/drm/gma500/psb_intel_sdvo.c b/drivers/gpu/drm/gma500/psb_intel_sdvo.c
index 19e36603b23b..3bc8414533c9 100644
--- a/drivers/gpu/drm/gma500/psb_intel_sdvo.c
+++ b/drivers/gpu/drm/gma500/psb_intel_sdvo.c
@@ -500,7 +500,8 @@ static bool psb_intel_sdvo_read_response(struct psb_intel_sdvo *psb_intel_sdvo,
500 &status)) 500 &status))
501 goto log_fail; 501 goto log_fail;
502 502
503 while (status == SDVO_CMD_STATUS_PENDING && retry--) { 503 while ((status == SDVO_CMD_STATUS_PENDING ||
504 status == SDVO_CMD_STATUS_TARGET_NOT_SPECIFIED) && retry--) {
504 udelay(15); 505 udelay(15);
505 if (!psb_intel_sdvo_read_byte(psb_intel_sdvo, 506 if (!psb_intel_sdvo_read_byte(psb_intel_sdvo,
506 SDVO_I2C_CMD_STATUS, 507 SDVO_I2C_CMD_STATUS,
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 6f514297c483..342f1f336168 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -752,6 +752,8 @@
752 will not assert AGPBUSY# and will only 752 will not assert AGPBUSY# and will only
753 be delivered when out of C3. */ 753 be delivered when out of C3. */
754#define INSTPM_FORCE_ORDERING (1<<7) /* GEN6+ */ 754#define INSTPM_FORCE_ORDERING (1<<7) /* GEN6+ */
755#define INSTPM_TLB_INVALIDATE (1<<9)
756#define INSTPM_SYNC_FLUSH (1<<5)
755#define ACTHD 0x020c8 757#define ACTHD 0x020c8
756#define FW_BLC 0x020d8 758#define FW_BLC 0x020d8
757#define FW_BLC2 0x020dc 759#define FW_BLC2 0x020dc
@@ -4438,7 +4440,7 @@
4438#define EDP_LINK_TRAIN_600MV_0DB_IVB (0x30 <<22) 4440#define EDP_LINK_TRAIN_600MV_0DB_IVB (0x30 <<22)
4439#define EDP_LINK_TRAIN_600MV_3_5DB_IVB (0x36 <<22) 4441#define EDP_LINK_TRAIN_600MV_3_5DB_IVB (0x36 <<22)
4440#define EDP_LINK_TRAIN_800MV_0DB_IVB (0x38 <<22) 4442#define EDP_LINK_TRAIN_800MV_0DB_IVB (0x38 <<22)
4441#define EDP_LINK_TRAIN_800MV_3_5DB_IVB (0x33 <<22) 4443#define EDP_LINK_TRAIN_800MV_3_5DB_IVB (0x3e <<22)
4442 4444
4443/* legacy values */ 4445/* legacy values */
4444#define EDP_LINK_TRAIN_500MV_0DB_IVB (0x00 <<22) 4446#define EDP_LINK_TRAIN_500MV_0DB_IVB (0x00 <<22)
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 664118d8c1d6..079ef0129e74 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -968,6 +968,18 @@ void intel_ring_setup_status_page(struct intel_ring_buffer *ring)
968 968
969 I915_WRITE(mmio, (u32)ring->status_page.gfx_addr); 969 I915_WRITE(mmio, (u32)ring->status_page.gfx_addr);
970 POSTING_READ(mmio); 970 POSTING_READ(mmio);
971
972 /* Flush the TLB for this page */
973 if (INTEL_INFO(dev)->gen >= 6) {
974 u32 reg = RING_INSTPM(ring->mmio_base);
975 I915_WRITE(reg,
976 _MASKED_BIT_ENABLE(INSTPM_TLB_INVALIDATE |
977 INSTPM_SYNC_FLUSH));
978 if (wait_for((I915_READ(reg) & INSTPM_SYNC_FLUSH) == 0,
979 1000))
980 DRM_ERROR("%s: wait for SyncFlush to complete for TLB invalidation timed out\n",
981 ring->name);
982 }
971} 983}
972 984
973static int 985static int
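
The intel_ringbuffer.c hunk writes INSTPM with the TLB-invalidate and sync-flush bits set and then polls the register until the hardware clears the sync-flush bit, giving up after a timeout. A standalone sketch of that poll-with-deadline pattern (the fake register below clears itself after a few reads; it is not the i915 register API):

    #define _POSIX_C_SOURCE 199309L
    #include <stdint.h>
    #include <stdio.h>
    #include <time.h>

    #define SYNC_FLUSH (1u << 5)

    static uint32_t fake_reg = SYNC_FLUSH;   /* pretend hardware register */

    static uint32_t read_reg(void)
    {
        static int reads;
        if (++reads > 3)                     /* "hardware" finishes the flush */
            fake_reg &= ~SYNC_FLUSH;
        return fake_reg;
    }

    static int64_t now_ms(void)
    {
        struct timespec ts;
        clock_gettime(CLOCK_MONOTONIC, &ts);
        return (int64_t)ts.tv_sec * 1000 + ts.tv_nsec / 1000000;
    }

    /* Poll until the bit reads back clear or timeout_ms elapses. */
    static int wait_for_clear(uint32_t bit, int timeout_ms)
    {
        int64_t deadline = now_ms() + timeout_ms;
        while (read_reg() & bit) {
            if (now_ms() > deadline)
                return -1;
        }
        return 0;
    }

    int main(void)
    {
        if (wait_for_clear(SYNC_FLUSH, 1000))
            fprintf(stderr, "wait for SyncFlush timed out\n");
        else
            printf("flush completed\n");
        return 0;
    }
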
diff --git a/drivers/gpu/drm/nouveau/core/core/mm.c b/drivers/gpu/drm/nouveau/core/core/mm.c
index d8291724dbd4..7a4e0891c5f8 100644
--- a/drivers/gpu/drm/nouveau/core/core/mm.c
+++ b/drivers/gpu/drm/nouveau/core/core/mm.c
@@ -98,6 +98,8 @@ nouveau_mm_head(struct nouveau_mm *mm, u8 type, u32 size_max, u32 size_min,
98 u32 splitoff; 98 u32 splitoff;
99 u32 s, e; 99 u32 s, e;
100 100
101 BUG_ON(!type);
102
101 list_for_each_entry(this, &mm->free, fl_entry) { 103 list_for_each_entry(this, &mm->free, fl_entry) {
102 e = this->offset + this->length; 104 e = this->offset + this->length;
103 s = this->offset; 105 s = this->offset;
@@ -162,6 +164,8 @@ nouveau_mm_tail(struct nouveau_mm *mm, u8 type, u32 size_max, u32 size_min,
162 struct nouveau_mm_node *prev, *this, *next; 164 struct nouveau_mm_node *prev, *this, *next;
163 u32 mask = align - 1; 165 u32 mask = align - 1;
164 166
167 BUG_ON(!type);
168
165 list_for_each_entry_reverse(this, &mm->free, fl_entry) { 169 list_for_each_entry_reverse(this, &mm->free, fl_entry) {
166 u32 e = this->offset + this->length; 170 u32 e = this->offset + this->length;
167 u32 s = this->offset; 171 u32 s = this->offset;
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/mc.h b/drivers/gpu/drm/nouveau/core/include/subdev/mc.h
index d5502267c30f..9d2cd2006250 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/mc.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/mc.h
@@ -20,8 +20,8 @@ nouveau_mc(void *obj)
20 return (void *)nv_device(obj)->subdev[NVDEV_SUBDEV_MC]; 20 return (void *)nv_device(obj)->subdev[NVDEV_SUBDEV_MC];
21} 21}
22 22
23#define nouveau_mc_create(p,e,o,d) \ 23#define nouveau_mc_create(p,e,o,m,d) \
24 nouveau_mc_create_((p), (e), (o), sizeof(**d), (void **)d) 24 nouveau_mc_create_((p), (e), (o), (m), sizeof(**d), (void **)d)
25#define nouveau_mc_destroy(p) ({ \ 25#define nouveau_mc_destroy(p) ({ \
26 struct nouveau_mc *pmc = (p); _nouveau_mc_dtor(nv_object(pmc)); \ 26 struct nouveau_mc *pmc = (p); _nouveau_mc_dtor(nv_object(pmc)); \
27}) 27})
@@ -33,7 +33,8 @@ nouveau_mc(void *obj)
33}) 33})
34 34
35int nouveau_mc_create_(struct nouveau_object *, struct nouveau_object *, 35int nouveau_mc_create_(struct nouveau_object *, struct nouveau_object *,
36 struct nouveau_oclass *, int, void **); 36 struct nouveau_oclass *, const struct nouveau_mc_intr *,
37 int, void **);
37void _nouveau_mc_dtor(struct nouveau_object *); 38void _nouveau_mc_dtor(struct nouveau_object *);
38int _nouveau_mc_init(struct nouveau_object *); 39int _nouveau_mc_init(struct nouveau_object *);
39int _nouveau_mc_fini(struct nouveau_object *, bool); 40int _nouveau_mc_fini(struct nouveau_object *, bool);
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv49.c b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv49.c
index 19e3a9a63a02..ab7ef0ac9e34 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv49.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv49.c
@@ -40,15 +40,15 @@ nv49_ram_create(struct nouveau_object *parent, struct nouveau_object *engine,
40 return ret; 40 return ret;
41 41
42 switch (pfb914 & 0x00000003) { 42 switch (pfb914 & 0x00000003) {
43 case 0x00000000: pfb->ram->type = NV_MEM_TYPE_DDR1; break; 43 case 0x00000000: ram->type = NV_MEM_TYPE_DDR1; break;
44 case 0x00000001: pfb->ram->type = NV_MEM_TYPE_DDR2; break; 44 case 0x00000001: ram->type = NV_MEM_TYPE_DDR2; break;
45 case 0x00000002: pfb->ram->type = NV_MEM_TYPE_GDDR3; break; 45 case 0x00000002: ram->type = NV_MEM_TYPE_GDDR3; break;
46 case 0x00000003: break; 46 case 0x00000003: break;
47 } 47 }
48 48
49 pfb->ram->size = nv_rd32(pfb, 0x10020c) & 0xff000000; 49 ram->size = nv_rd32(pfb, 0x10020c) & 0xff000000;
50 pfb->ram->parts = (nv_rd32(pfb, 0x100200) & 0x00000003) + 1; 50 ram->parts = (nv_rd32(pfb, 0x100200) & 0x00000003) + 1;
51 pfb->ram->tags = nv_rd32(pfb, 0x100320); 51 ram->tags = nv_rd32(pfb, 0x100320);
52 return 0; 52 return 0;
53} 53}
54 54
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv4e.c b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv4e.c
index 7192aa6e5577..63a6aab86028 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv4e.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv4e.c
@@ -38,8 +38,8 @@ nv4e_ram_create(struct nouveau_object *parent, struct nouveau_object *engine,
38 if (ret) 38 if (ret)
39 return ret; 39 return ret;
40 40
41 pfb->ram->size = nv_rd32(pfb, 0x10020c) & 0xff000000; 41 ram->size = nv_rd32(pfb, 0x10020c) & 0xff000000;
42 pfb->ram->type = NV_MEM_TYPE_STOLEN; 42 ram->type = NV_MEM_TYPE_STOLEN;
43 return 0; 43 return 0;
44} 44}
45 45
diff --git a/drivers/gpu/drm/nouveau/core/subdev/ltcg/nvc0.c b/drivers/gpu/drm/nouveau/core/subdev/ltcg/nvc0.c
index bcca883018f4..cce65cc56514 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/ltcg/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/ltcg/nvc0.c
@@ -30,8 +30,9 @@ struct nvc0_ltcg_priv {
30 struct nouveau_ltcg base; 30 struct nouveau_ltcg base;
31 u32 part_nr; 31 u32 part_nr;
32 u32 subp_nr; 32 u32 subp_nr;
33 struct nouveau_mm tags;
34 u32 num_tags; 33 u32 num_tags;
34 u32 tag_base;
35 struct nouveau_mm tags;
35 struct nouveau_mm_node *tag_ram; 36 struct nouveau_mm_node *tag_ram;
36}; 37};
37 38
@@ -117,10 +118,6 @@ nvc0_ltcg_init_tag_ram(struct nouveau_fb *pfb, struct nvc0_ltcg_priv *priv)
117 u32 tag_size, tag_margin, tag_align; 118 u32 tag_size, tag_margin, tag_align;
118 int ret; 119 int ret;
119 120
120 nv_wr32(priv, 0x17e8d8, priv->part_nr);
121 if (nv_device(pfb)->card_type >= NV_E0)
122 nv_wr32(priv, 0x17e000, priv->part_nr);
123
124 /* tags for 1/4 of VRAM should be enough (8192/4 per GiB of VRAM) */ 121 /* tags for 1/4 of VRAM should be enough (8192/4 per GiB of VRAM) */
125 priv->num_tags = (pfb->ram->size >> 17) / 4; 122 priv->num_tags = (pfb->ram->size >> 17) / 4;
126 if (priv->num_tags > (1 << 17)) 123 if (priv->num_tags > (1 << 17))
@@ -142,7 +139,7 @@ nvc0_ltcg_init_tag_ram(struct nouveau_fb *pfb, struct nvc0_ltcg_priv *priv)
142 tag_size += tag_align; 139 tag_size += tag_align;
143 tag_size = (tag_size + 0xfff) >> 12; /* round up */ 140 tag_size = (tag_size + 0xfff) >> 12; /* round up */
144 141
145 ret = nouveau_mm_tail(&pfb->vram, 0, tag_size, tag_size, 1, 142 ret = nouveau_mm_tail(&pfb->vram, 1, tag_size, tag_size, 1,
146 &priv->tag_ram); 143 &priv->tag_ram);
147 if (ret) { 144 if (ret) {
148 priv->num_tags = 0; 145 priv->num_tags = 0;
@@ -152,7 +149,7 @@ nvc0_ltcg_init_tag_ram(struct nouveau_fb *pfb, struct nvc0_ltcg_priv *priv)
152 tag_base += tag_align - 1; 149 tag_base += tag_align - 1;
153 ret = do_div(tag_base, tag_align); 150 ret = do_div(tag_base, tag_align);
154 151
155 nv_wr32(priv, 0x17e8d4, tag_base); 152 priv->tag_base = tag_base;
156 } 153 }
157 ret = nouveau_mm_init(&priv->tags, 0, priv->num_tags, 1); 154 ret = nouveau_mm_init(&priv->tags, 0, priv->num_tags, 1);
158 155
@@ -182,8 +179,6 @@ nvc0_ltcg_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
182 } 179 }
183 priv->subp_nr = nv_rd32(priv, 0x17e8dc) >> 28; 180 priv->subp_nr = nv_rd32(priv, 0x17e8dc) >> 28;
184 181
185 nv_mask(priv, 0x17e820, 0x00100000, 0x00000000); /* INTR_EN &= ~0x10 */
186
187 ret = nvc0_ltcg_init_tag_ram(pfb, priv); 182 ret = nvc0_ltcg_init_tag_ram(pfb, priv);
188 if (ret) 183 if (ret)
189 return ret; 184 return ret;
@@ -209,13 +204,32 @@ nvc0_ltcg_dtor(struct nouveau_object *object)
209 nouveau_ltcg_destroy(ltcg); 204 nouveau_ltcg_destroy(ltcg);
210} 205}
211 206
207static int
208nvc0_ltcg_init(struct nouveau_object *object)
209{
210 struct nouveau_ltcg *ltcg = (struct nouveau_ltcg *)object;
211 struct nvc0_ltcg_priv *priv = (struct nvc0_ltcg_priv *)ltcg;
212 int ret;
213
214 ret = nouveau_ltcg_init(ltcg);
215 if (ret)
216 return ret;
217
218 nv_mask(priv, 0x17e820, 0x00100000, 0x00000000); /* INTR_EN &= ~0x10 */
219 nv_wr32(priv, 0x17e8d8, priv->part_nr);
220 if (nv_device(ltcg)->card_type >= NV_E0)
221 nv_wr32(priv, 0x17e000, priv->part_nr);
222 nv_wr32(priv, 0x17e8d4, priv->tag_base);
223 return 0;
224}
225
212struct nouveau_oclass 226struct nouveau_oclass
213nvc0_ltcg_oclass = { 227nvc0_ltcg_oclass = {
214 .handle = NV_SUBDEV(LTCG, 0xc0), 228 .handle = NV_SUBDEV(LTCG, 0xc0),
215 .ofuncs = &(struct nouveau_ofuncs) { 229 .ofuncs = &(struct nouveau_ofuncs) {
216 .ctor = nvc0_ltcg_ctor, 230 .ctor = nvc0_ltcg_ctor,
217 .dtor = nvc0_ltcg_dtor, 231 .dtor = nvc0_ltcg_dtor,
218 .init = _nouveau_ltcg_init, 232 .init = nvc0_ltcg_init,
219 .fini = _nouveau_ltcg_fini, 233 .fini = _nouveau_ltcg_fini,
220 }, 234 },
221}; 235};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/base.c b/drivers/gpu/drm/nouveau/core/subdev/mc/base.c
index 1c0330b8c9a4..ec9cd6f10f91 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/mc/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/mc/base.c
@@ -80,7 +80,9 @@ _nouveau_mc_dtor(struct nouveau_object *object)
80 80
81int 81int
82nouveau_mc_create_(struct nouveau_object *parent, struct nouveau_object *engine, 82nouveau_mc_create_(struct nouveau_object *parent, struct nouveau_object *engine,
83 struct nouveau_oclass *oclass, int length, void **pobject) 83 struct nouveau_oclass *oclass,
84 const struct nouveau_mc_intr *intr_map,
85 int length, void **pobject)
84{ 86{
85 struct nouveau_device *device = nv_device(parent); 87 struct nouveau_device *device = nv_device(parent);
86 struct nouveau_mc *pmc; 88 struct nouveau_mc *pmc;
@@ -92,6 +94,8 @@ nouveau_mc_create_(struct nouveau_object *parent, struct nouveau_object *engine,
92 if (ret) 94 if (ret)
93 return ret; 95 return ret;
94 96
97 pmc->intr_map = intr_map;
98
95 ret = request_irq(device->pdev->irq, nouveau_mc_intr, 99 ret = request_irq(device->pdev->irq, nouveau_mc_intr,
96 IRQF_SHARED, "nouveau", pmc); 100 IRQF_SHARED, "nouveau", pmc);
97 if (ret < 0) 101 if (ret < 0)
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nv04.c b/drivers/gpu/drm/nouveau/core/subdev/mc/nv04.c
index 8c769715227b..64aa4edb0d9d 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/mc/nv04.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/mc/nv04.c
@@ -50,12 +50,11 @@ nv04_mc_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
50 struct nv04_mc_priv *priv; 50 struct nv04_mc_priv *priv;
51 int ret; 51 int ret;
52 52
53 ret = nouveau_mc_create(parent, engine, oclass, &priv); 53 ret = nouveau_mc_create(parent, engine, oclass, nv04_mc_intr, &priv);
54 *pobject = nv_object(priv); 54 *pobject = nv_object(priv);
55 if (ret) 55 if (ret)
56 return ret; 56 return ret;
57 57
58 priv->base.intr_map = nv04_mc_intr;
59 return 0; 58 return 0;
60} 59}
61 60
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nv44.c b/drivers/gpu/drm/nouveau/core/subdev/mc/nv44.c
index 51919371810f..d9891782bf28 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/mc/nv44.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/mc/nv44.c
@@ -36,12 +36,11 @@ nv44_mc_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
36 struct nv44_mc_priv *priv; 36 struct nv44_mc_priv *priv;
37 int ret; 37 int ret;
38 38
39 ret = nouveau_mc_create(parent, engine, oclass, &priv); 39 ret = nouveau_mc_create(parent, engine, oclass, nv04_mc_intr, &priv);
40 *pobject = nv_object(priv); 40 *pobject = nv_object(priv);
41 if (ret) 41 if (ret)
42 return ret; 42 return ret;
43 43
44 priv->base.intr_map = nv04_mc_intr;
45 return 0; 44 return 0;
46} 45}
47 46
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nv50.c b/drivers/gpu/drm/nouveau/core/subdev/mc/nv50.c
index f25fc5fc7dd1..2b1afe225db8 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/mc/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/mc/nv50.c
@@ -53,12 +53,11 @@ nv50_mc_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
53 struct nv50_mc_priv *priv; 53 struct nv50_mc_priv *priv;
54 int ret; 54 int ret;
55 55
56 ret = nouveau_mc_create(parent, engine, oclass, &priv); 56 ret = nouveau_mc_create(parent, engine, oclass, nv50_mc_intr, &priv);
57 *pobject = nv_object(priv); 57 *pobject = nv_object(priv);
58 if (ret) 58 if (ret)
59 return ret; 59 return ret;
60 60
61 priv->base.intr_map = nv50_mc_intr;
62 return 0; 61 return 0;
63} 62}
64 63
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nv98.c b/drivers/gpu/drm/nouveau/core/subdev/mc/nv98.c
index e82fd21b5041..0d57b4d3e001 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/mc/nv98.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/mc/nv98.c
@@ -54,12 +54,11 @@ nv98_mc_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
54 struct nv98_mc_priv *priv; 54 struct nv98_mc_priv *priv;
55 int ret; 55 int ret;
56 56
57 ret = nouveau_mc_create(parent, engine, oclass, &priv); 57 ret = nouveau_mc_create(parent, engine, oclass, nv98_mc_intr, &priv);
58 *pobject = nv_object(priv); 58 *pobject = nv_object(priv);
59 if (ret) 59 if (ret)
60 return ret; 60 return ret;
61 61
62 priv->base.intr_map = nv98_mc_intr;
63 return 0; 62 return 0;
64} 63}
65 64
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nvc0.c b/drivers/gpu/drm/nouveau/core/subdev/mc/nvc0.c
index c5da3babbc62..104175c5a2dd 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/mc/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/mc/nvc0.c
@@ -57,12 +57,11 @@ nvc0_mc_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
57 struct nvc0_mc_priv *priv; 57 struct nvc0_mc_priv *priv;
58 int ret; 58 int ret;
59 59
60 ret = nouveau_mc_create(parent, engine, oclass, &priv); 60 ret = nouveau_mc_create(parent, engine, oclass, nvc0_mc_intr, &priv);
61 *pobject = nv_object(priv); 61 *pobject = nv_object(priv);
62 if (ret) 62 if (ret)
63 return ret; 63 return ret;
64 64
65 priv->base.intr_map = nvc0_mc_intr;
66 return 0; 65 return 0;
67} 66}
68 67
diff --git a/drivers/gpu/drm/nouveau/dispnv04/crtc.c b/drivers/gpu/drm/nouveau/dispnv04/crtc.c
index 0782bd2f1e04..6a13ffb53bdb 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/crtc.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/crtc.c
@@ -606,6 +606,24 @@ nv_crtc_mode_set_regs(struct drm_crtc *crtc, struct drm_display_mode * mode)
606 regp->ramdac_a34 = 0x1; 606 regp->ramdac_a34 = 0x1;
607} 607}
608 608
609static int
610nv_crtc_swap_fbs(struct drm_crtc *crtc, struct drm_framebuffer *old_fb)
611{
612 struct nv04_display *disp = nv04_display(crtc->dev);
613 struct nouveau_framebuffer *nvfb = nouveau_framebuffer(crtc->fb);
614 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
615 int ret;
616
617 ret = nouveau_bo_pin(nvfb->nvbo, TTM_PL_FLAG_VRAM);
618 if (ret == 0) {
619 if (disp->image[nv_crtc->index])
620 nouveau_bo_unpin(disp->image[nv_crtc->index]);
621 nouveau_bo_ref(nvfb->nvbo, &disp->image[nv_crtc->index]);
622 }
623
624 return ret;
625}
626
609/** 627/**
610 * Sets up registers for the given mode/adjusted_mode pair. 628 * Sets up registers for the given mode/adjusted_mode pair.
611 * 629 *
@@ -622,10 +640,15 @@ nv_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode,
622 struct drm_device *dev = crtc->dev; 640 struct drm_device *dev = crtc->dev;
623 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); 641 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
624 struct nouveau_drm *drm = nouveau_drm(dev); 642 struct nouveau_drm *drm = nouveau_drm(dev);
643 int ret;
625 644
626 NV_DEBUG(drm, "CTRC mode on CRTC %d:\n", nv_crtc->index); 645 NV_DEBUG(drm, "CTRC mode on CRTC %d:\n", nv_crtc->index);
627 drm_mode_debug_printmodeline(adjusted_mode); 646 drm_mode_debug_printmodeline(adjusted_mode);
628 647
648 ret = nv_crtc_swap_fbs(crtc, old_fb);
649 if (ret)
650 return ret;
651
629 /* unlock must come after turning off FP_TG_CONTROL in output_prepare */ 652 /* unlock must come after turning off FP_TG_CONTROL in output_prepare */
630 nv_lock_vga_crtc_shadow(dev, nv_crtc->index, -1); 653 nv_lock_vga_crtc_shadow(dev, nv_crtc->index, -1);
631 654
@@ -722,6 +745,7 @@ static void nv_crtc_commit(struct drm_crtc *crtc)
722 745
723static void nv_crtc_destroy(struct drm_crtc *crtc) 746static void nv_crtc_destroy(struct drm_crtc *crtc)
724{ 747{
748 struct nv04_display *disp = nv04_display(crtc->dev);
725 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); 749 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
726 750
727 if (!nv_crtc) 751 if (!nv_crtc)
@@ -729,6 +753,10 @@ static void nv_crtc_destroy(struct drm_crtc *crtc)
729 753
730 drm_crtc_cleanup(crtc); 754 drm_crtc_cleanup(crtc);
731 755
756 if (disp->image[nv_crtc->index])
757 nouveau_bo_unpin(disp->image[nv_crtc->index]);
758 nouveau_bo_ref(NULL, &disp->image[nv_crtc->index]);
759
732 nouveau_bo_unmap(nv_crtc->cursor.nvbo); 760 nouveau_bo_unmap(nv_crtc->cursor.nvbo);
733 nouveau_bo_unpin(nv_crtc->cursor.nvbo); 761 nouveau_bo_unpin(nv_crtc->cursor.nvbo);
734 nouveau_bo_ref(NULL, &nv_crtc->cursor.nvbo); 762 nouveau_bo_ref(NULL, &nv_crtc->cursor.nvbo);
@@ -754,6 +782,16 @@ nv_crtc_gamma_load(struct drm_crtc *crtc)
754} 782}
755 783
756static void 784static void
785nv_crtc_disable(struct drm_crtc *crtc)
786{
787 struct nv04_display *disp = nv04_display(crtc->dev);
788 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
789 if (disp->image[nv_crtc->index])
790 nouveau_bo_unpin(disp->image[nv_crtc->index]);
791 nouveau_bo_ref(NULL, &disp->image[nv_crtc->index]);
792}
793
794static void
757nv_crtc_gamma_set(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b, uint32_t start, 795nv_crtc_gamma_set(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b, uint32_t start,
758 uint32_t size) 796 uint32_t size)
759{ 797{
@@ -791,7 +829,6 @@ nv04_crtc_do_mode_set_base(struct drm_crtc *crtc,
791 struct drm_framebuffer *drm_fb; 829 struct drm_framebuffer *drm_fb;
792 struct nouveau_framebuffer *fb; 830 struct nouveau_framebuffer *fb;
793 int arb_burst, arb_lwm; 831 int arb_burst, arb_lwm;
794 int ret;
795 832
796 NV_DEBUG(drm, "index %d\n", nv_crtc->index); 833 NV_DEBUG(drm, "index %d\n", nv_crtc->index);
797 834
@@ -801,10 +838,8 @@ nv04_crtc_do_mode_set_base(struct drm_crtc *crtc,
801 return 0; 838 return 0;
802 } 839 }
803 840
804
805 /* If atomic, we want to switch to the fb we were passed, so 841 /* If atomic, we want to switch to the fb we were passed, so
806 * now we update pointers to do that. (We don't pin; just 842 * now we update pointers to do that.
807 * assume we're already pinned and update the base address.)
808 */ 843 */
809 if (atomic) { 844 if (atomic) {
810 drm_fb = passed_fb; 845 drm_fb = passed_fb;
@@ -812,17 +847,6 @@ nv04_crtc_do_mode_set_base(struct drm_crtc *crtc,
812 } else { 847 } else {
813 drm_fb = crtc->fb; 848 drm_fb = crtc->fb;
814 fb = nouveau_framebuffer(crtc->fb); 849 fb = nouveau_framebuffer(crtc->fb);
815 /* If not atomic, we can go ahead and pin, and unpin the
816 * old fb we were passed.
817 */
818 ret = nouveau_bo_pin(fb->nvbo, TTM_PL_FLAG_VRAM);
819 if (ret)
820 return ret;
821
822 if (passed_fb) {
823 struct nouveau_framebuffer *ofb = nouveau_framebuffer(passed_fb);
824 nouveau_bo_unpin(ofb->nvbo);
825 }
826 } 850 }
827 851
828 nv_crtc->fb.offset = fb->nvbo->bo.offset; 852 nv_crtc->fb.offset = fb->nvbo->bo.offset;
@@ -877,6 +901,9 @@ static int
877nv04_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y, 901nv04_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
878 struct drm_framebuffer *old_fb) 902 struct drm_framebuffer *old_fb)
879{ 903{
904 int ret = nv_crtc_swap_fbs(crtc, old_fb);
905 if (ret)
906 return ret;
880 return nv04_crtc_do_mode_set_base(crtc, old_fb, x, y, false); 907 return nv04_crtc_do_mode_set_base(crtc, old_fb, x, y, false);
881} 908}
882 909
@@ -1027,6 +1054,7 @@ static const struct drm_crtc_helper_funcs nv04_crtc_helper_funcs = {
1027 .mode_set_base = nv04_crtc_mode_set_base, 1054 .mode_set_base = nv04_crtc_mode_set_base,
1028 .mode_set_base_atomic = nv04_crtc_mode_set_base_atomic, 1055 .mode_set_base_atomic = nv04_crtc_mode_set_base_atomic,
1029 .load_lut = nv_crtc_gamma_load, 1056 .load_lut = nv_crtc_gamma_load,
1057 .disable = nv_crtc_disable,
1030}; 1058};
1031 1059
1032int 1060int
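
nv_crtc_swap_fbs() above pins the new framebuffer first and only on success unpins and replaces the buffer previously tracked for the CRTC, so a failed pin leaves the old state untouched. A standalone sketch of that acquire-new-before-release-old swap (toy pin counters, not TTM/nouveau buffer objects):

    #include <stdio.h>

    struct buffer {
        const char *name;
        int pin_count;
    };

    static int pin(struct buffer *bo)
    {
        bo->pin_count++;
        return 0;            /* a real pin could fail and return -ENOMEM */
    }

    static void unpin(struct buffer *bo)
    {
        bo->pin_count--;
    }

    /* Pin the new buffer first; only if that succeeds drop the old one and
     * update the tracking pointer, mirroring nv_crtc_swap_fbs(). */
    static int swap_tracked(struct buffer **tracked, struct buffer *new_bo)
    {
        int ret = pin(new_bo);
        if (ret == 0) {
            if (*tracked)
                unpin(*tracked);
            *tracked = new_bo;
        }
        return ret;
    }

    int main(void)
    {
        struct buffer a = { "old-fb", 1 }, b = { "new-fb", 0 };
        struct buffer *current = &a;

        swap_tracked(&current, &b);
        printf("current=%s old pins=%d new pins=%d\n",
               current->name, a.pin_count, b.pin_count);
        return 0;
    }
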
diff --git a/drivers/gpu/drm/nouveau/dispnv04/disp.h b/drivers/gpu/drm/nouveau/dispnv04/disp.h
index a0a031dad13f..9928187f0a7d 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/disp.h
+++ b/drivers/gpu/drm/nouveau/dispnv04/disp.h
@@ -81,6 +81,7 @@ struct nv04_display {
81 uint32_t saved_vga_font[4][16384]; 81 uint32_t saved_vga_font[4][16384];
82 uint32_t dac_users[4]; 82 uint32_t dac_users[4];
83 struct nouveau_object *core; 83 struct nouveau_object *core;
84 struct nouveau_bo *image[2];
84}; 85};
85 86
86static inline struct nv04_display * 87static inline struct nv04_display *
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index 907d20ef6d4d..a03e75deacaf 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -577,6 +577,9 @@ nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
577 ret = nv50_display_flip_next(crtc, fb, chan, 0); 577 ret = nv50_display_flip_next(crtc, fb, chan, 0);
578 if (ret) 578 if (ret)
579 goto fail_unreserve; 579 goto fail_unreserve;
580 } else {
581 struct nv04_display *dispnv04 = nv04_display(dev);
582 nouveau_bo_ref(new_bo, &dispnv04->image[nouveau_crtc(crtc)->index]);
580 } 583 }
581 584
582 ret = nouveau_page_flip_emit(chan, old_bo, new_bo, s, &fence); 585 ret = nouveau_page_flip_emit(chan, old_bo, new_bo, s, &fence);
diff --git a/drivers/gpu/drm/nouveau/nv40_pm.c b/drivers/gpu/drm/nouveau/nv40_pm.c
index 3af5bcd0b203..625f80d53dc2 100644
--- a/drivers/gpu/drm/nouveau/nv40_pm.c
+++ b/drivers/gpu/drm/nouveau/nv40_pm.c
@@ -131,7 +131,7 @@ nv40_calc_pll(struct drm_device *dev, u32 reg, struct nvbios_pll *pll,
131 if (clk < pll->vco1.max_freq) 131 if (clk < pll->vco1.max_freq)
132 pll->vco2.max_freq = 0; 132 pll->vco2.max_freq = 0;
133 133
134 pclk->pll_calc(pclk, pll, clk, &coef); 134 ret = pclk->pll_calc(pclk, pll, clk, &coef);
135 if (ret == 0) 135 if (ret == 0)
136 return -ERANGE; 136 return -ERANGE;
137 137
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c
index 3751730764a5..1a0bf07fe54b 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c
@@ -29,7 +29,9 @@
29#include <drm/drmP.h> 29#include <drm/drmP.h>
30#include <drm/ttm/ttm_bo_driver.h> 30#include <drm/ttm/ttm_bo_driver.h>
31 31
32#define VMW_PPN_SIZE sizeof(unsigned long) 32#define VMW_PPN_SIZE (sizeof(unsigned long))
33/* A future safe maximum remap size. */
34#define VMW_PPN_PER_REMAP ((31 * 1024) / VMW_PPN_SIZE)
33 35
34static int vmw_gmr2_bind(struct vmw_private *dev_priv, 36static int vmw_gmr2_bind(struct vmw_private *dev_priv,
35 struct page *pages[], 37 struct page *pages[],
@@ -38,43 +40,61 @@ static int vmw_gmr2_bind(struct vmw_private *dev_priv,
38{ 40{
39 SVGAFifoCmdDefineGMR2 define_cmd; 41 SVGAFifoCmdDefineGMR2 define_cmd;
40 SVGAFifoCmdRemapGMR2 remap_cmd; 42 SVGAFifoCmdRemapGMR2 remap_cmd;
41 uint32_t define_size = sizeof(define_cmd) + 4;
42 uint32_t remap_size = VMW_PPN_SIZE * num_pages + sizeof(remap_cmd) + 4;
43 uint32_t *cmd; 43 uint32_t *cmd;
44 uint32_t *cmd_orig; 44 uint32_t *cmd_orig;
45 uint32_t define_size = sizeof(define_cmd) + sizeof(*cmd);
46 uint32_t remap_num = num_pages / VMW_PPN_PER_REMAP + ((num_pages % VMW_PPN_PER_REMAP) > 0);
47 uint32_t remap_size = VMW_PPN_SIZE * num_pages + (sizeof(remap_cmd) + sizeof(*cmd)) * remap_num;
48 uint32_t remap_pos = 0;
49 uint32_t cmd_size = define_size + remap_size;
45 uint32_t i; 50 uint32_t i;
46 51
47 cmd_orig = cmd = vmw_fifo_reserve(dev_priv, define_size + remap_size); 52 cmd_orig = cmd = vmw_fifo_reserve(dev_priv, cmd_size);
48 if (unlikely(cmd == NULL)) 53 if (unlikely(cmd == NULL))
49 return -ENOMEM; 54 return -ENOMEM;
50 55
51 define_cmd.gmrId = gmr_id; 56 define_cmd.gmrId = gmr_id;
52 define_cmd.numPages = num_pages; 57 define_cmd.numPages = num_pages;
53 58
59 *cmd++ = SVGA_CMD_DEFINE_GMR2;
60 memcpy(cmd, &define_cmd, sizeof(define_cmd));
61 cmd += sizeof(define_cmd) / sizeof(*cmd);
62
63 /*
64 * Need to split the command if there are too many
65 * pages that goes into the gmr.
66 */
67
54 remap_cmd.gmrId = gmr_id; 68 remap_cmd.gmrId = gmr_id;
55 remap_cmd.flags = (VMW_PPN_SIZE > sizeof(*cmd)) ? 69 remap_cmd.flags = (VMW_PPN_SIZE > sizeof(*cmd)) ?
56 SVGA_REMAP_GMR2_PPN64 : SVGA_REMAP_GMR2_PPN32; 70 SVGA_REMAP_GMR2_PPN64 : SVGA_REMAP_GMR2_PPN32;
57 remap_cmd.offsetPages = 0;
58 remap_cmd.numPages = num_pages;
59 71
60 *cmd++ = SVGA_CMD_DEFINE_GMR2; 72 while (num_pages > 0) {
61 memcpy(cmd, &define_cmd, sizeof(define_cmd)); 73 unsigned long nr = min(num_pages, (unsigned long)VMW_PPN_PER_REMAP);
62 cmd += sizeof(define_cmd) / sizeof(uint32); 74
75 remap_cmd.offsetPages = remap_pos;
76 remap_cmd.numPages = nr;
63 77
64 *cmd++ = SVGA_CMD_REMAP_GMR2; 78 *cmd++ = SVGA_CMD_REMAP_GMR2;
65 memcpy(cmd, &remap_cmd, sizeof(remap_cmd)); 79 memcpy(cmd, &remap_cmd, sizeof(remap_cmd));
66 cmd += sizeof(remap_cmd) / sizeof(uint32); 80 cmd += sizeof(remap_cmd) / sizeof(*cmd);
67 81
68 for (i = 0; i < num_pages; ++i) { 82 for (i = 0; i < nr; ++i) {
69 if (VMW_PPN_SIZE <= 4) 83 if (VMW_PPN_SIZE <= 4)
70 *cmd = page_to_pfn(*pages++); 84 *cmd = page_to_pfn(*pages++);
71 else 85 else
72 *((uint64_t *)cmd) = page_to_pfn(*pages++); 86 *((uint64_t *)cmd) = page_to_pfn(*pages++);
73 87
74 cmd += VMW_PPN_SIZE / sizeof(*cmd); 88 cmd += VMW_PPN_SIZE / sizeof(*cmd);
89 }
90
91 num_pages -= nr;
92 remap_pos += nr;
75 } 93 }
76 94
77 vmw_fifo_commit(dev_priv, define_size + remap_size); 95 BUG_ON(cmd != cmd_orig + cmd_size / sizeof(*cmd));
96
97 vmw_fifo_commit(dev_priv, cmd_size);
78 98
79 return 0; 99 return 0;
80} 100}
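
The rewritten vmw_gmr2_bind() splits the page list into REMAP commands of at most VMW_PPN_PER_REMAP entries, sizes the FIFO reservation for all per-batch headers up front, and checks at the end that exactly that many words were written. A standalone sketch of the same batching arithmetic (plain arrays instead of the SVGA FIFO):

    #include <assert.h>
    #include <stdio.h>

    #define PER_BATCH 4   /* stand-in for VMW_PPN_PER_REMAP */

    /* Emit items in batches of at most PER_BATCH, one two-word "header"
     * per batch, and return the number of words written. */
    static unsigned int emit_batched(unsigned int *out, const unsigned int *items,
                                     unsigned int count)
    {
        unsigned int pos = 0, written = 0;

        while (count > 0) {
            unsigned int nr = count < PER_BATCH ? count : PER_BATCH;

            out[written++] = 0xbeef0000 | pos;   /* header: batch offset */
            out[written++] = nr;                 /* header: batch length */
            for (unsigned int i = 0; i < nr; i++)
                out[written++] = items[pos + i];

            count -= nr;
            pos += nr;
        }
        return written;
    }

    int main(void)
    {
        unsigned int items[10], out[64];
        unsigned int n = sizeof(items) / sizeof(items[0]);

        for (unsigned int i = 0; i < n; i++)
            items[i] = i;

        /* Expected size: one 2-word header per batch plus the items. */
        unsigned int batches = n / PER_BATCH + (n % PER_BATCH > 0);
        unsigned int expected = 2 * batches + n;
        unsigned int written = emit_batched(out, items, n);

        assert(written == expected);   /* same invariant as the patch's BUG_ON */
        printf("%u items -> %u batches, %u words\n", n, batches, written);
        return 0;
    }
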
diff --git a/drivers/iio/light/adjd_s311.c b/drivers/iio/light/adjd_s311.c
index 5f4749e60b04..c1cd5698b8ae 100644
--- a/drivers/iio/light/adjd_s311.c
+++ b/drivers/iio/light/adjd_s311.c
@@ -232,7 +232,8 @@ static int adjd_s311_read_raw(struct iio_dev *indio_dev,
232 232
233 switch (mask) { 233 switch (mask) {
234 case IIO_CHAN_INFO_RAW: 234 case IIO_CHAN_INFO_RAW:
235 ret = adjd_s311_read_data(indio_dev, chan->address, val); 235 ret = adjd_s311_read_data(indio_dev,
236 ADJD_S311_DATA_REG(chan->address), val);
236 if (ret < 0) 237 if (ret < 0)
237 return ret; 238 return ret;
238 return IIO_VAL_INT; 239 return IIO_VAL_INT;
diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
index fa061d46527f..75e3b102ce45 100644
--- a/drivers/input/joystick/xpad.c
+++ b/drivers/input/joystick/xpad.c
@@ -167,6 +167,7 @@ static const struct xpad_device {
167 { 0x1430, 0x8888, "TX6500+ Dance Pad (first generation)", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX }, 167 { 0x1430, 0x8888, "TX6500+ Dance Pad (first generation)", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX },
168 { 0x146b, 0x0601, "BigBen Interactive XBOX 360 Controller", 0, XTYPE_XBOX360 }, 168 { 0x146b, 0x0601, "BigBen Interactive XBOX 360 Controller", 0, XTYPE_XBOX360 },
169 { 0x1689, 0xfd00, "Razer Onza Tournament Edition", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360 }, 169 { 0x1689, 0xfd00, "Razer Onza Tournament Edition", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360 },
170 { 0x1689, 0xfd01, "Razer Onza Classic Edition", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360 },
170 { 0x1bad, 0x0002, "Harmonix Rock Band Guitar", 0, XTYPE_XBOX360 }, 171 { 0x1bad, 0x0002, "Harmonix Rock Band Guitar", 0, XTYPE_XBOX360 },
171 { 0x1bad, 0x0003, "Harmonix Rock Band Drumkit", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360 }, 172 { 0x1bad, 0x0003, "Harmonix Rock Band Drumkit", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360 },
172 { 0x1bad, 0xf016, "Mad Catz Xbox 360 Controller", 0, XTYPE_XBOX360 }, 173 { 0x1bad, 0xf016, "Mad Catz Xbox 360 Controller", 0, XTYPE_XBOX360 },
diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c
index 57b2637e153a..8551dcaf24db 100644
--- a/drivers/input/mouse/elantech.c
+++ b/drivers/input/mouse/elantech.c
@@ -672,6 +672,7 @@ static int elantech_packet_check_v2(struct psmouse *psmouse)
672 */ 672 */
673static int elantech_packet_check_v3(struct psmouse *psmouse) 673static int elantech_packet_check_v3(struct psmouse *psmouse)
674{ 674{
675 struct elantech_data *etd = psmouse->private;
675 const u8 debounce_packet[] = { 0xc4, 0xff, 0xff, 0x02, 0xff, 0xff }; 676 const u8 debounce_packet[] = { 0xc4, 0xff, 0xff, 0x02, 0xff, 0xff };
676 unsigned char *packet = psmouse->packet; 677 unsigned char *packet = psmouse->packet;
677 678
@@ -682,19 +683,48 @@ static int elantech_packet_check_v3(struct psmouse *psmouse)
682 if (!memcmp(packet, debounce_packet, sizeof(debounce_packet))) 683 if (!memcmp(packet, debounce_packet, sizeof(debounce_packet)))
683 return PACKET_DEBOUNCE; 684 return PACKET_DEBOUNCE;
684 685
685 if ((packet[0] & 0x0c) == 0x04 && (packet[3] & 0xcf) == 0x02) 686 /*
686 return PACKET_V3_HEAD; 687 * If the hardware flag 'crc_enabled' is set the packets have
688 * different signatures.
689 */
690 if (etd->crc_enabled) {
691 if ((packet[3] & 0x09) == 0x08)
692 return PACKET_V3_HEAD;
693
694 if ((packet[3] & 0x09) == 0x09)
695 return PACKET_V3_TAIL;
696 } else {
697 if ((packet[0] & 0x0c) == 0x04 && (packet[3] & 0xcf) == 0x02)
698 return PACKET_V3_HEAD;
687 699
688 if ((packet[0] & 0x0c) == 0x0c && (packet[3] & 0xce) == 0x0c) 700 if ((packet[0] & 0x0c) == 0x0c && (packet[3] & 0xce) == 0x0c)
689 return PACKET_V3_TAIL; 701 return PACKET_V3_TAIL;
702 }
690 703
691 return PACKET_UNKNOWN; 704 return PACKET_UNKNOWN;
692} 705}
693 706
694static int elantech_packet_check_v4(struct psmouse *psmouse) 707static int elantech_packet_check_v4(struct psmouse *psmouse)
695{ 708{
709 struct elantech_data *etd = psmouse->private;
696 unsigned char *packet = psmouse->packet; 710 unsigned char *packet = psmouse->packet;
697 unsigned char packet_type = packet[3] & 0x03; 711 unsigned char packet_type = packet[3] & 0x03;
712 bool sanity_check;
713
714 /*
715 * Sanity check based on the constant bits of a packet.
716 * The constant bits change depending on the value of
717 * the hardware flag 'crc_enabled' but are the same for
718 * every packet, regardless of the type.
719 */
720 if (etd->crc_enabled)
721 sanity_check = ((packet[3] & 0x08) == 0x00);
722 else
723 sanity_check = ((packet[0] & 0x0c) == 0x04 &&
724 (packet[3] & 0x1c) == 0x10);
725
726 if (!sanity_check)
727 return PACKET_UNKNOWN;
698 728
699 switch (packet_type) { 729 switch (packet_type) {
700 case 0: 730 case 0:
@@ -1313,6 +1343,12 @@ static int elantech_set_properties(struct elantech_data *etd)
1313 etd->reports_pressure = true; 1343 etd->reports_pressure = true;
1314 } 1344 }
1315 1345
1346 /*
1347 * The signatures of v3 and v4 packets change depending on the
1348 * value of this hardware flag.
1349 */
1350 etd->crc_enabled = ((etd->fw_version & 0x4000) == 0x4000);
1351
1316 return 0; 1352 return 0;
1317} 1353}
1318 1354
diff --git a/drivers/input/mouse/elantech.h b/drivers/input/mouse/elantech.h
index 46db3be45ac9..036a04abaef7 100644
--- a/drivers/input/mouse/elantech.h
+++ b/drivers/input/mouse/elantech.h
@@ -129,6 +129,7 @@ struct elantech_data {
129 bool paritycheck; 129 bool paritycheck;
130 bool jumpy_cursor; 130 bool jumpy_cursor;
131 bool reports_pressure; 131 bool reports_pressure;
132 bool crc_enabled;
132 unsigned char hw_version; 133 unsigned char hw_version;
133 unsigned int fw_version; 134 unsigned int fw_version;
134 unsigned int single_finger_reports; 135 unsigned int single_finger_reports;
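
The elantech hunks make the v3/v4 packet checks depend on a crc_enabled flag derived from bit 14 of the firmware version, because the constant signature bits differ between the two firmware variants. A standalone sketch of the mode-dependent v3 check (masks copied from the patch; the packet bytes and firmware version are made-up examples):

    #include <stdbool.h>
    #include <stdio.h>

    enum { PKT_UNKNOWN, PKT_HEAD, PKT_TAIL };

    /* Classify a 6-byte v3-style packet; which bits are constant depends on
     * whether the firmware uses the CRC packet format. */
    static int classify_v3(const unsigned char *p, bool crc_enabled)
    {
        if (crc_enabled) {
            if ((p[3] & 0x09) == 0x08)
                return PKT_HEAD;
            if ((p[3] & 0x09) == 0x09)
                return PKT_TAIL;
        } else {
            if ((p[0] & 0x0c) == 0x04 && (p[3] & 0xcf) == 0x02)
                return PKT_HEAD;
            if ((p[0] & 0x0c) == 0x0c && (p[3] & 0xce) == 0x0c)
                return PKT_TAIL;
        }
        return PKT_UNKNOWN;
    }

    int main(void)
    {
        unsigned int fw_version = 0x44c000;  /* made-up value with bit 14 set */
        bool crc_enabled = (fw_version & 0x4000) == 0x4000;
        unsigned char pkt[6] = { 0x00, 0x11, 0x22, 0x08, 0x44, 0x55 };

        printf("crc_enabled=%d type=%d\n",
               crc_enabled, classify_v3(pkt, crc_enabled));
        return 0;
    }
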
diff --git a/drivers/input/serio/Kconfig b/drivers/input/serio/Kconfig
index 94c17c28d268..1e691a3a79cb 100644
--- a/drivers/input/serio/Kconfig
+++ b/drivers/input/serio/Kconfig
@@ -22,7 +22,8 @@ config SERIO_I8042
22 tristate "i8042 PC Keyboard controller" if EXPERT || !X86 22 tristate "i8042 PC Keyboard controller" if EXPERT || !X86
23 default y 23 default y
24 depends on !PARISC && (!ARM || ARCH_SHARK || FOOTBRIDGE_HOST) && \ 24 depends on !PARISC && (!ARM || ARCH_SHARK || FOOTBRIDGE_HOST) && \
25 (!SUPERH || SH_CAYMAN) && !M68K && !BLACKFIN && !S390 25 (!SUPERH || SH_CAYMAN) && !M68K && !BLACKFIN && !S390 && \
26 !ARC
26 help 27 help
27 i8042 is the chip over which the standard AT keyboard and PS/2 28 i8042 is the chip over which the standard AT keyboard and PS/2
28 mouse are connected to the computer. If you use these devices, 29 mouse are connected to the computer. If you use these devices,
diff --git a/drivers/input/tablet/wacom_wac.c b/drivers/input/tablet/wacom_wac.c
index 384fbcd0cee0..f3e91f0b57ae 100644
--- a/drivers/input/tablet/wacom_wac.c
+++ b/drivers/input/tablet/wacom_wac.c
@@ -2112,7 +2112,7 @@ static const struct wacom_features wacom_features_0xDA =
2112 { "Wacom Bamboo 2FG 4x5 SE", WACOM_PKGLEN_BBFUN, 14720, 9200, 1023, 2112 { "Wacom Bamboo 2FG 4x5 SE", WACOM_PKGLEN_BBFUN, 14720, 9200, 1023,
2113 31, BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES, 2113 31, BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES,
2114 .touch_max = 2 }; 2114 .touch_max = 2 };
2115static struct wacom_features wacom_features_0xDB = 2115static const struct wacom_features wacom_features_0xDB =
2116 { "Wacom Bamboo 2FG 6x8 SE", WACOM_PKGLEN_BBFUN, 21648, 13700, 1023, 2116 { "Wacom Bamboo 2FG 6x8 SE", WACOM_PKGLEN_BBFUN, 21648, 13700, 1023,
2117 31, BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES, 2117 31, BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES,
2118 .touch_max = 2 }; 2118 .touch_max = 2 };
@@ -2127,6 +2127,12 @@ static const struct wacom_features wacom_features_0xDF =
2127 { "Wacom Bamboo 16FG 6x8", WACOM_PKGLEN_BBPEN, 21648, 13700, 1023, 2127 { "Wacom Bamboo 16FG 6x8", WACOM_PKGLEN_BBPEN, 21648, 13700, 1023,
2128 31, BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES, 2128 31, BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES,
2129 .touch_max = 16 }; 2129 .touch_max = 16 };
2130static const struct wacom_features wacom_features_0x300 =
2131 { "Wacom Bamboo One S", WACOM_PKGLEN_BBPEN, 14720, 9225, 1023,
2132 31, BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
2133static const struct wacom_features wacom_features_0x301 =
2134 { "Wacom Bamboo One M", WACOM_PKGLEN_BBPEN, 21648, 13530, 1023,
2135 31, BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
2130static const struct wacom_features wacom_features_0x6004 = 2136static const struct wacom_features wacom_features_0x6004 =
2131 { "ISD-V4", WACOM_PKGLEN_GRAPHIRE, 12800, 8000, 255, 2137 { "ISD-V4", WACOM_PKGLEN_GRAPHIRE, 12800, 8000, 255,
2132 0, TABLETPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; 2138 0, TABLETPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
@@ -2253,6 +2259,8 @@ const struct usb_device_id wacom_ids[] = {
2253 { USB_DEVICE_WACOM(0x100) }, 2259 { USB_DEVICE_WACOM(0x100) },
2254 { USB_DEVICE_WACOM(0x101) }, 2260 { USB_DEVICE_WACOM(0x101) },
2255 { USB_DEVICE_WACOM(0x10D) }, 2261 { USB_DEVICE_WACOM(0x10D) },
2262 { USB_DEVICE_WACOM(0x300) },
2263 { USB_DEVICE_WACOM(0x301) },
2256 { USB_DEVICE_WACOM(0x304) }, 2264 { USB_DEVICE_WACOM(0x304) },
2257 { USB_DEVICE_WACOM(0x4001) }, 2265 { USB_DEVICE_WACOM(0x4001) },
2258 { USB_DEVICE_WACOM(0x47) }, 2266 { USB_DEVICE_WACOM(0x47) },
diff --git a/drivers/irqchip/irq-sirfsoc.c b/drivers/irqchip/irq-sirfsoc.c
index 69ea44ebcf61..4851afae38dc 100644
--- a/drivers/irqchip/irq-sirfsoc.c
+++ b/drivers/irqchip/irq-sirfsoc.c
@@ -23,7 +23,7 @@
23#define SIRFSOC_INT_RISC_LEVEL1 0x0024 23#define SIRFSOC_INT_RISC_LEVEL1 0x0024
24#define SIRFSOC_INIT_IRQ_ID 0x0038 24#define SIRFSOC_INIT_IRQ_ID 0x0038
25 25
26#define SIRFSOC_NUM_IRQS 128 26#define SIRFSOC_NUM_IRQS 64
27 27
28static struct irq_domain *sirfsoc_irqdomain; 28static struct irq_domain *sirfsoc_irqdomain;
29 29
@@ -32,15 +32,18 @@ sirfsoc_alloc_gc(void __iomem *base, unsigned int irq_start, unsigned int num)
32{ 32{
33 struct irq_chip_generic *gc; 33 struct irq_chip_generic *gc;
34 struct irq_chip_type *ct; 34 struct irq_chip_type *ct;
35 int ret;
36 unsigned int clr = IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN;
35 37
36 gc = irq_alloc_generic_chip("SIRFINTC", 1, irq_start, base, handle_level_irq); 38 ret = irq_alloc_domain_generic_chips(sirfsoc_irqdomain, num, 1, "irq_sirfsoc",
37 ct = gc->chip_types; 39 handle_level_irq, clr, 0, IRQ_GC_INIT_MASK_CACHE);
38 40
41 gc = irq_get_domain_generic_chip(sirfsoc_irqdomain, irq_start);
42 gc->reg_base = base;
43 ct = gc->chip_types;
39 ct->chip.irq_mask = irq_gc_mask_clr_bit; 44 ct->chip.irq_mask = irq_gc_mask_clr_bit;
40 ct->chip.irq_unmask = irq_gc_mask_set_bit; 45 ct->chip.irq_unmask = irq_gc_mask_set_bit;
41 ct->regs.mask = SIRFSOC_INT_RISC_MASK0; 46 ct->regs.mask = SIRFSOC_INT_RISC_MASK0;
42
43 irq_setup_generic_chip(gc, IRQ_MSK(num), IRQ_GC_INIT_MASK_CACHE, IRQ_NOREQUEST, 0);
44} 47}
45 48
46static asmlinkage void __exception_irq_entry sirfsoc_handle_irq(struct pt_regs *regs) 49static asmlinkage void __exception_irq_entry sirfsoc_handle_irq(struct pt_regs *regs)
@@ -60,9 +63,8 @@ static int __init sirfsoc_irq_init(struct device_node *np, struct device_node *p
60 if (!base) 63 if (!base)
61 panic("unable to map intc cpu registers\n"); 64 panic("unable to map intc cpu registers\n");
62 65
63 /* using legacy because irqchip_generic does not work with linear */ 66 sirfsoc_irqdomain = irq_domain_add_linear(np, SIRFSOC_NUM_IRQS,
64 sirfsoc_irqdomain = irq_domain_add_legacy(np, SIRFSOC_NUM_IRQS, 0, 0, 67 &irq_generic_chip_ops, base);
65 &irq_domain_simple_ops, base);
66 68
67 sirfsoc_alloc_gc(base, 0, 32); 69 sirfsoc_alloc_gc(base, 0, 32);
68 sirfsoc_alloc_gc(base + 4, 32, SIRFSOC_NUM_IRQS - 32); 70 sirfsoc_alloc_gc(base + 4, 32, SIRFSOC_NUM_IRQS - 32);
diff --git a/drivers/isdn/mISDN/dsp_core.c b/drivers/isdn/mISDN/dsp_core.c
index 22b720ec80cb..77025f5cb57d 100644
--- a/drivers/isdn/mISDN/dsp_core.c
+++ b/drivers/isdn/mISDN/dsp_core.c
@@ -288,8 +288,10 @@ dsp_control_req(struct dsp *dsp, struct mISDNhead *hh, struct sk_buff *skb)
288 u8 *data; 288 u8 *data;
289 int len; 289 int len;
290 290
291 if (skb->len < sizeof(int)) 291 if (skb->len < sizeof(int)) {
292 printk(KERN_ERR "%s: PH_CONTROL message too short\n", __func__); 292 printk(KERN_ERR "%s: PH_CONTROL message too short\n", __func__);
293 return -EINVAL;
294 }
293 cont = *((int *)skb->data); 295 cont = *((int *)skb->data);
294 len = skb->len - sizeof(int); 296 len = skb->len - sizeof(int);
295 data = skb->data + sizeof(int); 297 data = skb->data + sizeof(int);
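
The dsp_core fix stops falling through after logging a too-short PH_CONTROL message and returns -EINVAL before anything reads an int from skb->data. A standalone sketch of that validate-length-before-parsing pattern (plain buffer instead of an sk_buff):

    #include <errno.h>
    #include <stdio.h>
    #include <string.h>

    /* Parse a message that starts with an int "control" word followed by data.
     * Reject it before touching the payload if it cannot contain one. */
    static int parse_control(const unsigned char *buf, size_t len)
    {
        int cont;

        if (len < sizeof(int)) {
            fprintf(stderr, "control message too short (%zu bytes)\n", len);
            return -EINVAL;
        }

        memcpy(&cont, buf, sizeof(int));
        printf("control word %d, %zu payload bytes\n", cont, len - sizeof(int));
        return 0;
    }

    int main(void)
    {
        unsigned char short_msg[2] = { 0x01, 0x02 };
        unsigned char good_msg[8] = { 0x2a, 0, 0, 0, 0xde, 0xad, 0xbe, 0xef };

        parse_control(short_msg, sizeof(short_msg));
        parse_control(good_msg, sizeof(good_msg));
        return 0;
    }
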
diff --git a/drivers/md/dm-cache-policy-mq.c b/drivers/md/dm-cache-policy-mq.c
index dc112a7137fe..4296155090b2 100644
--- a/drivers/md/dm-cache-policy-mq.c
+++ b/drivers/md/dm-cache-policy-mq.c
@@ -959,23 +959,21 @@ out:
959 return r; 959 return r;
960} 960}
961 961
962static void remove_mapping(struct mq_policy *mq, dm_oblock_t oblock) 962static void mq_remove_mapping(struct dm_cache_policy *p, dm_oblock_t oblock)
963{ 963{
964 struct entry *e = hash_lookup(mq, oblock); 964 struct mq_policy *mq = to_mq_policy(p);
965 struct entry *e;
966
967 mutex_lock(&mq->lock);
968
969 e = hash_lookup(mq, oblock);
965 970
966 BUG_ON(!e || !e->in_cache); 971 BUG_ON(!e || !e->in_cache);
967 972
968 del(mq, e); 973 del(mq, e);
969 e->in_cache = false; 974 e->in_cache = false;
970 push(mq, e); 975 push(mq, e);
971}
972 976
973static void mq_remove_mapping(struct dm_cache_policy *p, dm_oblock_t oblock)
974{
975 struct mq_policy *mq = to_mq_policy(p);
976
977 mutex_lock(&mq->lock);
978 remove_mapping(mq, oblock);
979 mutex_unlock(&mq->lock); 977 mutex_unlock(&mq->lock);
980} 978}
981 979
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
index ce9b387b5a19..00b88cbfde25 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
@@ -1333,6 +1333,8 @@ enum {
1333 BNX2X_SP_RTNL_VFPF_CHANNEL_DOWN, 1333 BNX2X_SP_RTNL_VFPF_CHANNEL_DOWN,
1334 BNX2X_SP_RTNL_VFPF_STORM_RX_MODE, 1334 BNX2X_SP_RTNL_VFPF_STORM_RX_MODE,
1335 BNX2X_SP_RTNL_HYPERVISOR_VLAN, 1335 BNX2X_SP_RTNL_HYPERVISOR_VLAN,
1336 BNX2X_SP_RTNL_TX_STOP,
1337 BNX2X_SP_RTNL_TX_RESUME,
1336}; 1338};
1337 1339
1338struct bnx2x_prev_path_list { 1340struct bnx2x_prev_path_list {
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index f2d1ff10054b..0cc26110868d 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -53,6 +53,7 @@ static inline void bnx2x_move_fp(struct bnx2x *bp, int from, int to)
53 struct bnx2x_fp_stats *to_fp_stats = &bp->fp_stats[to]; 53 struct bnx2x_fp_stats *to_fp_stats = &bp->fp_stats[to];
54 int old_max_eth_txqs, new_max_eth_txqs; 54 int old_max_eth_txqs, new_max_eth_txqs;
55 int old_txdata_index = 0, new_txdata_index = 0; 55 int old_txdata_index = 0, new_txdata_index = 0;
56 struct bnx2x_agg_info *old_tpa_info = to_fp->tpa_info;
56 57
57 /* Copy the NAPI object as it has been already initialized */ 58 /* Copy the NAPI object as it has been already initialized */
58 from_fp->napi = to_fp->napi; 59 from_fp->napi = to_fp->napi;
@@ -61,6 +62,11 @@ static inline void bnx2x_move_fp(struct bnx2x *bp, int from, int to)
61 memcpy(to_fp, from_fp, sizeof(*to_fp)); 62 memcpy(to_fp, from_fp, sizeof(*to_fp));
62 to_fp->index = to; 63 to_fp->index = to;
63 64
65 /* Retain the tpa_info of the original `to' version as we don't want
66 * 2 FPs to contain the same tpa_info pointer.
67 */
68 to_fp->tpa_info = old_tpa_info;
69
64 /* move sp_objs contents as well, as their indices match fp ones */ 70 /* move sp_objs contents as well, as their indices match fp ones */
65 memcpy(to_sp_objs, from_sp_objs, sizeof(*to_sp_objs)); 71 memcpy(to_sp_objs, from_sp_objs, sizeof(*to_sp_objs));
66 72
@@ -2956,8 +2962,9 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link)
2956 if (IS_PF(bp)) { 2962 if (IS_PF(bp)) {
2957 if (CNIC_LOADED(bp)) 2963 if (CNIC_LOADED(bp))
2958 bnx2x_free_mem_cnic(bp); 2964 bnx2x_free_mem_cnic(bp);
2959 bnx2x_free_mem(bp);
2960 } 2965 }
2966 bnx2x_free_mem(bp);
2967
2961 bp->state = BNX2X_STATE_CLOSED; 2968 bp->state = BNX2X_STATE_CLOSED;
2962 bp->cnic_loaded = false; 2969 bp->cnic_loaded = false;
2963 2970
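
bnx2x_move_fp() now saves to_fp->tpa_info before the memcpy() and restores it afterwards, so the wholesale struct copy cannot leave two fastpath structures sharing one tpa_info pointer. A standalone sketch of preserving a per-instance pointer across a struct copy (toy struct, not the bnx2x fastpath):

    #include <stdio.h>
    #include <string.h>

    struct fastpath {
        int index;
        int stats[4];
        int *tpa_info;     /* per-instance buffer that must stay unique */
    };

    /* Copy "from" over "to" but keep to's own tpa_info pointer. */
    static void move_fp(struct fastpath *from, struct fastpath *to, int new_index)
    {
        int *old_tpa_info = to->tpa_info;

        memcpy(to, from, sizeof(*to));
        to->index = new_index;
        to->tpa_info = old_tpa_info;   /* don't alias from->tpa_info */
    }

    int main(void)
    {
        int buf_a[8], buf_b[8];
        struct fastpath a = { 0, { 1, 2, 3, 4 }, buf_a };
        struct fastpath b = { 1, { 0, 0, 0, 0 }, buf_b };

        move_fp(&a, &b, 1);
        printf("b.stats[0]=%d, distinct tpa_info: %s\n",
               b.stats[0], b.tpa_info != a.tpa_info ? "yes" : "no");
        return 0;
    }
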
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
index f9122f2d6b65..fcf2761d8828 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
@@ -30,10 +30,8 @@
30#include "bnx2x_dcb.h" 30#include "bnx2x_dcb.h"
31 31
32/* forward declarations of dcbx related functions */ 32/* forward declarations of dcbx related functions */
33static int bnx2x_dcbx_stop_hw_tx(struct bnx2x *bp);
34static void bnx2x_pfc_set_pfc(struct bnx2x *bp); 33static void bnx2x_pfc_set_pfc(struct bnx2x *bp);
35static void bnx2x_dcbx_update_ets_params(struct bnx2x *bp); 34static void bnx2x_dcbx_update_ets_params(struct bnx2x *bp);
36static int bnx2x_dcbx_resume_hw_tx(struct bnx2x *bp);
37static void bnx2x_dcbx_get_ets_pri_pg_tbl(struct bnx2x *bp, 35static void bnx2x_dcbx_get_ets_pri_pg_tbl(struct bnx2x *bp,
38 u32 *set_configuration_ets_pg, 36 u32 *set_configuration_ets_pg,
39 u32 *pri_pg_tbl); 37 u32 *pri_pg_tbl);
@@ -425,30 +423,52 @@ static void bnx2x_pfc_set_pfc(struct bnx2x *bp)
425 bnx2x_pfc_clear(bp); 423 bnx2x_pfc_clear(bp);
426} 424}
427 425
428static int bnx2x_dcbx_stop_hw_tx(struct bnx2x *bp) 426int bnx2x_dcbx_stop_hw_tx(struct bnx2x *bp)
429{ 427{
430 struct bnx2x_func_state_params func_params = {NULL}; 428 struct bnx2x_func_state_params func_params = {NULL};
429 int rc;
431 430
432 func_params.f_obj = &bp->func_obj; 431 func_params.f_obj = &bp->func_obj;
433 func_params.cmd = BNX2X_F_CMD_TX_STOP; 432 func_params.cmd = BNX2X_F_CMD_TX_STOP;
434 433
434 __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
435 __set_bit(RAMROD_RETRY, &func_params.ramrod_flags);
436
435 DP(BNX2X_MSG_DCB, "STOP TRAFFIC\n"); 437 DP(BNX2X_MSG_DCB, "STOP TRAFFIC\n");
436 return bnx2x_func_state_change(bp, &func_params); 438
439 rc = bnx2x_func_state_change(bp, &func_params);
440 if (rc) {
441 BNX2X_ERR("Unable to hold traffic for HW configuration\n");
442 bnx2x_panic();
443 }
444
445 return rc;
437} 446}
438 447
439static int bnx2x_dcbx_resume_hw_tx(struct bnx2x *bp) 448int bnx2x_dcbx_resume_hw_tx(struct bnx2x *bp)
440{ 449{
441 struct bnx2x_func_state_params func_params = {NULL}; 450 struct bnx2x_func_state_params func_params = {NULL};
442 struct bnx2x_func_tx_start_params *tx_params = 451 struct bnx2x_func_tx_start_params *tx_params =
443 &func_params.params.tx_start; 452 &func_params.params.tx_start;
453 int rc;
444 454
445 func_params.f_obj = &bp->func_obj; 455 func_params.f_obj = &bp->func_obj;
446 func_params.cmd = BNX2X_F_CMD_TX_START; 456 func_params.cmd = BNX2X_F_CMD_TX_START;
447 457
458 __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
459 __set_bit(RAMROD_RETRY, &func_params.ramrod_flags);
460
448 bnx2x_dcbx_fw_struct(bp, tx_params); 461 bnx2x_dcbx_fw_struct(bp, tx_params);
449 462
450 DP(BNX2X_MSG_DCB, "START TRAFFIC\n"); 463 DP(BNX2X_MSG_DCB, "START TRAFFIC\n");
451 return bnx2x_func_state_change(bp, &func_params); 464
465 rc = bnx2x_func_state_change(bp, &func_params);
466 if (rc) {
467 BNX2X_ERR("Unable to resume traffic after HW configuration\n");
468 bnx2x_panic();
469 }
470
471 return rc;
452} 472}
453 473
454static void bnx2x_dcbx_2cos_limit_update_ets_config(struct bnx2x *bp) 474static void bnx2x_dcbx_2cos_limit_update_ets_config(struct bnx2x *bp)
@@ -744,7 +764,9 @@ void bnx2x_dcbx_set_params(struct bnx2x *bp, u32 state)
744 if (IS_MF(bp)) 764 if (IS_MF(bp))
745 bnx2x_link_sync_notify(bp); 765 bnx2x_link_sync_notify(bp);
746 766
747 bnx2x_dcbx_stop_hw_tx(bp); 767 set_bit(BNX2X_SP_RTNL_TX_STOP, &bp->sp_rtnl_state);
768
769 schedule_delayed_work(&bp->sp_rtnl_task, 0);
748 770
749 return; 771 return;
750 } 772 }
@@ -757,7 +779,9 @@ void bnx2x_dcbx_set_params(struct bnx2x *bp, u32 state)
757 /* ets may affect cmng configuration: reinit it in hw */ 779 /* ets may affect cmng configuration: reinit it in hw */
758 bnx2x_set_local_cmng(bp); 780 bnx2x_set_local_cmng(bp);
759 781
760 bnx2x_dcbx_resume_hw_tx(bp); 782 set_bit(BNX2X_SP_RTNL_TX_RESUME, &bp->sp_rtnl_state);
783
784 schedule_delayed_work(&bp->sp_rtnl_task, 0);
761 785
762 return; 786 return;
763 case BNX2X_DCBX_STATE_TX_RELEASED: 787 case BNX2X_DCBX_STATE_TX_RELEASED:
@@ -2367,21 +2391,24 @@ static u8 bnx2x_dcbnl_get_featcfg(struct net_device *netdev, int featid,
2367 case DCB_FEATCFG_ATTR_PG: 2391 case DCB_FEATCFG_ATTR_PG:
2368 if (bp->dcbx_local_feat.ets.enabled) 2392 if (bp->dcbx_local_feat.ets.enabled)
2369 *flags |= DCB_FEATCFG_ENABLE; 2393 *flags |= DCB_FEATCFG_ENABLE;
2370 if (bp->dcbx_error & DCBX_LOCAL_ETS_ERROR) 2394 if (bp->dcbx_error & (DCBX_LOCAL_ETS_ERROR |
2395 DCBX_REMOTE_MIB_ERROR))
2371 *flags |= DCB_FEATCFG_ERROR; 2396 *flags |= DCB_FEATCFG_ERROR;
2372 break; 2397 break;
2373 case DCB_FEATCFG_ATTR_PFC: 2398 case DCB_FEATCFG_ATTR_PFC:
2374 if (bp->dcbx_local_feat.pfc.enabled) 2399 if (bp->dcbx_local_feat.pfc.enabled)
2375 *flags |= DCB_FEATCFG_ENABLE; 2400 *flags |= DCB_FEATCFG_ENABLE;
2376 if (bp->dcbx_error & (DCBX_LOCAL_PFC_ERROR | 2401 if (bp->dcbx_error & (DCBX_LOCAL_PFC_ERROR |
2377 DCBX_LOCAL_PFC_MISMATCH)) 2402 DCBX_LOCAL_PFC_MISMATCH |
2403 DCBX_REMOTE_MIB_ERROR))
2378 *flags |= DCB_FEATCFG_ERROR; 2404 *flags |= DCB_FEATCFG_ERROR;
2379 break; 2405 break;
2380 case DCB_FEATCFG_ATTR_APP: 2406 case DCB_FEATCFG_ATTR_APP:
2381 if (bp->dcbx_local_feat.app.enabled) 2407 if (bp->dcbx_local_feat.app.enabled)
2382 *flags |= DCB_FEATCFG_ENABLE; 2408 *flags |= DCB_FEATCFG_ENABLE;
2383 if (bp->dcbx_error & (DCBX_LOCAL_APP_ERROR | 2409 if (bp->dcbx_error & (DCBX_LOCAL_APP_ERROR |
2384 DCBX_LOCAL_APP_MISMATCH)) 2410 DCBX_LOCAL_APP_MISMATCH |
2411 DCBX_REMOTE_MIB_ERROR))
2385 *flags |= DCB_FEATCFG_ERROR; 2412 *flags |= DCB_FEATCFG_ERROR;
2386 break; 2413 break;
2387 default: 2414 default:
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.h
index 125bd1b6586f..804b8f64463e 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.h
@@ -199,4 +199,7 @@ extern const struct dcbnl_rtnl_ops bnx2x_dcbnl_ops;
199int bnx2x_dcbnl_update_applist(struct bnx2x *bp, bool delall); 199int bnx2x_dcbnl_update_applist(struct bnx2x *bp, bool delall);
200#endif /* BCM_DCBNL */ 200#endif /* BCM_DCBNL */
201 201
202int bnx2x_dcbx_stop_hw_tx(struct bnx2x *bp);
203int bnx2x_dcbx_resume_hw_tx(struct bnx2x *bp);
204
202#endif /* BNX2X_DCB_H */ 205#endif /* BNX2X_DCB_H */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index 955d6cfd9cb7..1627a4e09c32 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -2261,6 +2261,23 @@ static void bnx2x_set_requested_fc(struct bnx2x *bp)
2261 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH; 2261 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
2262} 2262}
2263 2263
2264static void bnx2x_init_dropless_fc(struct bnx2x *bp)
2265{
2266 u32 pause_enabled = 0;
2267
2268 if (!CHIP_IS_E1(bp) && bp->dropless_fc && bp->link_vars.link_up) {
2269 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
2270 pause_enabled = 1;
2271
2272 REG_WR(bp, BAR_USTRORM_INTMEM +
2273 USTORM_ETH_PAUSE_ENABLED_OFFSET(BP_PORT(bp)),
2274 pause_enabled);
2275 }
2276
2277 DP(NETIF_MSG_IFUP | NETIF_MSG_LINK, "dropless_fc is %s\n",
2278 pause_enabled ? "enabled" : "disabled");
2279}
2280
2264int bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode) 2281int bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
2265{ 2282{
2266 int rc, cfx_idx = bnx2x_get_link_cfg_idx(bp); 2283 int rc, cfx_idx = bnx2x_get_link_cfg_idx(bp);
@@ -2294,6 +2311,8 @@ int bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
2294 2311
2295 bnx2x_release_phy_lock(bp); 2312 bnx2x_release_phy_lock(bp);
2296 2313
2314 bnx2x_init_dropless_fc(bp);
2315
2297 bnx2x_calc_fc_adv(bp); 2316 bnx2x_calc_fc_adv(bp);
2298 2317
2299 if (bp->link_vars.link_up) { 2318 if (bp->link_vars.link_up) {
@@ -2315,6 +2334,8 @@ void bnx2x_link_set(struct bnx2x *bp)
2315 bnx2x_phy_init(&bp->link_params, &bp->link_vars); 2334 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
2316 bnx2x_release_phy_lock(bp); 2335 bnx2x_release_phy_lock(bp);
2317 2336
2337 bnx2x_init_dropless_fc(bp);
2338
2318 bnx2x_calc_fc_adv(bp); 2339 bnx2x_calc_fc_adv(bp);
2319 } else 2340 } else
2320 BNX2X_ERR("Bootcode is missing - can not set link\n"); 2341 BNX2X_ERR("Bootcode is missing - can not set link\n");
@@ -2556,20 +2577,9 @@ static void bnx2x_link_attn(struct bnx2x *bp)
2556 2577
2557 bnx2x_link_update(&bp->link_params, &bp->link_vars); 2578 bnx2x_link_update(&bp->link_params, &bp->link_vars);
2558 2579
2559 if (bp->link_vars.link_up) { 2580 bnx2x_init_dropless_fc(bp);
2560 2581
2561 /* dropless flow control */ 2582 if (bp->link_vars.link_up) {
2562 if (!CHIP_IS_E1(bp) && bp->dropless_fc) {
2563 int port = BP_PORT(bp);
2564 u32 pause_enabled = 0;
2565
2566 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
2567 pause_enabled = 1;
2568
2569 REG_WR(bp, BAR_USTRORM_INTMEM +
2570 USTORM_ETH_PAUSE_ENABLED_OFFSET(port),
2571 pause_enabled);
2572 }
2573 2583
2574 if (bp->link_vars.mac_type != MAC_TYPE_EMAC) { 2584 if (bp->link_vars.mac_type != MAC_TYPE_EMAC) {
2575 struct host_port_stats *pstats; 2585 struct host_port_stats *pstats;
@@ -7845,12 +7855,15 @@ void bnx2x_free_mem(struct bnx2x *bp)
7845{ 7855{
7846 int i; 7856 int i;
7847 7857
7848 BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
7849 sizeof(struct host_sp_status_block));
7850
7851 BNX2X_PCI_FREE(bp->fw_stats, bp->fw_stats_mapping, 7858 BNX2X_PCI_FREE(bp->fw_stats, bp->fw_stats_mapping,
7852 bp->fw_stats_data_sz + bp->fw_stats_req_sz); 7859 bp->fw_stats_data_sz + bp->fw_stats_req_sz);
7853 7860
7861 if (IS_VF(bp))
7862 return;
7863
7864 BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
7865 sizeof(struct host_sp_status_block));
7866
7854 BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping, 7867 BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
7855 sizeof(struct bnx2x_slowpath)); 7868 sizeof(struct bnx2x_slowpath));
7856 7869
@@ -9645,6 +9658,12 @@ sp_rtnl_not_reset:
9645 &bp->sp_rtnl_state)) 9658 &bp->sp_rtnl_state))
9646 bnx2x_pf_set_vfs_vlan(bp); 9659 bnx2x_pf_set_vfs_vlan(bp);
9647 9660
9661 if (test_and_clear_bit(BNX2X_SP_RTNL_TX_STOP, &bp->sp_rtnl_state))
9662 bnx2x_dcbx_stop_hw_tx(bp);
9663
9664 if (test_and_clear_bit(BNX2X_SP_RTNL_TX_RESUME, &bp->sp_rtnl_state))
9665 bnx2x_dcbx_resume_hw_tx(bp);
9666
9648 /* work which needs rtnl lock not-taken (as it takes the lock itself and 9667 /* work which needs rtnl lock not-taken (as it takes the lock itself and
9649 * can be called from other contexts as well) 9668 * can be called from other contexts as well)
9650 */ 9669 */
@@ -11147,6 +11166,9 @@ static bool bnx2x_get_dropless_info(struct bnx2x *bp)
11147 int tmp; 11166 int tmp;
11148 u32 cfg; 11167 u32 cfg;
11149 11168
11169 if (IS_VF(bp))
11170 return 0;
11171
11150 if (IS_MF(bp) && !CHIP_IS_E1x(bp)) { 11172 if (IS_MF(bp) && !CHIP_IS_E1x(bp)) {
11151 /* Take function: tmp = func */ 11173 /* Take function: tmp = func */
11152 tmp = BP_ABS_FUNC(bp); 11174 tmp = BP_ABS_FUNC(bp);
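The bnx2x_dcb.c and bnx2x_main.c hunks above stop calling bnx2x_dcbx_stop_hw_tx()/bnx2x_dcbx_resume_hw_tx() directly from the DCBX notification path: the stop/start ramrods now set RAMROD_COMP_WAIT (and RAMROD_RETRY) and wait for completion, so the work is deferred to the sp_rtnl task via a flag bit plus schedule_delayed_work(). A minimal sketch of that deferral pattern, with hypothetical names (struct my_priv, MY_SP_TASK_*, my_*_hw_tx) standing in for the bnx2x structures:

/* Deferral pattern: record a request with a bit, kick a worker, and let
 * the worker (process context, rtnl held) do the sleeping HW work.
 * All names here are illustrative stand-ins for the bnx2x equivalents.
 */
#include <linux/bitops.h>
#include <linux/rtnetlink.h>
#include <linux/workqueue.h>

enum { MY_SP_TASK_TX_STOP, MY_SP_TASK_TX_RESUME };

struct my_priv {
	unsigned long sp_state;
	struct delayed_work sp_task;
};

static int my_stop_hw_tx(struct my_priv *p)   { return 0; } /* may sleep */
static int my_resume_hw_tx(struct my_priv *p) { return 0; } /* may sleep */

/* Called from a context that must not sleep: only record the request. */
static void my_request_tx_stop(struct my_priv *p)
{
	set_bit(MY_SP_TASK_TX_STOP, &p->sp_state);
	schedule_delayed_work(&p->sp_task, 0);
}

/* Worker: may take rtnl and wait for the ramrods to complete. */
static void my_sp_task(struct work_struct *work)
{
	struct my_priv *p = container_of(work, struct my_priv, sp_task.work);

	rtnl_lock();
	if (test_and_clear_bit(MY_SP_TASK_TX_STOP, &p->sp_state))
		my_stop_hw_tx(p);
	if (test_and_clear_bit(MY_SP_TASK_TX_RESUME, &p->sp_state))
		my_resume_hw_tx(p);
	rtnl_unlock();
}

static void my_init(struct my_priv *p)
{
	INIT_DELAYED_WORK(&p->sp_task, my_sp_task);
}

In the driver itself the worker is the existing bnx2x_sp_rtnl_task(), which is why the two DCBX helpers are now exported from bnx2x_dcb.h instead of staying static.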
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
index 44104fb27947..e8706e19f96f 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
@@ -522,23 +522,6 @@ static int bnx2x_vfop_set_user_req(struct bnx2x *bp,
522 return 0; 522 return 0;
523} 523}
524 524
525static int
526bnx2x_vfop_config_vlan0(struct bnx2x *bp,
527 struct bnx2x_vlan_mac_ramrod_params *vlan_mac,
528 bool add)
529{
530 int rc;
531
532 vlan_mac->user_req.cmd = add ? BNX2X_VLAN_MAC_ADD :
533 BNX2X_VLAN_MAC_DEL;
534 vlan_mac->user_req.u.vlan.vlan = 0;
535
536 rc = bnx2x_config_vlan_mac(bp, vlan_mac);
537 if (rc == -EEXIST)
538 rc = 0;
539 return rc;
540}
541
542static int bnx2x_vfop_config_list(struct bnx2x *bp, 525static int bnx2x_vfop_config_list(struct bnx2x *bp,
543 struct bnx2x_vfop_filters *filters, 526 struct bnx2x_vfop_filters *filters,
544 struct bnx2x_vlan_mac_ramrod_params *vlan_mac) 527 struct bnx2x_vlan_mac_ramrod_params *vlan_mac)
@@ -643,30 +626,14 @@ static void bnx2x_vfop_vlan_mac(struct bnx2x *bp, struct bnx2x_virtf *vf)
643 626
644 case BNX2X_VFOP_VLAN_CONFIG_LIST: 627 case BNX2X_VFOP_VLAN_CONFIG_LIST:
645 /* next state */ 628 /* next state */
646 vfop->state = BNX2X_VFOP_VLAN_CONFIG_LIST_0; 629 vfop->state = BNX2X_VFOP_VLAN_MAC_CHK_DONE;
647
648 /* remove vlan0 - could be no-op */
649 vfop->rc = bnx2x_vfop_config_vlan0(bp, vlan_mac, false);
650 if (vfop->rc)
651 goto op_err;
652 630
653 /* Do vlan list config. if this operation fails we try to 631 /* do list config */
654 * restore vlan0 to keep the queue is working order
655 */
656 vfop->rc = bnx2x_vfop_config_list(bp, filters, vlan_mac); 632 vfop->rc = bnx2x_vfop_config_list(bp, filters, vlan_mac);
657 if (!vfop->rc) { 633 if (!vfop->rc) {
658 set_bit(RAMROD_CONT, &vlan_mac->ramrod_flags); 634 set_bit(RAMROD_CONT, &vlan_mac->ramrod_flags);
659 vfop->rc = bnx2x_config_vlan_mac(bp, vlan_mac); 635 vfop->rc = bnx2x_config_vlan_mac(bp, vlan_mac);
660 } 636 }
661 bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT); /* fall-through */
662
663 case BNX2X_VFOP_VLAN_CONFIG_LIST_0:
664 /* next state */
665 vfop->state = BNX2X_VFOP_VLAN_MAC_CHK_DONE;
666
667 if (list_empty(&obj->head))
668 /* add vlan0 */
669 vfop->rc = bnx2x_vfop_config_vlan0(bp, vlan_mac, true);
670 bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE); 637 bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);
671 638
672 default: 639 default:
@@ -1747,11 +1714,8 @@ void bnx2x_iov_init_dq(struct bnx2x *bp)
1747 1714
1748void bnx2x_iov_init_dmae(struct bnx2x *bp) 1715void bnx2x_iov_init_dmae(struct bnx2x *bp)
1749{ 1716{
1750 DP(BNX2X_MSG_IOV, "SRIOV is %s\n", IS_SRIOV(bp) ? "ON" : "OFF"); 1717 if (pci_find_ext_capability(bp->pdev, PCI_EXT_CAP_ID_SRIOV))
1751 if (!IS_SRIOV(bp)) 1718 REG_WR(bp, DMAE_REG_BACKWARD_COMP_EN, 0);
1752 return;
1753
1754 REG_WR(bp, DMAE_REG_BACKWARD_COMP_EN, 0);
1755} 1719}
1756 1720
1757static int bnx2x_vf_bus(struct bnx2x *bp, int vfid) 1721static int bnx2x_vf_bus(struct bnx2x *bp, int vfid)
@@ -2822,6 +2786,18 @@ int bnx2x_vf_init(struct bnx2x *bp, struct bnx2x_virtf *vf, dma_addr_t *sb_map)
2822 return 0; 2786 return 0;
2823} 2787}
2824 2788
2789struct set_vf_state_cookie {
2790 struct bnx2x_virtf *vf;
2791 u8 state;
2792};
2793
2794void bnx2x_set_vf_state(void *cookie)
2795{
2796 struct set_vf_state_cookie *p = (struct set_vf_state_cookie *)cookie;
2797
2798 p->vf->state = p->state;
2799}
2800
2825/* VFOP close (teardown the queues, delete mcasts and close HW) */ 2801/* VFOP close (teardown the queues, delete mcasts and close HW) */
2826static void bnx2x_vfop_close(struct bnx2x *bp, struct bnx2x_virtf *vf) 2802static void bnx2x_vfop_close(struct bnx2x *bp, struct bnx2x_virtf *vf)
2827{ 2803{
@@ -2872,7 +2848,19 @@ static void bnx2x_vfop_close(struct bnx2x *bp, struct bnx2x_virtf *vf)
2872op_err: 2848op_err:
2873 BNX2X_ERR("VF[%d] CLOSE error: rc %d\n", vf->abs_vfid, vfop->rc); 2849 BNX2X_ERR("VF[%d] CLOSE error: rc %d\n", vf->abs_vfid, vfop->rc);
2874op_done: 2850op_done:
2875 vf->state = VF_ACQUIRED; 2851
2852 /* need to make sure there are no outstanding stats ramrods which may
2853 * cause the device to access the VF's stats buffer which it will free
2854 * as soon as we return from the close flow.
2855 */
2856 {
2857 struct set_vf_state_cookie cookie;
2858
2859 cookie.vf = vf;
2860 cookie.state = VF_ACQUIRED;
2861 bnx2x_stats_safe_exec(bp, bnx2x_set_vf_state, &cookie);
2862 }
2863
2876 DP(BNX2X_MSG_IOV, "set state to acquired\n"); 2864 DP(BNX2X_MSG_IOV, "set state to acquired\n");
2877 bnx2x_vfop_end(bp, vf, vfop); 2865 bnx2x_vfop_end(bp, vf, vfop);
2878} 2866}
@@ -3084,8 +3072,9 @@ void bnx2x_disable_sriov(struct bnx2x *bp)
3084 pci_disable_sriov(bp->pdev); 3072 pci_disable_sriov(bp->pdev);
3085} 3073}
3086 3074
3087static int bnx2x_vf_ndo_sanity(struct bnx2x *bp, int vfidx, 3075static int bnx2x_vf_ndo_prep(struct bnx2x *bp, int vfidx,
3088 struct bnx2x_virtf *vf) 3076 struct bnx2x_virtf **vf,
3077 struct pf_vf_bulletin_content **bulletin)
3089{ 3078{
3090 if (bp->state != BNX2X_STATE_OPEN) { 3079 if (bp->state != BNX2X_STATE_OPEN) {
3091 BNX2X_ERR("vf ndo called though PF is down\n"); 3080 BNX2X_ERR("vf ndo called though PF is down\n");
@@ -3103,12 +3092,22 @@ static int bnx2x_vf_ndo_sanity(struct bnx2x *bp, int vfidx,
3103 return -EINVAL; 3092 return -EINVAL;
3104 } 3093 }
3105 3094
3106 if (!vf) { 3095 /* init members */
3096 *vf = BP_VF(bp, vfidx);
3097 *bulletin = BP_VF_BULLETIN(bp, vfidx);
3098
3099 if (!*vf) {
3107 BNX2X_ERR("vf ndo called but vf was null. vfidx was %d\n", 3100 BNX2X_ERR("vf ndo called but vf was null. vfidx was %d\n",
3108 vfidx); 3101 vfidx);
3109 return -EINVAL; 3102 return -EINVAL;
3110 } 3103 }
3111 3104
3105 if (!*bulletin) {
3106 BNX2X_ERR("vf ndo called but Bulletin Board struct is null. vfidx was %d\n",
3107 vfidx);
3108 return -EINVAL;
3109 }
3110
3112 return 0; 3111 return 0;
3113} 3112}
3114 3113
@@ -3116,17 +3115,19 @@ int bnx2x_get_vf_config(struct net_device *dev, int vfidx,
3116 struct ifla_vf_info *ivi) 3115 struct ifla_vf_info *ivi)
3117{ 3116{
3118 struct bnx2x *bp = netdev_priv(dev); 3117 struct bnx2x *bp = netdev_priv(dev);
3119 struct bnx2x_virtf *vf = BP_VF(bp, vfidx); 3118 struct bnx2x_virtf *vf = NULL;
3120 struct bnx2x_vlan_mac_obj *mac_obj = &bnx2x_vfq(vf, 0, mac_obj); 3119 struct pf_vf_bulletin_content *bulletin = NULL;
3121 struct bnx2x_vlan_mac_obj *vlan_obj = &bnx2x_vfq(vf, 0, vlan_obj); 3120 struct bnx2x_vlan_mac_obj *mac_obj;
3122 struct pf_vf_bulletin_content *bulletin = BP_VF_BULLETIN(bp, vfidx); 3121 struct bnx2x_vlan_mac_obj *vlan_obj;
3123 int rc; 3122 int rc;
3124 3123
3125 /* sanity */ 3124 /* sanity and init */
3126 rc = bnx2x_vf_ndo_sanity(bp, vfidx, vf); 3125 rc = bnx2x_vf_ndo_prep(bp, vfidx, &vf, &bulletin);
3127 if (rc) 3126 if (rc)
3128 return rc; 3127 return rc;
3129 if (!mac_obj || !vlan_obj || !bulletin) { 3128 mac_obj = &bnx2x_vfq(vf, 0, mac_obj);
3129 vlan_obj = &bnx2x_vfq(vf, 0, vlan_obj);
3130 if (!mac_obj || !vlan_obj) {
3130 BNX2X_ERR("VF partially initialized\n"); 3131 BNX2X_ERR("VF partially initialized\n");
3131 return -EINVAL; 3132 return -EINVAL;
3132 } 3133 }
@@ -3183,11 +3184,11 @@ int bnx2x_set_vf_mac(struct net_device *dev, int vfidx, u8 *mac)
3183{ 3184{
3184 struct bnx2x *bp = netdev_priv(dev); 3185 struct bnx2x *bp = netdev_priv(dev);
3185 int rc, q_logical_state; 3186 int rc, q_logical_state;
3186 struct bnx2x_virtf *vf = BP_VF(bp, vfidx); 3187 struct bnx2x_virtf *vf = NULL;
3187 struct pf_vf_bulletin_content *bulletin = BP_VF_BULLETIN(bp, vfidx); 3188 struct pf_vf_bulletin_content *bulletin = NULL;
3188 3189
3189 /* sanity */ 3190 /* sanity and init */
3190 rc = bnx2x_vf_ndo_sanity(bp, vfidx, vf); 3191 rc = bnx2x_vf_ndo_prep(bp, vfidx, &vf, &bulletin);
3191 if (rc) 3192 if (rc)
3192 return rc; 3193 return rc;
3193 if (!is_valid_ether_addr(mac)) { 3194 if (!is_valid_ether_addr(mac)) {
@@ -3249,11 +3250,11 @@ int bnx2x_set_vf_vlan(struct net_device *dev, int vfidx, u16 vlan, u8 qos)
3249{ 3250{
3250 struct bnx2x *bp = netdev_priv(dev); 3251 struct bnx2x *bp = netdev_priv(dev);
3251 int rc, q_logical_state; 3252 int rc, q_logical_state;
3252 struct bnx2x_virtf *vf = BP_VF(bp, vfidx); 3253 struct bnx2x_virtf *vf = NULL;
3253 struct pf_vf_bulletin_content *bulletin = BP_VF_BULLETIN(bp, vfidx); 3254 struct pf_vf_bulletin_content *bulletin = NULL;
3254 3255
3255 /* sanity */ 3256 /* sanity and init */
3256 rc = bnx2x_vf_ndo_sanity(bp, vfidx, vf); 3257 rc = bnx2x_vf_ndo_prep(bp, vfidx, &vf, &bulletin);
3257 if (rc) 3258 if (rc)
3258 return rc; 3259 return rc;
3259 3260
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
index d63d1327b051..86436c77af03 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
@@ -522,20 +522,16 @@ static void bnx2x_func_stats_init(struct bnx2x *bp)
522/* should be called under stats_sema */ 522/* should be called under stats_sema */
523static void __bnx2x_stats_start(struct bnx2x *bp) 523static void __bnx2x_stats_start(struct bnx2x *bp)
524{ 524{
525 /* vfs travel through here as part of the statistics FSM, but no action 525 if (IS_PF(bp)) {
526 * is required 526 if (bp->port.pmf)
527 */ 527 bnx2x_port_stats_init(bp);
528 if (IS_VF(bp))
529 return;
530
531 if (bp->port.pmf)
532 bnx2x_port_stats_init(bp);
533 528
534 else if (bp->func_stx) 529 else if (bp->func_stx)
535 bnx2x_func_stats_init(bp); 530 bnx2x_func_stats_init(bp);
536 531
537 bnx2x_hw_stats_post(bp); 532 bnx2x_hw_stats_post(bp);
538 bnx2x_storm_stats_post(bp); 533 bnx2x_storm_stats_post(bp);
534 }
539 535
540 bp->stats_started = true; 536 bp->stats_started = true;
541} 537}
@@ -1997,3 +1993,14 @@ void bnx2x_afex_collect_stats(struct bnx2x *bp, void *void_afex_stats,
1997 estats->mac_discard); 1993 estats->mac_discard);
1998 } 1994 }
1999} 1995}
1996
1997void bnx2x_stats_safe_exec(struct bnx2x *bp,
1998 void (func_to_exec)(void *cookie),
1999 void *cookie){
2000 if (down_timeout(&bp->stats_sema, HZ/10))
2001 BNX2X_ERR("Unable to acquire stats lock\n");
2002 bnx2x_stats_comp(bp);
2003 func_to_exec(cookie);
2004 __bnx2x_stats_start(bp);
2005 up(&bp->stats_sema);
2006}
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h
index 853824d258e8..f35845006cdd 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h
@@ -539,6 +539,9 @@ struct bnx2x;
539void bnx2x_memset_stats(struct bnx2x *bp); 539void bnx2x_memset_stats(struct bnx2x *bp);
540void bnx2x_stats_init(struct bnx2x *bp); 540void bnx2x_stats_init(struct bnx2x *bp);
541void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event); 541void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);
542void bnx2x_stats_safe_exec(struct bnx2x *bp,
543 void (func_to_exec)(void *cookie),
544 void *cookie);
542 545
543/** 546/**
544 * bnx2x_save_statistics - save statistics when unloading. 547 * bnx2x_save_statistics - save statistics when unloading.
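bnx2x_stats_safe_exec(), added in the bnx2x_stats.c hunk above and declared here, runs a short callback with statistics traffic quiesced: it takes the stats semaphore, drains pending stats ramrods, invokes the callback, then restarts statistics and releases the semaphore. The VF close path uses it to flip vf->state safely. A hedged usage sketch mirroring that caller; struct my_cookie and the wrapper are illustrative, only the helper itself comes from the hunk above:

/* Illustrative caller of bnx2x_stats_safe_exec(); this would be compiled
 * inside the bnx2x driver sources, since it relies on the driver's types.
 */
struct my_cookie {
	struct bnx2x_virtf *vf;
	u8 state;
};

static void my_set_vf_state(void *cookie)
{
	struct my_cookie *c = cookie;

	c->vf->state = c->state;	/* runs with stats ramrods drained */
}

static void my_mark_vf_acquired(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	struct my_cookie c = { .vf = vf, .state = VF_ACQUIRED };

	bnx2x_stats_safe_exec(bp, my_set_vf_state, &c);
}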
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index 181edb522450..3d91a5ec61a4 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -2563,8 +2563,8 @@ static int be_close(struct net_device *netdev)
2563 /* Wait for all pending tx completions to arrive so that 2563 /* Wait for all pending tx completions to arrive so that
2564 * all tx skbs are freed. 2564 * all tx skbs are freed.
2565 */ 2565 */
2566 be_tx_compl_clean(adapter);
2567 netif_tx_disable(netdev); 2566 netif_tx_disable(netdev);
2567 be_tx_compl_clean(adapter);
2568 2568
2569 be_rx_qs_destroy(adapter); 2569 be_rx_qs_destroy(adapter);
2570 2570
@@ -4373,6 +4373,10 @@ static int be_resume(struct pci_dev *pdev)
4373 pci_set_power_state(pdev, PCI_D0); 4373 pci_set_power_state(pdev, PCI_D0);
4374 pci_restore_state(pdev); 4374 pci_restore_state(pdev);
4375 4375
4376 status = be_fw_wait_ready(adapter);
4377 if (status)
4378 return status;
4379
4376 /* tell fw we're ready to fire cmds */ 4380 /* tell fw we're ready to fire cmds */
4377 status = be_cmd_fw_init(adapter); 4381 status = be_cmd_fw_init(adapter);
4378 if (status) 4382 if (status)
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 77ea0db0bbfc..c610a2716be4 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -971,8 +971,7 @@ fec_enet_rx(struct net_device *ndev, int budget)
971 htons(ETH_P_8021Q), 971 htons(ETH_P_8021Q),
972 vlan_tag); 972 vlan_tag);
973 973
974 if (!skb_defer_rx_timestamp(skb)) 974 napi_gro_receive(&fep->napi, skb);
975 napi_gro_receive(&fep->napi, skb);
976 } 975 }
977 976
978 bdp->cbd_bufaddr = dma_map_single(&fep->pdev->dev, data, 977 bdp->cbd_bufaddr = dma_map_single(&fep->pdev->dev, data,
diff --git a/drivers/net/ethernet/jme.c b/drivers/net/ethernet/jme.c
index 7fbe6abf6054..23de82a9da82 100644
--- a/drivers/net/ethernet/jme.c
+++ b/drivers/net/ethernet/jme.c
@@ -3069,7 +3069,7 @@ jme_init_one(struct pci_dev *pdev,
3069 jwrite32(jme, JME_APMC, apmc); 3069 jwrite32(jme, JME_APMC, apmc);
3070 } 3070 }
3071 3071
3072 NETIF_NAPI_SET(netdev, &jme->napi, jme_poll, jme->rx_ring_size >> 2) 3072 NETIF_NAPI_SET(netdev, &jme->napi, jme_poll, NAPI_POLL_WEIGHT)
3073 3073
3074 spin_lock_init(&jme->phy_lock); 3074 spin_lock_init(&jme->phy_lock);
3075 spin_lock_init(&jme->macaddr_lock); 3075 spin_lock_init(&jme->macaddr_lock);
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic.h b/drivers/net/ethernet/qlogic/netxen/netxen_nic.h
index 3fe09ab2d7c9..32675e16021e 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic.h
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic.h
@@ -1171,7 +1171,6 @@ typedef struct {
1171 1171
1172#define NETXEN_DB_MAPSIZE_BYTES 0x1000 1172#define NETXEN_DB_MAPSIZE_BYTES 0x1000
1173 1173
1174#define NETXEN_NETDEV_WEIGHT 128
1175#define NETXEN_ADAPTER_UP_MAGIC 777 1174#define NETXEN_ADAPTER_UP_MAGIC 777
1176#define NETXEN_NIC_PEG_TUNE 0 1175#define NETXEN_NIC_PEG_TUNE 0
1177 1176
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
index c401b0b4353d..ec4cf7fd4123 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
@@ -197,7 +197,7 @@ netxen_napi_add(struct netxen_adapter *adapter, struct net_device *netdev)
197 for (ring = 0; ring < adapter->max_sds_rings; ring++) { 197 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
198 sds_ring = &recv_ctx->sds_rings[ring]; 198 sds_ring = &recv_ctx->sds_rings[ring];
199 netif_napi_add(netdev, &sds_ring->napi, 199 netif_napi_add(netdev, &sds_ring->napi,
200 netxen_nic_poll, NETXEN_NETDEV_WEIGHT); 200 netxen_nic_poll, NAPI_POLL_WEIGHT);
201 } 201 }
202 202
203 return 0; 203 return 0;
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index b5eb4195fc99..85e5c97191dd 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -7088,7 +7088,7 @@ rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
7088 7088
7089 RTL_W8(Cfg9346, Cfg9346_Unlock); 7089 RTL_W8(Cfg9346, Cfg9346_Unlock);
7090 RTL_W8(Config1, RTL_R8(Config1) | PMEnable); 7090 RTL_W8(Config1, RTL_R8(Config1) | PMEnable);
7091 RTL_W8(Config5, RTL_R8(Config5) & PMEStatus); 7091 RTL_W8(Config5, RTL_R8(Config5) & (BWF | MWF | UWF | LanWake | PMEStatus));
7092 if ((RTL_R8(Config3) & (LinkUp | MagicPacket)) != 0) 7092 if ((RTL_R8(Config3) & (LinkUp | MagicPacket)) != 0)
7093 tp->features |= RTL_FEATURE_WOL; 7093 tp->features |= RTL_FEATURE_WOL;
7094 if ((RTL_R8(Config5) & (UWF | BWF | MWF)) != 0) 7094 if ((RTL_R8(Config5) & (UWF | BWF | MWF)) != 0)
diff --git a/drivers/net/ethernet/sfc/filter.c b/drivers/net/ethernet/sfc/filter.c
index 2a469b27a506..30d744235d27 100644
--- a/drivers/net/ethernet/sfc/filter.c
+++ b/drivers/net/ethernet/sfc/filter.c
@@ -675,7 +675,7 @@ s32 efx_filter_insert_filter(struct efx_nic *efx, struct efx_filter_spec *spec,
675 BUILD_BUG_ON(EFX_FILTER_INDEX_UC_DEF != 0); 675 BUILD_BUG_ON(EFX_FILTER_INDEX_UC_DEF != 0);
676 BUILD_BUG_ON(EFX_FILTER_INDEX_MC_DEF != 676 BUILD_BUG_ON(EFX_FILTER_INDEX_MC_DEF !=
677 EFX_FILTER_MC_DEF - EFX_FILTER_UC_DEF); 677 EFX_FILTER_MC_DEF - EFX_FILTER_UC_DEF);
678 rep_index = spec->type - EFX_FILTER_INDEX_UC_DEF; 678 rep_index = spec->type - EFX_FILTER_UC_DEF;
679 ins_index = rep_index; 679 ins_index = rep_index;
680 680
681 spin_lock_bh(&state->lock); 681 spin_lock_bh(&state->lock);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index 03de76c7a177..1c83a44c547b 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -71,14 +71,18 @@ static int stmmac_probe_config_dt(struct platform_device *pdev,
71 plat->force_sf_dma_mode = 1; 71 plat->force_sf_dma_mode = 1;
72 } 72 }
73 73
74 dma_cfg = devm_kzalloc(&pdev->dev, sizeof(*dma_cfg), GFP_KERNEL); 74 if (of_find_property(np, "snps,pbl", NULL)) {
75 if (!dma_cfg) 75 dma_cfg = devm_kzalloc(&pdev->dev, sizeof(*dma_cfg),
76 return -ENOMEM; 76 GFP_KERNEL);
77 77 if (!dma_cfg)
78 plat->dma_cfg = dma_cfg; 78 return -ENOMEM;
79 of_property_read_u32(np, "snps,pbl", &dma_cfg->pbl); 79 plat->dma_cfg = dma_cfg;
80 dma_cfg->fixed_burst = of_property_read_bool(np, "snps,fixed-burst"); 80 of_property_read_u32(np, "snps,pbl", &dma_cfg->pbl);
81 dma_cfg->mixed_burst = of_property_read_bool(np, "snps,mixed-burst"); 81 dma_cfg->fixed_burst =
82 of_property_read_bool(np, "snps,fixed-burst");
83 dma_cfg->mixed_burst =
84 of_property_read_bool(np, "snps,mixed-burst");
85 }
82 86
83 return 0; 87 return 0;
84} 88}
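The stmmac hunk above only allocates and fills plat->dma_cfg when the optional "snps,pbl" property is present, so device trees that omit it fall back to the driver defaults rather than an all-zero DMA configuration. A minimal sketch of that optional-property pattern, assuming illustrative names ("my,burst-len", "my,fixed-burst", struct my_cfg):

/* Parse an optional DT configuration block only when its key property
 * exists; an absent property means "use driver defaults" (return NULL).
 */
#include <linux/device.h>
#include <linux/err.h>
#include <linux/of.h>
#include <linux/slab.h>

struct my_cfg {
	u32 burst_len;
	bool fixed_burst;
};

static struct my_cfg *my_parse_optional_cfg(struct device *dev,
					    struct device_node *np)
{
	struct my_cfg *cfg;

	if (!of_find_property(np, "my,burst-len", NULL))
		return NULL;			/* keep defaults */

	cfg = devm_kzalloc(dev, sizeof(*cfg), GFP_KERNEL);
	if (!cfg)
		return ERR_PTR(-ENOMEM);

	of_property_read_u32(np, "my,burst-len", &cfg->burst_len);
	cfg->fixed_burst = of_property_read_bool(np, "my,fixed-burst");
	return cfg;
}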
diff --git a/drivers/net/ethernet/toshiba/ps3_gelic_net.c b/drivers/net/ethernet/toshiba/ps3_gelic_net.c
index ad32af67e618..9c805e0c0cae 100644
--- a/drivers/net/ethernet/toshiba/ps3_gelic_net.c
+++ b/drivers/net/ethernet/toshiba/ps3_gelic_net.c
@@ -1466,8 +1466,7 @@ static void gelic_ether_setup_netdev_ops(struct net_device *netdev,
1466{ 1466{
1467 netdev->watchdog_timeo = GELIC_NET_WATCHDOG_TIMEOUT; 1467 netdev->watchdog_timeo = GELIC_NET_WATCHDOG_TIMEOUT;
1468 /* NAPI */ 1468 /* NAPI */
1469 netif_napi_add(netdev, napi, 1469 netif_napi_add(netdev, napi, gelic_net_poll, NAPI_POLL_WEIGHT);
1470 gelic_net_poll, GELIC_NET_NAPI_WEIGHT);
1471 netdev->ethtool_ops = &gelic_ether_ethtool_ops; 1470 netdev->ethtool_ops = &gelic_ether_ethtool_ops;
1472 netdev->netdev_ops = &gelic_netdevice_ops; 1471 netdev->netdev_ops = &gelic_netdevice_ops;
1473} 1472}
diff --git a/drivers/net/ethernet/toshiba/ps3_gelic_net.h b/drivers/net/ethernet/toshiba/ps3_gelic_net.h
index a93df6ac1909..309abb472aa2 100644
--- a/drivers/net/ethernet/toshiba/ps3_gelic_net.h
+++ b/drivers/net/ethernet/toshiba/ps3_gelic_net.h
@@ -37,7 +37,6 @@
37#define GELIC_NET_RXBUF_ALIGN 128 37#define GELIC_NET_RXBUF_ALIGN 128
38#define GELIC_CARD_RX_CSUM_DEFAULT 1 /* hw chksum */ 38#define GELIC_CARD_RX_CSUM_DEFAULT 1 /* hw chksum */
39#define GELIC_NET_WATCHDOG_TIMEOUT 5*HZ 39#define GELIC_NET_WATCHDOG_TIMEOUT 5*HZ
40#define GELIC_NET_NAPI_WEIGHT (GELIC_NET_RX_DESCRIPTORS)
41#define GELIC_NET_BROADCAST_ADDR 0xffffffffffffL 40#define GELIC_NET_BROADCAST_ADDR 0xffffffffffffL
42 41
43#define GELIC_NET_MC_COUNT_MAX 32 /* multicast address list */ 42#define GELIC_NET_MC_COUNT_MAX 32 /* multicast address list */
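The jme, netxen and gelic hunks above drop per-driver NAPI weights (ring-size based or private macros) in favour of the core NAPI_POLL_WEIGHT, since the networking core warns about weights above that value. A small sketch of the registration under illustrative names (my_poll, struct my_priv):

/* Register NAPI with the standard weight instead of a driver-private one. */
#include <linux/netdevice.h>

struct my_priv {
	struct napi_struct napi;
};

static int my_poll(struct napi_struct *napi, int budget)
{
	int done = 0;		/* process up to "budget" RX packets here */

	if (done < budget)
		napi_complete(napi);
	return done;
}

static void my_napi_setup(struct net_device *netdev, struct my_priv *priv)
{
	netif_napi_add(netdev, &priv->napi, my_poll, NAPI_POLL_WEIGHT);
}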
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c b/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c
index e90e1f46121e..64b4639f43b6 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c
@@ -175,6 +175,7 @@ int axienet_mdio_setup(struct axienet_local *lp, struct device_node *np)
175 printk(KERN_WARNING "Setting MDIO clock divisor to " 175 printk(KERN_WARNING "Setting MDIO clock divisor to "
176 "default %d\n", DEFAULT_CLOCK_DIVISOR); 176 "default %d\n", DEFAULT_CLOCK_DIVISOR);
177 clk_div = DEFAULT_CLOCK_DIVISOR; 177 clk_div = DEFAULT_CLOCK_DIVISOR;
178 of_node_put(np1);
178 goto issue; 179 goto issue;
179 } 180 }
180 181
diff --git a/drivers/net/irda/via-ircc.c b/drivers/net/irda/via-ircc.c
index 51f2bc376101..2dcc60fb37f1 100644
--- a/drivers/net/irda/via-ircc.c
+++ b/drivers/net/irda/via-ircc.c
@@ -210,8 +210,7 @@ static int via_init_one(struct pci_dev *pcidev, const struct pci_device_id *id)
210 pci_write_config_byte(pcidev,0x42,(bTmp | 0xf0)); 210 pci_write_config_byte(pcidev,0x42,(bTmp | 0xf0));
211 pci_write_config_byte(pcidev,0x5a,0xc0); 211 pci_write_config_byte(pcidev,0x5a,0xc0);
212 WriteLPCReg(0x28, 0x70 ); 212 WriteLPCReg(0x28, 0x70 );
213 if (via_ircc_open(pcidev, &info, 0x3076) == 0) 213 rc = via_ircc_open(pcidev, &info, 0x3076);
214 rc=0;
215 } else 214 } else
216 rc = -ENODEV; //IR not turn on 215 rc = -ENODEV; //IR not turn on
217 } else { //Not VT1211 216 } else { //Not VT1211
@@ -249,8 +248,7 @@ static int via_init_one(struct pci_dev *pcidev, const struct pci_device_id *id)
249 info.irq=FirIRQ; 248 info.irq=FirIRQ;
250 info.dma=FirDRQ1; 249 info.dma=FirDRQ1;
251 info.dma2=FirDRQ0; 250 info.dma2=FirDRQ0;
252 if (via_ircc_open(pcidev, &info, 0x3096) == 0) 251 rc = via_ircc_open(pcidev, &info, 0x3096);
253 rc=0;
254 } else 252 } else
255 rc = -ENODEV; //IR not turn on !!!!! 253 rc = -ENODEV; //IR not turn on !!!!!
256 }//Not VT1211 254 }//Not VT1211
diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
index b51db2abfe44..ea53abb20988 100644
--- a/drivers/net/macvtap.c
+++ b/drivers/net/macvtap.c
@@ -68,6 +68,8 @@ static const struct proto_ops macvtap_socket_ops;
68#define TUN_OFFLOADS (NETIF_F_HW_CSUM | NETIF_F_TSO_ECN | NETIF_F_TSO | \ 68#define TUN_OFFLOADS (NETIF_F_HW_CSUM | NETIF_F_TSO_ECN | NETIF_F_TSO | \
69 NETIF_F_TSO6 | NETIF_F_UFO) 69 NETIF_F_TSO6 | NETIF_F_UFO)
70#define RX_OFFLOADS (NETIF_F_GRO | NETIF_F_LRO) 70#define RX_OFFLOADS (NETIF_F_GRO | NETIF_F_LRO)
71#define TAP_FEATURES (NETIF_F_GSO | NETIF_F_SG)
72
71/* 73/*
72 * RCU usage: 74 * RCU usage:
73 * The macvtap_queue and the macvlan_dev are loosely coupled, the 75 * The macvtap_queue and the macvlan_dev are loosely coupled, the
@@ -278,7 +280,8 @@ static int macvtap_forward(struct net_device *dev, struct sk_buff *skb)
278{ 280{
279 struct macvlan_dev *vlan = netdev_priv(dev); 281 struct macvlan_dev *vlan = netdev_priv(dev);
280 struct macvtap_queue *q = macvtap_get_queue(dev, skb); 282 struct macvtap_queue *q = macvtap_get_queue(dev, skb);
281 netdev_features_t features; 283 netdev_features_t features = TAP_FEATURES;
284
282 if (!q) 285 if (!q)
283 goto drop; 286 goto drop;
284 287
@@ -287,9 +290,11 @@ static int macvtap_forward(struct net_device *dev, struct sk_buff *skb)
287 290
288 skb->dev = dev; 291 skb->dev = dev;
289 /* Apply the forward feature mask so that we perform segmentation 292 /* Apply the forward feature mask so that we perform segmentation
290 * according to users wishes. 293 * according to users wishes. This only works if VNET_HDR is
294 * enabled.
291 */ 295 */
292 features = netif_skb_features(skb) & vlan->tap_features; 296 if (q->flags & IFF_VNET_HDR)
297 features |= vlan->tap_features;
293 if (netif_needs_gso(skb, features)) { 298 if (netif_needs_gso(skb, features)) {
294 struct sk_buff *segs = __skb_gso_segment(skb, features, false); 299 struct sk_buff *segs = __skb_gso_segment(skb, features, false);
295 300
@@ -1064,8 +1069,7 @@ static int set_offload(struct macvtap_queue *q, unsigned long arg)
1064 /* tap_features are the same as features on tun/tap and 1069 /* tap_features are the same as features on tun/tap and
1065 * reflect user expectations. 1070 * reflect user expectations.
1066 */ 1071 */
1067 vlan->tap_features = vlan->dev->features & 1072 vlan->tap_features = feature_mask;
1068 (feature_mask | ~TUN_OFFLOADS);
1069 vlan->set_features = features; 1073 vlan->set_features = features;
1070 netdev_update_features(vlan->dev); 1074 netdev_update_features(vlan->dev);
1071 1075
@@ -1161,10 +1165,6 @@ static long macvtap_ioctl(struct file *file, unsigned int cmd,
1161 TUN_F_TSO_ECN | TUN_F_UFO)) 1165 TUN_F_TSO_ECN | TUN_F_UFO))
1162 return -EINVAL; 1166 return -EINVAL;
1163 1167
1164 /* TODO: only accept frames with the features that
1165 got enabled for forwarded frames */
1166 if (!(q->flags & IFF_VNET_HDR))
1167 return -EINVAL;
1168 rtnl_lock(); 1168 rtnl_lock();
1169 ret = set_offload(q, arg); 1169 ret = set_offload(q, arg);
1170 rtnl_unlock(); 1170 rtnl_unlock();
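The macvtap hunks above rework the forward-path feature mask: the baseline is now TAP_FEATURES (GSO | SG), and the user-selected tap_features are only added when the queue carries a virtio-net header, because without IFF_VNET_HDR the reader cannot be handed oversized frames and the kernel has to segment them in software. A hedged sketch of just that mask computation, with a simplified helper name:

/* Compute the effective feature mask for a forwarded skb; TAP_FEATURES
 * matches the macro added above, my_forward_features() is illustrative.
 */
#include <linux/if_macvlan.h>
#include <linux/if_tun.h>
#include <linux/netdevice.h>

static netdev_features_t my_forward_features(const struct macvlan_dev *vlan,
					     unsigned int queue_flags)
{
	netdev_features_t features = NETIF_F_GSO | NETIF_F_SG;	/* TAP_FEATURES */

	/* Honour the user's offload choices only when the receiver parses
	 * a virtio-net header; otherwise software GSO does the work.
	 */
	if (queue_flags & IFF_VNET_HDR)
		features |= vlan->tap_features;

	return features;
}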
diff --git a/drivers/net/phy/realtek.c b/drivers/net/phy/realtek.c
index 8e7af8354342..138de837977f 100644
--- a/drivers/net/phy/realtek.c
+++ b/drivers/net/phy/realtek.c
@@ -23,7 +23,7 @@
23#define RTL821x_INER_INIT 0x6400 23#define RTL821x_INER_INIT 0x6400
24#define RTL821x_INSR 0x13 24#define RTL821x_INSR 0x13
25 25
26#define RTL8211E_INER_LINK_STAT 0x10 26#define RTL8211E_INER_LINK_STATUS 0x400
27 27
28MODULE_DESCRIPTION("Realtek PHY driver"); 28MODULE_DESCRIPTION("Realtek PHY driver");
29MODULE_AUTHOR("Johnson Leung"); 29MODULE_AUTHOR("Johnson Leung");
@@ -57,7 +57,7 @@ static int rtl8211e_config_intr(struct phy_device *phydev)
57 57
58 if (phydev->interrupts == PHY_INTERRUPT_ENABLED) 58 if (phydev->interrupts == PHY_INTERRUPT_ENABLED)
59 err = phy_write(phydev, RTL821x_INER, 59 err = phy_write(phydev, RTL821x_INER,
60 RTL8211E_INER_LINK_STAT); 60 RTL8211E_INER_LINK_STATUS);
61 else 61 else
62 err = phy_write(phydev, RTL821x_INER, 0); 62 err = phy_write(phydev, RTL821x_INER, 0);
63 63
diff --git a/drivers/net/usb/cdc_mbim.c b/drivers/net/usb/cdc_mbim.c
index 872819851aef..25ba7eca9a13 100644
--- a/drivers/net/usb/cdc_mbim.c
+++ b/drivers/net/usb/cdc_mbim.c
@@ -400,6 +400,10 @@ static const struct usb_device_id mbim_devs[] = {
400 { USB_DEVICE_AND_INTERFACE_INFO(0x1199, 0x68a2, USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE), 400 { USB_DEVICE_AND_INTERFACE_INFO(0x1199, 0x68a2, USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE),
401 .driver_info = (unsigned long)&cdc_mbim_info_zlp, 401 .driver_info = (unsigned long)&cdc_mbim_info_zlp,
402 }, 402 },
403 /* HP hs2434 Mobile Broadband Module needs ZLPs */
404 { USB_DEVICE_AND_INTERFACE_INFO(0x3f0, 0x4b1d, USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE),
405 .driver_info = (unsigned long)&cdc_mbim_info_zlp,
406 },
403 { USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE), 407 { USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE),
404 .driver_info = (unsigned long)&cdc_mbim_info, 408 .driver_info = (unsigned long)&cdc_mbim_info,
405 }, 409 },
diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
index cba1d46e672e..86292e6aaf49 100644
--- a/drivers/net/usb/hso.c
+++ b/drivers/net/usb/hso.c
@@ -2816,13 +2816,16 @@ exit:
2816static int hso_get_config_data(struct usb_interface *interface) 2816static int hso_get_config_data(struct usb_interface *interface)
2817{ 2817{
2818 struct usb_device *usbdev = interface_to_usbdev(interface); 2818 struct usb_device *usbdev = interface_to_usbdev(interface);
2819 u8 config_data[17]; 2819 u8 *config_data = kmalloc(17, GFP_KERNEL);
2820 u32 if_num = interface->altsetting->desc.bInterfaceNumber; 2820 u32 if_num = interface->altsetting->desc.bInterfaceNumber;
2821 s32 result; 2821 s32 result;
2822 2822
2823 if (!config_data)
2824 return -ENOMEM;
2823 if (usb_control_msg(usbdev, usb_rcvctrlpipe(usbdev, 0), 2825 if (usb_control_msg(usbdev, usb_rcvctrlpipe(usbdev, 0),
2824 0x86, 0xC0, 0, 0, config_data, 17, 2826 0x86, 0xC0, 0, 0, config_data, 17,
2825 USB_CTRL_SET_TIMEOUT) != 0x11) { 2827 USB_CTRL_SET_TIMEOUT) != 0x11) {
2828 kfree(config_data);
2826 return -EIO; 2829 return -EIO;
2827 } 2830 }
2828 2831
@@ -2873,6 +2876,7 @@ static int hso_get_config_data(struct usb_interface *interface)
2873 if (config_data[16] & 0x1) 2876 if (config_data[16] & 0x1)
2874 result |= HSO_INFO_CRC_BUG; 2877 result |= HSO_INFO_CRC_BUG;
2875 2878
2879 kfree(config_data);
2876 return result; 2880 return result;
2877} 2881}
2878 2882
@@ -2886,6 +2890,11 @@ static int hso_probe(struct usb_interface *interface,
2886 struct hso_shared_int *shared_int; 2890 struct hso_shared_int *shared_int;
2887 struct hso_device *tmp_dev = NULL; 2891 struct hso_device *tmp_dev = NULL;
2888 2892
2893 if (interface->cur_altsetting->desc.bInterfaceClass != 0xFF) {
2894 dev_err(&interface->dev, "Not our interface\n");
2895 return -ENODEV;
2896 }
2897
2889 if_num = interface->altsetting->desc.bInterfaceNumber; 2898 if_num = interface->altsetting->desc.bInterfaceNumber;
2890 2899
2891 /* Get the interface/port specification from either driver_info or from 2900 /* Get the interface/port specification from either driver_info or from
@@ -2895,10 +2904,6 @@ static int hso_probe(struct usb_interface *interface,
2895 else 2904 else
2896 port_spec = hso_get_config_data(interface); 2905 port_spec = hso_get_config_data(interface);
2897 2906
2898 if (interface->cur_altsetting->desc.bInterfaceClass != 0xFF) {
2899 dev_err(&interface->dev, "Not our interface\n");
2900 return -ENODEV;
2901 }
2902 /* Check if we need to switch to alt interfaces prior to port 2907 /* Check if we need to switch to alt interfaces prior to port
2903 * configuration */ 2908 * configuration */
2904 if (interface->num_altsetting > 1) 2909 if (interface->num_altsetting > 1)
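The hso_get_config_data() hunk above replaces an on-stack config_data[17] array with a kmalloc() buffer: buffers passed to usb_control_msg() must be DMA-able, which stack memory is not guaranteed to be. A minimal sketch of that pattern; the 0x86/0xC0 request values mirror the hso code above, everything else is illustrative:

/* Heap-allocate the transfer buffer for a USB control read, then copy the
 * result out and free it; never point USB transfers at the stack.
 */
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/usb.h>

static int my_read_config(struct usb_device *udev, u8 *out, u16 len)
{
	u8 *buf = kmalloc(len, GFP_KERNEL);	/* DMA-able, unlike the stack */
	int ret;

	if (!buf)
		return -ENOMEM;

	ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
			      0x86, 0xC0, 0, 0, buf, len,
			      USB_CTRL_SET_TIMEOUT);
	if (ret == len)
		memcpy(out, buf, len);

	kfree(buf);
	return ret == len ? 0 : -EIO;
}

The same rule motivates the zd1201_fw_upload() change further down, which reads into the driver's existing heap buffer and memcpy()s the status word out afterwards.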
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
index e602c9519709..c028df76b564 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
@@ -448,6 +448,7 @@ static void ath9k_htc_tx_process(struct ath9k_htc_priv *priv,
448 struct ieee80211_conf *cur_conf = &priv->hw->conf; 448 struct ieee80211_conf *cur_conf = &priv->hw->conf;
449 bool txok; 449 bool txok;
450 int slot; 450 int slot;
451 int hdrlen, padsize;
451 452
452 slot = strip_drv_header(priv, skb); 453 slot = strip_drv_header(priv, skb);
453 if (slot < 0) { 454 if (slot < 0) {
@@ -504,6 +505,15 @@ send_mac80211:
504 505
505 ath9k_htc_tx_clear_slot(priv, slot); 506 ath9k_htc_tx_clear_slot(priv, slot);
506 507
508 /* Remove padding before handing frame back to mac80211 */
509 hdrlen = ieee80211_get_hdrlen_from_skb(skb);
510
511 padsize = hdrlen & 3;
512 if (padsize && skb->len > hdrlen + padsize) {
513 memmove(skb->data + padsize, skb->data, hdrlen);
514 skb_pull(skb, padsize);
515 }
516
507 /* Send status to mac80211 */ 517 /* Send status to mac80211 */
508 ieee80211_tx_status(priv->hw, skb); 518 ieee80211_tx_status(priv->hw, skb);
509} 519}
diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c
index 16f8b201642b..026a2a067b46 100644
--- a/drivers/net/wireless/ath/ath9k/init.c
+++ b/drivers/net/wireless/ath/ath9k/init.c
@@ -802,7 +802,8 @@ void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
802 IEEE80211_HW_PS_NULLFUNC_STACK | 802 IEEE80211_HW_PS_NULLFUNC_STACK |
803 IEEE80211_HW_SPECTRUM_MGMT | 803 IEEE80211_HW_SPECTRUM_MGMT |
804 IEEE80211_HW_REPORTS_TX_ACK_STATUS | 804 IEEE80211_HW_REPORTS_TX_ACK_STATUS |
805 IEEE80211_HW_SUPPORTS_RC_TABLE; 805 IEEE80211_HW_SUPPORTS_RC_TABLE |
806 IEEE80211_HW_SUPPORTS_HT_CCK_RATES;
806 807
807 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT) { 808 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT) {
808 hw->flags |= IEEE80211_HW_AMPDU_AGGREGATION; 809 hw->flags |= IEEE80211_HW_AMPDU_AGGREGATION;
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index 1737a3e33685..cb5a65553ac7 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -173,8 +173,7 @@ static void ath_restart_work(struct ath_softc *sc)
173{ 173{
174 ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work, 0); 174 ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work, 0);
175 175
176 if (AR_SREV_9340(sc->sc_ah) || AR_SREV_9485(sc->sc_ah) || 176 if (AR_SREV_9340(sc->sc_ah) || AR_SREV_9330(sc->sc_ah))
177 AR_SREV_9550(sc->sc_ah))
178 ieee80211_queue_delayed_work(sc->hw, &sc->hw_pll_work, 177 ieee80211_queue_delayed_work(sc->hw, &sc->hw_pll_work,
179 msecs_to_jiffies(ATH_PLL_WORK_INTERVAL)); 178 msecs_to_jiffies(ATH_PLL_WORK_INTERVAL));
180 179
diff --git a/drivers/net/wireless/ath/carl9170/main.c b/drivers/net/wireless/ath/carl9170/main.c
index 4a33c6e39ca2..349fa22a921a 100644
--- a/drivers/net/wireless/ath/carl9170/main.c
+++ b/drivers/net/wireless/ath/carl9170/main.c
@@ -1860,7 +1860,8 @@ void *carl9170_alloc(size_t priv_size)
1860 IEEE80211_HW_PS_NULLFUNC_STACK | 1860 IEEE80211_HW_PS_NULLFUNC_STACK |
1861 IEEE80211_HW_NEED_DTIM_BEFORE_ASSOC | 1861 IEEE80211_HW_NEED_DTIM_BEFORE_ASSOC |
1862 IEEE80211_HW_SUPPORTS_RC_TABLE | 1862 IEEE80211_HW_SUPPORTS_RC_TABLE |
1863 IEEE80211_HW_SIGNAL_DBM; 1863 IEEE80211_HW_SIGNAL_DBM |
1864 IEEE80211_HW_SUPPORTS_HT_CCK_RATES;
1864 1865
1865 if (!modparam_noht) { 1866 if (!modparam_noht) {
1866 /* 1867 /*
diff --git a/drivers/net/wireless/hostap/hostap_ioctl.c b/drivers/net/wireless/hostap/hostap_ioctl.c
index ac074731335a..e5090309824e 100644
--- a/drivers/net/wireless/hostap/hostap_ioctl.c
+++ b/drivers/net/wireless/hostap/hostap_ioctl.c
@@ -523,9 +523,9 @@ static int prism2_ioctl_giwaplist(struct net_device *dev,
523 523
524 data->length = prism2_ap_get_sta_qual(local, addr, qual, IW_MAX_AP, 1); 524 data->length = prism2_ap_get_sta_qual(local, addr, qual, IW_MAX_AP, 1);
525 525
526 memcpy(extra, &addr, sizeof(struct sockaddr) * data->length); 526 memcpy(extra, addr, sizeof(struct sockaddr) * data->length);
527 data->flags = 1; /* has quality information */ 527 data->flags = 1; /* has quality information */
528 memcpy(extra + sizeof(struct sockaddr) * data->length, &qual, 528 memcpy(extra + sizeof(struct sockaddr) * data->length, qual,
529 sizeof(struct iw_quality) * data->length); 529 sizeof(struct iw_quality) * data->length);
530 530
531 kfree(addr); 531 kfree(addr);
diff --git a/drivers/net/wireless/iwlegacy/4965-mac.c b/drivers/net/wireless/iwlegacy/4965-mac.c
index f2ed62e37340..7acf5ee23582 100644
--- a/drivers/net/wireless/iwlegacy/4965-mac.c
+++ b/drivers/net/wireless/iwlegacy/4965-mac.c
@@ -4464,9 +4464,9 @@ il4965_irq_tasklet(struct il_priv *il)
4464 set_bit(S_RFKILL, &il->status); 4464 set_bit(S_RFKILL, &il->status);
4465 } else { 4465 } else {
4466 clear_bit(S_RFKILL, &il->status); 4466 clear_bit(S_RFKILL, &il->status);
4467 wiphy_rfkill_set_hw_state(il->hw->wiphy, hw_rf_kill);
4468 il_force_reset(il, true); 4467 il_force_reset(il, true);
4469 } 4468 }
4469 wiphy_rfkill_set_hw_state(il->hw->wiphy, hw_rf_kill);
4470 4470
4471 handled |= CSR_INT_BIT_RF_KILL; 4471 handled |= CSR_INT_BIT_RF_KILL;
4472 } 4472 }
diff --git a/drivers/net/wireless/iwlwifi/dvm/mac80211.c b/drivers/net/wireless/iwlwifi/dvm/mac80211.c
index 822f1a00efbb..319387263e12 100644
--- a/drivers/net/wireless/iwlwifi/dvm/mac80211.c
+++ b/drivers/net/wireless/iwlwifi/dvm/mac80211.c
@@ -1068,7 +1068,10 @@ void iwl_chswitch_done(struct iwl_priv *priv, bool is_success)
1068 if (test_bit(STATUS_EXIT_PENDING, &priv->status)) 1068 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
1069 return; 1069 return;
1070 1070
1071 if (test_and_clear_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status)) 1071 if (!test_and_clear_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status))
1072 return;
1073
1074 if (ctx->vif)
1072 ieee80211_chswitch_done(ctx->vif, is_success); 1075 ieee80211_chswitch_done(ctx->vif, is_success);
1073} 1076}
1074 1077
diff --git a/drivers/net/wireless/iwlwifi/iwl-prph.h b/drivers/net/wireless/iwlwifi/iwl-prph.h
index a70c7b9d9bad..ff8cc75c189d 100644
--- a/drivers/net/wireless/iwlwifi/iwl-prph.h
+++ b/drivers/net/wireless/iwlwifi/iwl-prph.h
@@ -97,8 +97,6 @@
97 97
98#define APMG_PCIDEV_STT_VAL_L1_ACT_DIS (0x00000800) 98#define APMG_PCIDEV_STT_VAL_L1_ACT_DIS (0x00000800)
99 99
100#define APMG_RTC_INT_STT_RFKILL (0x10000000)
101
102/* Device system time */ 100/* Device system time */
103#define DEVICE_SYSTEM_TIME_REG 0xA0206C 101#define DEVICE_SYSTEM_TIME_REG 0xA0206C
104 102
diff --git a/drivers/net/wireless/iwlwifi/mvm/time-event.c b/drivers/net/wireless/iwlwifi/mvm/time-event.c
index ad9bbca99213..7fd6fbfbc1b3 100644
--- a/drivers/net/wireless/iwlwifi/mvm/time-event.c
+++ b/drivers/net/wireless/iwlwifi/mvm/time-event.c
@@ -138,6 +138,20 @@ static void iwl_mvm_roc_finished(struct iwl_mvm *mvm)
138 schedule_work(&mvm->roc_done_wk); 138 schedule_work(&mvm->roc_done_wk);
139} 139}
140 140
141static bool iwl_mvm_te_check_disconnect(struct iwl_mvm *mvm,
142 struct ieee80211_vif *vif,
143 const char *errmsg)
144{
145 if (vif->type != NL80211_IFTYPE_STATION)
146 return false;
147 if (vif->bss_conf.assoc && vif->bss_conf.dtim_period)
148 return false;
149 if (errmsg)
150 IWL_ERR(mvm, "%s\n", errmsg);
151 ieee80211_connection_loss(vif);
152 return true;
153}
154
141/* 155/*
142 * Handles a FW notification for an event that is known to the driver. 156 * Handles a FW notification for an event that is known to the driver.
143 * 157 *
@@ -163,8 +177,13 @@ static void iwl_mvm_te_handle_notif(struct iwl_mvm *mvm,
163 * P2P Device discoveribility, while there are other higher priority 177 * P2P Device discoveribility, while there are other higher priority
164 * events in the system). 178 * events in the system).
165 */ 179 */
166 WARN_ONCE(!le32_to_cpu(notif->status), 180 if (WARN_ONCE(!le32_to_cpu(notif->status),
167 "Failed to schedule time event\n"); 181 "Failed to schedule time event\n")) {
182 if (iwl_mvm_te_check_disconnect(mvm, te_data->vif, NULL)) {
183 iwl_mvm_te_clear_data(mvm, te_data);
184 return;
185 }
186 }
168 187
169 if (le32_to_cpu(notif->action) & TE_NOTIF_HOST_EVENT_END) { 188 if (le32_to_cpu(notif->action) & TE_NOTIF_HOST_EVENT_END) {
170 IWL_DEBUG_TE(mvm, 189 IWL_DEBUG_TE(mvm,
@@ -180,14 +199,8 @@ static void iwl_mvm_te_handle_notif(struct iwl_mvm *mvm,
180 * By now, we should have finished association 199 * By now, we should have finished association
181 * and know the dtim period. 200 * and know the dtim period.
182 */ 201 */
183 if (te_data->vif->type == NL80211_IFTYPE_STATION && 202 iwl_mvm_te_check_disconnect(mvm, te_data->vif,
184 (!te_data->vif->bss_conf.assoc || 203 "No assocation and the time event is over already...");
185 !te_data->vif->bss_conf.dtim_period)) {
186 IWL_ERR(mvm,
187 "No assocation and the time event is over already...\n");
188 ieee80211_connection_loss(te_data->vif);
189 }
190
191 iwl_mvm_te_clear_data(mvm, te_data); 204 iwl_mvm_te_clear_data(mvm, te_data);
192 } else if (le32_to_cpu(notif->action) & TE_NOTIF_HOST_EVENT_START) { 205 } else if (le32_to_cpu(notif->action) & TE_NOTIF_HOST_EVENT_START) {
193 te_data->running = true; 206 te_data->running = true;
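The time-event hunk above folds the "station not associated / no DTIM period yet" handling into iwl_mvm_te_check_disconnect() and branches on the return value of WARN_ONCE(), which evaluates to the warned condition, so the scheduling-failure path can disconnect and clear the event immediately. A small sketch of that WARN_ONCE-as-condition idiom under hypothetical names:

/* WARN_ONCE() returns its condition, so it can gate the recovery path
 * directly; struct my_ctx and my_handle_failure() are placeholders.
 */
#include <linux/bug.h>
#include <linux/types.h>

struct my_ctx {
	bool running;
};

static void my_handle_failure(struct my_ctx *ctx)
{
	ctx->running = false;		/* tear down the failed event */
}

static void my_check_status(struct my_ctx *ctx, u32 status)
{
	if (WARN_ONCE(!status, "Failed to schedule time event\n")) {
		my_handle_failure(ctx);
		return;
	}

	ctx->running = true;		/* normal handling continues */
}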
diff --git a/drivers/net/wireless/iwlwifi/pcie/rx.c b/drivers/net/wireless/iwlwifi/pcie/rx.c
index f600e68a410a..fd848cd1583e 100644
--- a/drivers/net/wireless/iwlwifi/pcie/rx.c
+++ b/drivers/net/wireless/iwlwifi/pcie/rx.c
@@ -888,14 +888,6 @@ irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id)
888 888
889 iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill); 889 iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill);
890 if (hw_rfkill) { 890 if (hw_rfkill) {
891 /*
892 * Clear the interrupt in APMG if the NIC is going down.
893 * Note that when the NIC exits RFkill (else branch), we
894 * can't access prph and the NIC will be reset in
895 * start_hw anyway.
896 */
897 iwl_write_prph(trans, APMG_RTC_INT_STT_REG,
898 APMG_RTC_INT_STT_RFKILL);
899 set_bit(STATUS_RFKILL, &trans_pcie->status); 891 set_bit(STATUS_RFKILL, &trans_pcie->status);
900 if (test_and_clear_bit(STATUS_HCMD_ACTIVE, 892 if (test_and_clear_bit(STATUS_HCMD_ACTIVE,
901 &trans_pcie->status)) 893 &trans_pcie->status))
diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c
index 96cfcdd39079..390e2f058aff 100644
--- a/drivers/net/wireless/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/iwlwifi/pcie/trans.c
@@ -1502,16 +1502,16 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
1502 spin_lock_init(&trans_pcie->reg_lock); 1502 spin_lock_init(&trans_pcie->reg_lock);
1503 init_waitqueue_head(&trans_pcie->ucode_write_waitq); 1503 init_waitqueue_head(&trans_pcie->ucode_write_waitq);
1504 1504
1505 /* W/A - seems to solve weird behavior. We need to remove this if we
1506 * don't want to stay in L1 all the time. This wastes a lot of power */
1507 pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 |
1508 PCIE_LINK_STATE_CLKPM);
1509
1510 if (pci_enable_device(pdev)) { 1505 if (pci_enable_device(pdev)) {
1511 err = -ENODEV; 1506 err = -ENODEV;
1512 goto out_no_pci; 1507 goto out_no_pci;
1513 } 1508 }
1514 1509
1510 /* W/A - seems to solve weird behavior. We need to remove this if we
1511 * don't want to stay in L1 all the time. This wastes a lot of power */
1512 pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 |
1513 PCIE_LINK_STATE_CLKPM);
1514
1515 pci_set_master(pdev); 1515 pci_set_master(pdev);
1516 1516
1517 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(36)); 1517 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(36));
diff --git a/drivers/net/wireless/rt2x00/rt2800lib.c b/drivers/net/wireless/rt2x00/rt2800lib.c
index 1f80ea5e29dd..1b41c8eda12d 100644
--- a/drivers/net/wireless/rt2x00/rt2800lib.c
+++ b/drivers/net/wireless/rt2x00/rt2800lib.c
@@ -6133,7 +6133,8 @@ static int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
6133 IEEE80211_HW_SUPPORTS_PS | 6133 IEEE80211_HW_SUPPORTS_PS |
6134 IEEE80211_HW_PS_NULLFUNC_STACK | 6134 IEEE80211_HW_PS_NULLFUNC_STACK |
6135 IEEE80211_HW_AMPDU_AGGREGATION | 6135 IEEE80211_HW_AMPDU_AGGREGATION |
6136 IEEE80211_HW_REPORTS_TX_ACK_STATUS; 6136 IEEE80211_HW_REPORTS_TX_ACK_STATUS |
6137 IEEE80211_HW_SUPPORTS_HT_CCK_RATES;
6137 6138
6138 /* 6139 /*
6139 * Don't set IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING for USB devices 6140 * Don't set IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING for USB devices
diff --git a/drivers/net/wireless/zd1201.c b/drivers/net/wireless/zd1201.c
index 4941f201d6c8..b8ba1f925e75 100644
--- a/drivers/net/wireless/zd1201.c
+++ b/drivers/net/wireless/zd1201.c
@@ -98,10 +98,12 @@ static int zd1201_fw_upload(struct usb_device *dev, int apfw)
98 goto exit; 98 goto exit;
99 99
100 err = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), 0x4, 100 err = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), 0x4,
101 USB_DIR_IN | 0x40, 0,0, &ret, sizeof(ret), ZD1201_FW_TIMEOUT); 101 USB_DIR_IN | 0x40, 0, 0, buf, sizeof(ret), ZD1201_FW_TIMEOUT);
102 if (err < 0) 102 if (err < 0)
103 goto exit; 103 goto exit;
104 104
105 memcpy(&ret, buf, sizeof(ret));
106
105 if (ret & 0x80) { 107 if (ret & 0x80) {
106 err = -EIO; 108 err = -EIO;
107 goto exit; 109 goto exit;
diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c
index 6bb7cf2de556..b10ba00cc3e6 100644
--- a/drivers/of/fdt.c
+++ b/drivers/of/fdt.c
@@ -392,6 +392,8 @@ static void __unflatten_device_tree(struct boot_param_header *blob,
392 mem = (unsigned long) 392 mem = (unsigned long)
393 dt_alloc(size + 4, __alignof__(struct device_node)); 393 dt_alloc(size + 4, __alignof__(struct device_node));
394 394
395 memset((void *)mem, 0, size);
396
395 ((__be32 *)mem)[size / 4] = cpu_to_be32(0xdeadbeef); 397 ((__be32 *)mem)[size / 4] = cpu_to_be32(0xdeadbeef);
396 398
397 pr_debug(" unflattening %lx...\n", mem); 399 pr_debug(" unflattening %lx...\n", mem);
diff --git a/drivers/pinctrl/pinctrl-sunxi.c b/drivers/pinctrl/pinctrl-sunxi.c
index c47fd1e5450b..94716c779800 100644
--- a/drivers/pinctrl/pinctrl-sunxi.c
+++ b/drivers/pinctrl/pinctrl-sunxi.c
@@ -278,6 +278,7 @@ static int sunxi_pconf_group_set(struct pinctrl_dev *pctldev,
278{ 278{
279 struct sunxi_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev); 279 struct sunxi_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev);
280 struct sunxi_pinctrl_group *g = &pctl->groups[group]; 280 struct sunxi_pinctrl_group *g = &pctl->groups[group];
281 unsigned long flags;
281 u32 val, mask; 282 u32 val, mask;
282 u16 strength; 283 u16 strength;
283 u8 dlevel; 284 u8 dlevel;
@@ -295,22 +296,35 @@ static int sunxi_pconf_group_set(struct pinctrl_dev *pctldev,
295 * 3: 40mA 296 * 3: 40mA
296 */ 297 */
297 dlevel = strength / 10 - 1; 298 dlevel = strength / 10 - 1;
299
300 spin_lock_irqsave(&pctl->lock, flags);
301
298 val = readl(pctl->membase + sunxi_dlevel_reg(g->pin)); 302 val = readl(pctl->membase + sunxi_dlevel_reg(g->pin));
299 mask = DLEVEL_PINS_MASK << sunxi_dlevel_offset(g->pin); 303 mask = DLEVEL_PINS_MASK << sunxi_dlevel_offset(g->pin);
300 writel((val & ~mask) | dlevel << sunxi_dlevel_offset(g->pin), 304 writel((val & ~mask) | dlevel << sunxi_dlevel_offset(g->pin),
301 pctl->membase + sunxi_dlevel_reg(g->pin)); 305 pctl->membase + sunxi_dlevel_reg(g->pin));
306
307 spin_unlock_irqrestore(&pctl->lock, flags);
302 break; 308 break;
303 case PIN_CONFIG_BIAS_PULL_UP: 309 case PIN_CONFIG_BIAS_PULL_UP:
310 spin_lock_irqsave(&pctl->lock, flags);
311
304 val = readl(pctl->membase + sunxi_pull_reg(g->pin)); 312 val = readl(pctl->membase + sunxi_pull_reg(g->pin));
305 mask = PULL_PINS_MASK << sunxi_pull_offset(g->pin); 313 mask = PULL_PINS_MASK << sunxi_pull_offset(g->pin);
306 writel((val & ~mask) | 1 << sunxi_pull_offset(g->pin), 314 writel((val & ~mask) | 1 << sunxi_pull_offset(g->pin),
307 pctl->membase + sunxi_pull_reg(g->pin)); 315 pctl->membase + sunxi_pull_reg(g->pin));
316
317 spin_unlock_irqrestore(&pctl->lock, flags);
308 break; 318 break;
309 case PIN_CONFIG_BIAS_PULL_DOWN: 319 case PIN_CONFIG_BIAS_PULL_DOWN:
320 spin_lock_irqsave(&pctl->lock, flags);
321
310 val = readl(pctl->membase + sunxi_pull_reg(g->pin)); 322 val = readl(pctl->membase + sunxi_pull_reg(g->pin));
311 mask = PULL_PINS_MASK << sunxi_pull_offset(g->pin); 323 mask = PULL_PINS_MASK << sunxi_pull_offset(g->pin);
312 writel((val & ~mask) | 2 << sunxi_pull_offset(g->pin), 324 writel((val & ~mask) | 2 << sunxi_pull_offset(g->pin),
313 pctl->membase + sunxi_pull_reg(g->pin)); 325 pctl->membase + sunxi_pull_reg(g->pin));
326
327 spin_unlock_irqrestore(&pctl->lock, flags);
314 break; 328 break;
315 default: 329 default:
316 break; 330 break;
@@ -360,11 +374,17 @@ static void sunxi_pmx_set(struct pinctrl_dev *pctldev,
360 u8 config) 374 u8 config)
361{ 375{
362 struct sunxi_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev); 376 struct sunxi_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev);
377 unsigned long flags;
378 u32 val, mask;
379
380 spin_lock_irqsave(&pctl->lock, flags);
363 381
364 u32 val = readl(pctl->membase + sunxi_mux_reg(pin)); 382 val = readl(pctl->membase + sunxi_mux_reg(pin));
365 u32 mask = MUX_PINS_MASK << sunxi_mux_offset(pin); 383 mask = MUX_PINS_MASK << sunxi_mux_offset(pin);
366 writel((val & ~mask) | config << sunxi_mux_offset(pin), 384 writel((val & ~mask) | config << sunxi_mux_offset(pin),
367 pctl->membase + sunxi_mux_reg(pin)); 385 pctl->membase + sunxi_mux_reg(pin));
386
387 spin_unlock_irqrestore(&pctl->lock, flags);
368} 388}
369 389
370static int sunxi_pmx_enable(struct pinctrl_dev *pctldev, 390static int sunxi_pmx_enable(struct pinctrl_dev *pctldev,
@@ -464,8 +484,21 @@ static void sunxi_pinctrl_gpio_set(struct gpio_chip *chip,
464 struct sunxi_pinctrl *pctl = dev_get_drvdata(chip->dev); 484 struct sunxi_pinctrl *pctl = dev_get_drvdata(chip->dev);
465 u32 reg = sunxi_data_reg(offset); 485 u32 reg = sunxi_data_reg(offset);
466 u8 index = sunxi_data_offset(offset); 486 u8 index = sunxi_data_offset(offset);
487 unsigned long flags;
488 u32 regval;
489
490 spin_lock_irqsave(&pctl->lock, flags);
491
492 regval = readl(pctl->membase + reg);
467 493
468 writel((value & DATA_PINS_MASK) << index, pctl->membase + reg); 494 if (value)
495 regval |= BIT(index);
496 else
497 regval &= ~(BIT(index));
498
499 writel(regval, pctl->membase + reg);
500
501 spin_unlock_irqrestore(&pctl->lock, flags);
469} 502}
470 503
471static int sunxi_pinctrl_gpio_of_xlate(struct gpio_chip *gc, 504static int sunxi_pinctrl_gpio_of_xlate(struct gpio_chip *gc,
@@ -526,6 +559,8 @@ static int sunxi_pinctrl_irq_set_type(struct irq_data *d,
526 struct sunxi_pinctrl *pctl = irq_data_get_irq_chip_data(d); 559 struct sunxi_pinctrl *pctl = irq_data_get_irq_chip_data(d);
527 u32 reg = sunxi_irq_cfg_reg(d->hwirq); 560 u32 reg = sunxi_irq_cfg_reg(d->hwirq);
528 u8 index = sunxi_irq_cfg_offset(d->hwirq); 561 u8 index = sunxi_irq_cfg_offset(d->hwirq);
562 unsigned long flags;
563 u32 regval;
529 u8 mode; 564 u8 mode;
530 565
531 switch (type) { 566 switch (type) {
@@ -548,7 +583,13 @@ static int sunxi_pinctrl_irq_set_type(struct irq_data *d,
548 return -EINVAL; 583 return -EINVAL;
549 } 584 }
550 585
551 writel((mode & IRQ_CFG_IRQ_MASK) << index, pctl->membase + reg); 586 spin_lock_irqsave(&pctl->lock, flags);
587
588 regval = readl(pctl->membase + reg);
589 regval &= ~IRQ_CFG_IRQ_MASK;
590 writel(regval | (mode << index), pctl->membase + reg);
591
592 spin_unlock_irqrestore(&pctl->lock, flags);
552 593
553 return 0; 594 return 0;
554} 595}
@@ -560,14 +601,19 @@ static void sunxi_pinctrl_irq_mask_ack(struct irq_data *d)
560 u8 ctrl_idx = sunxi_irq_ctrl_offset(d->hwirq); 601 u8 ctrl_idx = sunxi_irq_ctrl_offset(d->hwirq);
561 u32 status_reg = sunxi_irq_status_reg(d->hwirq); 602 u32 status_reg = sunxi_irq_status_reg(d->hwirq);
562 u8 status_idx = sunxi_irq_status_offset(d->hwirq); 603 u8 status_idx = sunxi_irq_status_offset(d->hwirq);
604 unsigned long flags;
563 u32 val; 605 u32 val;
564 606
607 spin_lock_irqsave(&pctl->lock, flags);
608
565 /* Mask the IRQ */ 609 /* Mask the IRQ */
566 val = readl(pctl->membase + ctrl_reg); 610 val = readl(pctl->membase + ctrl_reg);
567 writel(val & ~(1 << ctrl_idx), pctl->membase + ctrl_reg); 611 writel(val & ~(1 << ctrl_idx), pctl->membase + ctrl_reg);
568 612
569 /* Clear the IRQ */ 613 /* Clear the IRQ */
570 writel(1 << status_idx, pctl->membase + status_reg); 614 writel(1 << status_idx, pctl->membase + status_reg);
615
616 spin_unlock_irqrestore(&pctl->lock, flags);
571} 617}
572 618
573static void sunxi_pinctrl_irq_mask(struct irq_data *d) 619static void sunxi_pinctrl_irq_mask(struct irq_data *d)
@@ -575,11 +621,16 @@ static void sunxi_pinctrl_irq_mask(struct irq_data *d)
575 struct sunxi_pinctrl *pctl = irq_data_get_irq_chip_data(d); 621 struct sunxi_pinctrl *pctl = irq_data_get_irq_chip_data(d);
576 u32 reg = sunxi_irq_ctrl_reg(d->hwirq); 622 u32 reg = sunxi_irq_ctrl_reg(d->hwirq);
577 u8 idx = sunxi_irq_ctrl_offset(d->hwirq); 623 u8 idx = sunxi_irq_ctrl_offset(d->hwirq);
624 unsigned long flags;
578 u32 val; 625 u32 val;
579 626
627 spin_lock_irqsave(&pctl->lock, flags);
628
580 /* Mask the IRQ */ 629 /* Mask the IRQ */
581 val = readl(pctl->membase + reg); 630 val = readl(pctl->membase + reg);
582 writel(val & ~(1 << idx), pctl->membase + reg); 631 writel(val & ~(1 << idx), pctl->membase + reg);
632
633 spin_unlock_irqrestore(&pctl->lock, flags);
583} 634}
584 635
585static void sunxi_pinctrl_irq_unmask(struct irq_data *d) 636static void sunxi_pinctrl_irq_unmask(struct irq_data *d)
@@ -588,6 +639,7 @@ static void sunxi_pinctrl_irq_unmask(struct irq_data *d)
588 struct sunxi_desc_function *func; 639 struct sunxi_desc_function *func;
589 u32 reg = sunxi_irq_ctrl_reg(d->hwirq); 640 u32 reg = sunxi_irq_ctrl_reg(d->hwirq);
590 u8 idx = sunxi_irq_ctrl_offset(d->hwirq); 641 u8 idx = sunxi_irq_ctrl_offset(d->hwirq);
642 unsigned long flags;
591 u32 val; 643 u32 val;
592 644
593 func = sunxi_pinctrl_desc_find_function_by_pin(pctl, 645 func = sunxi_pinctrl_desc_find_function_by_pin(pctl,
@@ -597,9 +649,13 @@ static void sunxi_pinctrl_irq_unmask(struct irq_data *d)
597 /* Change muxing to INT mode */ 649 /* Change muxing to INT mode */
598 sunxi_pmx_set(pctl->pctl_dev, pctl->irq_array[d->hwirq], func->muxval); 650 sunxi_pmx_set(pctl->pctl_dev, pctl->irq_array[d->hwirq], func->muxval);
599 651
652 spin_lock_irqsave(&pctl->lock, flags);
653
600 /* Unmask the IRQ */ 654 /* Unmask the IRQ */
601 val = readl(pctl->membase + reg); 655 val = readl(pctl->membase + reg);
602 writel(val | (1 << idx), pctl->membase + reg); 656 writel(val | (1 << idx), pctl->membase + reg);
657
658 spin_unlock_irqrestore(&pctl->lock, flags);
603} 659}
604 660
605static struct irq_chip sunxi_pinctrl_irq_chip = { 661static struct irq_chip sunxi_pinctrl_irq_chip = {
@@ -752,6 +808,8 @@ static int sunxi_pinctrl_probe(struct platform_device *pdev)
752 return -ENOMEM; 808 return -ENOMEM;
753 platform_set_drvdata(pdev, pctl); 809 platform_set_drvdata(pdev, pctl);
754 810
811 spin_lock_init(&pctl->lock);
812
755 pctl->membase = of_iomap(node, 0); 813 pctl->membase = of_iomap(node, 0);
756 if (!pctl->membase) 814 if (!pctl->membase)
757 return -ENOMEM; 815 return -ENOMEM;
diff --git a/drivers/pinctrl/pinctrl-sunxi.h b/drivers/pinctrl/pinctrl-sunxi.h
index d68047d8f699..01c494f8a14f 100644
--- a/drivers/pinctrl/pinctrl-sunxi.h
+++ b/drivers/pinctrl/pinctrl-sunxi.h
@@ -14,6 +14,7 @@
14#define __PINCTRL_SUNXI_H 14#define __PINCTRL_SUNXI_H
15 15
16#include <linux/kernel.h> 16#include <linux/kernel.h>
17#include <linux/spinlock.h>
17 18
18#define PA_BASE 0 19#define PA_BASE 0
19#define PB_BASE 32 20#define PB_BASE 32
@@ -407,6 +408,7 @@ struct sunxi_pinctrl {
407 unsigned ngroups; 408 unsigned ngroups;
408 int irq; 409 int irq;
409 int irq_array[SUNXI_IRQ_NUMBER]; 410 int irq_array[SUNXI_IRQ_NUMBER];
411 spinlock_t lock;
410 struct pinctrl_dev *pctl_dev; 412 struct pinctrl_dev *pctl_dev;
411}; 413};
412 414
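
The sunxi hunks above all have the same shape: take pctl->lock with spin_lock_irqsave(), read the register, change only the bits belonging to this pin or IRQ, write the word back, and drop the lock. Below is a minimal userspace sketch of that read-modify-write-under-a-lock pattern, with a pthread mutex standing in for the kernel spinlock; fake_reg and set_bit_locked() are invented names for illustration, not driver code.

/*
 * Userspace analogue of the locked read-modify-write the patch adds:
 * two threads set and clear different bits of one shared "register"
 * word under a mutex, so no update is ever lost. fake_reg and
 * set_bit_locked() are invented names, not driver code.
 */
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t fake_reg;               /* stands in for the MMIO register */
static pthread_mutex_t reg_lock = PTHREAD_MUTEX_INITIALIZER;

static void set_bit_locked(unsigned int bit, int value)
{
        pthread_mutex_lock(&reg_lock);  /* the driver uses spin_lock_irqsave() */
        if (value)
                fake_reg |= 1u << bit;
        else
                fake_reg &= ~(1u << bit);
        pthread_mutex_unlock(&reg_lock);
}

static void *worker(void *arg)
{
        unsigned int bit = (unsigned int)(uintptr_t)arg;
        int i;

        for (i = 0; i < 100000; i++) {
                set_bit_locked(bit, 1);
                set_bit_locked(bit, 0);
        }
        set_bit_locked(bit, 1);
        return NULL;
}

int main(void)
{
        pthread_t a, b;

        pthread_create(&a, NULL, worker, (void *)0UL);
        pthread_create(&b, NULL, worker, (void *)1UL);
        pthread_join(a, NULL);
        pthread_join(b, NULL);
        printf("final register image: 0x%08x\n", fake_reg);    /* expect 0x3 */
        return 0;
}

Without the lock the two read-modify-write sequences can interleave and one thread's bit goes missing, which is exactly the race the new pctl->lock closes in sunxi_pinctrl_gpio_set() and the IRQ helpers.
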
diff --git a/drivers/platform/olpc/olpc-ec.c b/drivers/platform/olpc/olpc-ec.c
index 0f9f8596b300..f9119525f557 100644
--- a/drivers/platform/olpc/olpc-ec.c
+++ b/drivers/platform/olpc/olpc-ec.c
@@ -330,7 +330,7 @@ static int __init olpc_ec_init_module(void)
330 return platform_driver_register(&olpc_ec_plat_driver); 330 return platform_driver_register(&olpc_ec_plat_driver);
331} 331}
332 332
333module_init(olpc_ec_init_module); 333arch_initcall(olpc_ec_init_module);
334 334
335MODULE_AUTHOR("Andres Salomon <dilinger@queued.net>"); 335MODULE_AUTHOR("Andres Salomon <dilinger@queued.net>");
336MODULE_LICENSE("GPL"); 336MODULE_LICENSE("GPL");
diff --git a/drivers/platform/x86/hp-wmi.c b/drivers/platform/x86/hp-wmi.c
index 97bb05edcb5a..d6970f47ae72 100644
--- a/drivers/platform/x86/hp-wmi.c
+++ b/drivers/platform/x86/hp-wmi.c
@@ -53,7 +53,6 @@ MODULE_ALIAS("wmi:5FB7F034-2C63-45e9-BE91-3D44E2C707E4");
53#define HPWMI_ALS_QUERY 0x3 53#define HPWMI_ALS_QUERY 0x3
54#define HPWMI_HARDWARE_QUERY 0x4 54#define HPWMI_HARDWARE_QUERY 0x4
55#define HPWMI_WIRELESS_QUERY 0x5 55#define HPWMI_WIRELESS_QUERY 0x5
56#define HPWMI_BIOS_QUERY 0x9
57#define HPWMI_HOTKEY_QUERY 0xc 56#define HPWMI_HOTKEY_QUERY 0xc
58#define HPWMI_WIRELESS2_QUERY 0x1b 57#define HPWMI_WIRELESS2_QUERY 0x1b
59#define HPWMI_POSTCODEERROR_QUERY 0x2a 58#define HPWMI_POSTCODEERROR_QUERY 0x2a
@@ -293,19 +292,6 @@ static int hp_wmi_tablet_state(void)
293 return (state & 0x4) ? 1 : 0; 292 return (state & 0x4) ? 1 : 0;
294} 293}
295 294
296static int hp_wmi_enable_hotkeys(void)
297{
298 int ret;
299 int query = 0x6e;
300
301 ret = hp_wmi_perform_query(HPWMI_BIOS_QUERY, 1, &query, sizeof(query),
302 0);
303
304 if (ret)
305 return -EINVAL;
306 return 0;
307}
308
309static int hp_wmi_set_block(void *data, bool blocked) 295static int hp_wmi_set_block(void *data, bool blocked)
310{ 296{
311 enum hp_wmi_radio r = (enum hp_wmi_radio) data; 297 enum hp_wmi_radio r = (enum hp_wmi_radio) data;
@@ -1009,8 +995,6 @@ static int __init hp_wmi_init(void)
1009 err = hp_wmi_input_setup(); 995 err = hp_wmi_input_setup();
1010 if (err) 996 if (err)
1011 return err; 997 return err;
1012
1013 hp_wmi_enable_hotkeys();
1014 } 998 }
1015 999
1016 if (bios_capable) { 1000 if (bios_capable) {
diff --git a/drivers/platform/x86/sony-laptop.c b/drivers/platform/x86/sony-laptop.c
index 2ac045f27f10..3a1b6bf326a8 100644
--- a/drivers/platform/x86/sony-laptop.c
+++ b/drivers/platform/x86/sony-laptop.c
@@ -2440,7 +2440,10 @@ static ssize_t sony_nc_gfx_switch_status_show(struct device *dev,
2440 if (pos < 0) 2440 if (pos < 0)
2441 return pos; 2441 return pos;
2442 2442
2443 return snprintf(buffer, PAGE_SIZE, "%s\n", pos ? "speed" : "stamina"); 2443 return snprintf(buffer, PAGE_SIZE, "%s\n",
2444 pos == SPEED ? "speed" :
2445 pos == STAMINA ? "stamina" :
2446 pos == AUTO ? "auto" : "unknown");
2444} 2447}
2445 2448
2446static int sony_nc_gfx_switch_setup(struct platform_device *pd, 2449static int sony_nc_gfx_switch_setup(struct platform_device *pd,
@@ -4320,7 +4323,8 @@ static int sony_pic_add(struct acpi_device *device)
4320 goto err_free_resources; 4323 goto err_free_resources;
4321 } 4324 }
4322 4325
4323 if (sonypi_compat_init()) 4326 result = sonypi_compat_init();
4327 if (result)
4324 goto err_remove_input; 4328 goto err_remove_input;
4325 4329
4326 /* request io port */ 4330 /* request io port */
diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c
index 1d4c8fe72752..c82fe65c4128 100644
--- a/drivers/s390/scsi/zfcp_erp.c
+++ b/drivers/s390/scsi/zfcp_erp.c
@@ -102,10 +102,13 @@ static void zfcp_erp_action_dismiss_port(struct zfcp_port *port)
102 102
103 if (atomic_read(&port->status) & ZFCP_STATUS_COMMON_ERP_INUSE) 103 if (atomic_read(&port->status) & ZFCP_STATUS_COMMON_ERP_INUSE)
104 zfcp_erp_action_dismiss(&port->erp_action); 104 zfcp_erp_action_dismiss(&port->erp_action);
105 else 105 else {
106 shost_for_each_device(sdev, port->adapter->scsi_host) 106 spin_lock(port->adapter->scsi_host->host_lock);
107 __shost_for_each_device(sdev, port->adapter->scsi_host)
107 if (sdev_to_zfcp(sdev)->port == port) 108 if (sdev_to_zfcp(sdev)->port == port)
108 zfcp_erp_action_dismiss_lun(sdev); 109 zfcp_erp_action_dismiss_lun(sdev);
110 spin_unlock(port->adapter->scsi_host->host_lock);
111 }
109} 112}
110 113
111static void zfcp_erp_action_dismiss_adapter(struct zfcp_adapter *adapter) 114static void zfcp_erp_action_dismiss_adapter(struct zfcp_adapter *adapter)
@@ -592,9 +595,11 @@ static void _zfcp_erp_lun_reopen_all(struct zfcp_port *port, int clear,
592{ 595{
593 struct scsi_device *sdev; 596 struct scsi_device *sdev;
594 597
595 shost_for_each_device(sdev, port->adapter->scsi_host) 598 spin_lock(port->adapter->scsi_host->host_lock);
599 __shost_for_each_device(sdev, port->adapter->scsi_host)
596 if (sdev_to_zfcp(sdev)->port == port) 600 if (sdev_to_zfcp(sdev)->port == port)
597 _zfcp_erp_lun_reopen(sdev, clear, id, 0); 601 _zfcp_erp_lun_reopen(sdev, clear, id, 0);
602 spin_unlock(port->adapter->scsi_host->host_lock);
598} 603}
599 604
600static void zfcp_erp_strategy_followup_failed(struct zfcp_erp_action *act) 605static void zfcp_erp_strategy_followup_failed(struct zfcp_erp_action *act)
@@ -1434,8 +1439,10 @@ void zfcp_erp_set_adapter_status(struct zfcp_adapter *adapter, u32 mask)
1434 atomic_set_mask(common_mask, &port->status); 1439 atomic_set_mask(common_mask, &port->status);
1435 read_unlock_irqrestore(&adapter->port_list_lock, flags); 1440 read_unlock_irqrestore(&adapter->port_list_lock, flags);
1436 1441
1437 shost_for_each_device(sdev, adapter->scsi_host) 1442 spin_lock_irqsave(adapter->scsi_host->host_lock, flags);
1443 __shost_for_each_device(sdev, adapter->scsi_host)
1438 atomic_set_mask(common_mask, &sdev_to_zfcp(sdev)->status); 1444 atomic_set_mask(common_mask, &sdev_to_zfcp(sdev)->status);
1445 spin_unlock_irqrestore(adapter->scsi_host->host_lock, flags);
1439} 1446}
1440 1447
1441/** 1448/**
@@ -1469,11 +1476,13 @@ void zfcp_erp_clear_adapter_status(struct zfcp_adapter *adapter, u32 mask)
1469 } 1476 }
1470 read_unlock_irqrestore(&adapter->port_list_lock, flags); 1477 read_unlock_irqrestore(&adapter->port_list_lock, flags);
1471 1478
1472 shost_for_each_device(sdev, adapter->scsi_host) { 1479 spin_lock_irqsave(adapter->scsi_host->host_lock, flags);
1480 __shost_for_each_device(sdev, adapter->scsi_host) {
1473 atomic_clear_mask(common_mask, &sdev_to_zfcp(sdev)->status); 1481 atomic_clear_mask(common_mask, &sdev_to_zfcp(sdev)->status);
1474 if (clear_counter) 1482 if (clear_counter)
1475 atomic_set(&sdev_to_zfcp(sdev)->erp_counter, 0); 1483 atomic_set(&sdev_to_zfcp(sdev)->erp_counter, 0);
1476 } 1484 }
1485 spin_unlock_irqrestore(adapter->scsi_host->host_lock, flags);
1477} 1486}
1478 1487
1479/** 1488/**
@@ -1487,16 +1496,19 @@ void zfcp_erp_set_port_status(struct zfcp_port *port, u32 mask)
1487{ 1496{
1488 struct scsi_device *sdev; 1497 struct scsi_device *sdev;
1489 u32 common_mask = mask & ZFCP_COMMON_FLAGS; 1498 u32 common_mask = mask & ZFCP_COMMON_FLAGS;
1499 unsigned long flags;
1490 1500
1491 atomic_set_mask(mask, &port->status); 1501 atomic_set_mask(mask, &port->status);
1492 1502
1493 if (!common_mask) 1503 if (!common_mask)
1494 return; 1504 return;
1495 1505
1496 shost_for_each_device(sdev, port->adapter->scsi_host) 1506 spin_lock_irqsave(port->adapter->scsi_host->host_lock, flags);
1507 __shost_for_each_device(sdev, port->adapter->scsi_host)
1497 if (sdev_to_zfcp(sdev)->port == port) 1508 if (sdev_to_zfcp(sdev)->port == port)
1498 atomic_set_mask(common_mask, 1509 atomic_set_mask(common_mask,
1499 &sdev_to_zfcp(sdev)->status); 1510 &sdev_to_zfcp(sdev)->status);
1511 spin_unlock_irqrestore(port->adapter->scsi_host->host_lock, flags);
1500} 1512}
1501 1513
1502/** 1514/**
@@ -1511,6 +1523,7 @@ void zfcp_erp_clear_port_status(struct zfcp_port *port, u32 mask)
1511 struct scsi_device *sdev; 1523 struct scsi_device *sdev;
1512 u32 common_mask = mask & ZFCP_COMMON_FLAGS; 1524 u32 common_mask = mask & ZFCP_COMMON_FLAGS;
1513 u32 clear_counter = mask & ZFCP_STATUS_COMMON_ERP_FAILED; 1525 u32 clear_counter = mask & ZFCP_STATUS_COMMON_ERP_FAILED;
1526 unsigned long flags;
1514 1527
1515 atomic_clear_mask(mask, &port->status); 1528 atomic_clear_mask(mask, &port->status);
1516 1529
@@ -1520,13 +1533,15 @@ void zfcp_erp_clear_port_status(struct zfcp_port *port, u32 mask)
1520 if (clear_counter) 1533 if (clear_counter)
1521 atomic_set(&port->erp_counter, 0); 1534 atomic_set(&port->erp_counter, 0);
1522 1535
1523 shost_for_each_device(sdev, port->adapter->scsi_host) 1536 spin_lock_irqsave(port->adapter->scsi_host->host_lock, flags);
1537 __shost_for_each_device(sdev, port->adapter->scsi_host)
1524 if (sdev_to_zfcp(sdev)->port == port) { 1538 if (sdev_to_zfcp(sdev)->port == port) {
1525 atomic_clear_mask(common_mask, 1539 atomic_clear_mask(common_mask,
1526 &sdev_to_zfcp(sdev)->status); 1540 &sdev_to_zfcp(sdev)->status);
1527 if (clear_counter) 1541 if (clear_counter)
1528 atomic_set(&sdev_to_zfcp(sdev)->erp_counter, 0); 1542 atomic_set(&sdev_to_zfcp(sdev)->erp_counter, 0);
1529 } 1543 }
1544 spin_unlock_irqrestore(port->adapter->scsi_host->host_lock, flags);
1530} 1545}
1531 1546
1532/** 1547/**
diff --git a/drivers/s390/scsi/zfcp_qdio.c b/drivers/s390/scsi/zfcp_qdio.c
index 665e3cfaaf85..de0598eaacd2 100644
--- a/drivers/s390/scsi/zfcp_qdio.c
+++ b/drivers/s390/scsi/zfcp_qdio.c
@@ -224,11 +224,9 @@ int zfcp_qdio_sbals_from_sg(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req,
224 224
225static int zfcp_qdio_sbal_check(struct zfcp_qdio *qdio) 225static int zfcp_qdio_sbal_check(struct zfcp_qdio *qdio)
226{ 226{
227 spin_lock_irq(&qdio->req_q_lock);
228 if (atomic_read(&qdio->req_q_free) || 227 if (atomic_read(&qdio->req_q_free) ||
229 !(atomic_read(&qdio->adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP)) 228 !(atomic_read(&qdio->adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP))
230 return 1; 229 return 1;
231 spin_unlock_irq(&qdio->req_q_lock);
232 return 0; 230 return 0;
233} 231}
234 232
@@ -246,9 +244,8 @@ int zfcp_qdio_sbal_get(struct zfcp_qdio *qdio)
246{ 244{
247 long ret; 245 long ret;
248 246
249 spin_unlock_irq(&qdio->req_q_lock); 247 ret = wait_event_interruptible_lock_irq_timeout(qdio->req_q_wq,
250 ret = wait_event_interruptible_timeout(qdio->req_q_wq, 248 zfcp_qdio_sbal_check(qdio), qdio->req_q_lock, 5 * HZ);
251 zfcp_qdio_sbal_check(qdio), 5 * HZ);
252 249
253 if (!(atomic_read(&qdio->adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP)) 250 if (!(atomic_read(&qdio->adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP))
254 return -EIO; 251 return -EIO;
@@ -262,7 +259,6 @@ int zfcp_qdio_sbal_get(struct zfcp_qdio *qdio)
262 zfcp_erp_adapter_reopen(qdio->adapter, 0, "qdsbg_1"); 259 zfcp_erp_adapter_reopen(qdio->adapter, 0, "qdsbg_1");
263 } 260 }
264 261
265 spin_lock_irq(&qdio->req_q_lock);
266 return -EIO; 262 return -EIO;
267} 263}
268 264
diff --git a/drivers/s390/scsi/zfcp_sysfs.c b/drivers/s390/scsi/zfcp_sysfs.c
index 3f01bbf0609f..890639274bcf 100644
--- a/drivers/s390/scsi/zfcp_sysfs.c
+++ b/drivers/s390/scsi/zfcp_sysfs.c
@@ -27,6 +27,16 @@ static ssize_t zfcp_sysfs_##_feat##_##_name##_show(struct device *dev, \
27static ZFCP_DEV_ATTR(_feat, _name, S_IRUGO, \ 27static ZFCP_DEV_ATTR(_feat, _name, S_IRUGO, \
28 zfcp_sysfs_##_feat##_##_name##_show, NULL); 28 zfcp_sysfs_##_feat##_##_name##_show, NULL);
29 29
30#define ZFCP_DEFINE_ATTR_CONST(_feat, _name, _format, _value) \
31static ssize_t zfcp_sysfs_##_feat##_##_name##_show(struct device *dev, \
32 struct device_attribute *at,\
33 char *buf) \
34{ \
35 return sprintf(buf, _format, _value); \
36} \
37static ZFCP_DEV_ATTR(_feat, _name, S_IRUGO, \
38 zfcp_sysfs_##_feat##_##_name##_show, NULL);
39
30#define ZFCP_DEFINE_A_ATTR(_name, _format, _value) \ 40#define ZFCP_DEFINE_A_ATTR(_name, _format, _value) \
31static ssize_t zfcp_sysfs_adapter_##_name##_show(struct device *dev, \ 41static ssize_t zfcp_sysfs_adapter_##_name##_show(struct device *dev, \
32 struct device_attribute *at,\ 42 struct device_attribute *at,\
@@ -75,6 +85,8 @@ ZFCP_DEFINE_ATTR(zfcp_unit, unit, in_recovery, "%d\n",
75ZFCP_DEFINE_ATTR(zfcp_unit, unit, access_denied, "%d\n", 85ZFCP_DEFINE_ATTR(zfcp_unit, unit, access_denied, "%d\n",
76 (zfcp_unit_sdev_status(unit) & 86 (zfcp_unit_sdev_status(unit) &
77 ZFCP_STATUS_COMMON_ACCESS_DENIED) != 0); 87 ZFCP_STATUS_COMMON_ACCESS_DENIED) != 0);
88ZFCP_DEFINE_ATTR_CONST(unit, access_shared, "%d\n", 0);
89ZFCP_DEFINE_ATTR_CONST(unit, access_readonly, "%d\n", 0);
78 90
79static ssize_t zfcp_sysfs_port_failed_show(struct device *dev, 91static ssize_t zfcp_sysfs_port_failed_show(struct device *dev,
80 struct device_attribute *attr, 92 struct device_attribute *attr,
@@ -347,6 +359,8 @@ static struct attribute *zfcp_unit_attrs[] = {
347 &dev_attr_unit_in_recovery.attr, 359 &dev_attr_unit_in_recovery.attr,
348 &dev_attr_unit_status.attr, 360 &dev_attr_unit_status.attr,
349 &dev_attr_unit_access_denied.attr, 361 &dev_attr_unit_access_denied.attr,
362 &dev_attr_unit_access_shared.attr,
363 &dev_attr_unit_access_readonly.attr,
350 NULL 364 NULL
351}; 365};
352static struct attribute_group zfcp_unit_attr_group = { 366static struct attribute_group zfcp_unit_attr_group = {
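
ZFCP_DEFINE_ATTR_CONST above generates a complete sysfs show() routine per attribute by token pasting. Here is a stand-alone sketch of the same preprocessor technique, with invented names (DEFINE_CONST_SHOW, show_access_shared) rather than the zfcp macros:

/*
 * Minimal sketch of the token-pasting pattern behind ZFCP_DEFINE_ATTR_CONST:
 * one macro expands into a full show-style function per attribute.
 * The attribute names below are made up for illustration.
 */
#include <stdio.h>

#define DEFINE_CONST_SHOW(_name, _format, _value)               \
static int show_##_name(char *buf, size_t len)                  \
{                                                               \
        return snprintf(buf, len, _format, _value);             \
}

DEFINE_CONST_SHOW(access_shared, "%d\n", 0)
DEFINE_CONST_SHOW(access_readonly, "%d\n", 0)

int main(void)
{
        char buf[16];

        show_access_shared(buf, sizeof(buf));
        printf("access_shared = %s", buf);
        show_access_readonly(buf, sizeof(buf));
        printf("access_readonly = %s", buf);
        return 0;
}

Each expansion is a real function definition, which is why the kernel macro can then take the address of the generated zfcp_sysfs_unit_access_shared_show() when wiring up the device attribute.
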
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index 48b2918e0d65..92ff027746f2 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -1353,7 +1353,6 @@ config SCSI_LPFC
1353 tristate "Emulex LightPulse Fibre Channel Support" 1353 tristate "Emulex LightPulse Fibre Channel Support"
1354 depends on PCI && SCSI 1354 depends on PCI && SCSI
1355 select SCSI_FC_ATTRS 1355 select SCSI_FC_ATTRS
1356 select GENERIC_CSUM
1357 select CRC_T10DIF 1356 select CRC_T10DIF
1358 help 1357 help
1359 This lpfc driver supports the Emulex LightPulse 1358 This lpfc driver supports the Emulex LightPulse
diff --git a/drivers/staging/comedi/drivers.c b/drivers/staging/comedi/drivers.c
index e25eba5713c1..b3b5125faa72 100644
--- a/drivers/staging/comedi/drivers.c
+++ b/drivers/staging/comedi/drivers.c
@@ -482,7 +482,7 @@ int comedi_device_attach(struct comedi_device *dev, struct comedi_devconfig *it)
482 ret = comedi_device_postconfig(dev); 482 ret = comedi_device_postconfig(dev);
483 if (ret < 0) { 483 if (ret < 0) {
484 comedi_device_detach(dev); 484 comedi_device_detach(dev);
485 module_put(dev->driver->module); 485 module_put(driv->module);
486 } 486 }
487 /* On success, the driver module count has been incremented. */ 487 /* On success, the driver module count has been incremented. */
488 return ret; 488 return ret;
diff --git a/drivers/tty/hvc/hvsi_lib.c b/drivers/tty/hvc/hvsi_lib.c
index 3396eb9d57a3..ac2767100df5 100644
--- a/drivers/tty/hvc/hvsi_lib.c
+++ b/drivers/tty/hvc/hvsi_lib.c
@@ -341,8 +341,8 @@ void hvsilib_establish(struct hvsi_priv *pv)
341 341
342 pr_devel("HVSI@%x: ... waiting handshake\n", pv->termno); 342 pr_devel("HVSI@%x: ... waiting handshake\n", pv->termno);
343 343
344 /* Try for up to 200s */ 344 /* Try for up to 400ms */
345 for (timeout = 0; timeout < 20; timeout++) { 345 for (timeout = 0; timeout < 40; timeout++) {
346 if (pv->established) 346 if (pv->established)
347 goto established; 347 goto established;
348 if (!hvsi_get_packet(pv)) 348 if (!hvsi_get_packet(pv))
diff --git a/drivers/usb/host/ohci-pci.c b/drivers/usb/host/ohci-pci.c
index 08613e241894..279b04910f00 100644
--- a/drivers/usb/host/ohci-pci.c
+++ b/drivers/usb/host/ohci-pci.c
@@ -304,6 +304,13 @@ static int __init ohci_pci_init(void)
304 pr_info("%s: " DRIVER_DESC "\n", hcd_name); 304 pr_info("%s: " DRIVER_DESC "\n", hcd_name);
305 305
306 ohci_init_driver(&ohci_pci_hc_driver, &pci_overrides); 306 ohci_init_driver(&ohci_pci_hc_driver, &pci_overrides);
307
308#ifdef CONFIG_PM
309 /* Entries for the PCI suspend/resume callbacks are special */
310 ohci_pci_hc_driver.pci_suspend = ohci_suspend;
311 ohci_pci_hc_driver.pci_resume = ohci_resume;
312#endif
313
307 return pci_register_driver(&ohci_pci_driver); 314 return pci_register_driver(&ohci_pci_driver);
308} 315}
309module_init(ohci_pci_init); 316module_init(ohci_pci_init);
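
The ohci-pci hunk fills in two members of an otherwise shared hc_driver template at init time, and only when power management is compiled in. A small stand-alone sketch of that idea follows; the structure and the CONFIG_PM_DEMO macro are invented for the example.

/*
 * Sketch of the "patch a couple of members of a generic ops structure
 * at init time" pattern used for the OHCI PCI suspend/resume hooks.
 * Everything here is made up for illustration.
 */
#include <stdio.h>

struct hc_driver {
        const char *description;
        int (*suspend)(void);
        int (*resume)(void);
};

static int pci_suspend(void) { puts("suspend"); return 0; }
static int pci_resume(void)  { puts("resume");  return 0; }

/* generic template shared by several glue drivers */
static struct hc_driver ohci_driver = {
        .description = "ohci-demo",
};

int main(void)
{
#ifdef CONFIG_PM_DEMO           /* compile with -DCONFIG_PM_DEMO to enable */
        ohci_driver.suspend = pci_suspend;
        ohci_driver.resume = pci_resume;
#endif
        printf("%s ready (suspend hook %s)\n", ohci_driver.description,
               ohci_driver.suspend ? "set" : "not set");
        if (ohci_driver.suspend)
                ohci_driver.suspend();
        if (ohci_driver.resume)
                ohci_driver.resume();
        return 0;
}
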
diff --git a/drivers/usb/phy/phy-fsl-usb.h b/drivers/usb/phy/phy-fsl-usb.h
index ca266280895d..e1859b8ef567 100644
--- a/drivers/usb/phy/phy-fsl-usb.h
+++ b/drivers/usb/phy/phy-fsl-usb.h
@@ -15,7 +15,7 @@
15 * 675 Mass Ave, Cambridge, MA 02139, USA. 15 * 675 Mass Ave, Cambridge, MA 02139, USA.
16 */ 16 */
17 17
18#include "otg_fsm.h" 18#include "phy-fsm-usb.h"
19#include <linux/usb/otg.h> 19#include <linux/usb/otg.h>
20#include <linux/ioctl.h> 20#include <linux/ioctl.h>
21 21
diff --git a/drivers/usb/phy/phy-fsm-usb.c b/drivers/usb/phy/phy-fsm-usb.c
index c520b3548e7c..7f4596606e18 100644
--- a/drivers/usb/phy/phy-fsm-usb.c
+++ b/drivers/usb/phy/phy-fsm-usb.c
@@ -29,7 +29,7 @@
29#include <linux/usb/gadget.h> 29#include <linux/usb/gadget.h>
30#include <linux/usb/otg.h> 30#include <linux/usb/otg.h>
31 31
32#include "phy-otg-fsm.h" 32#include "phy-fsm-usb.h"
33 33
34/* Change USB protocol when there is a protocol change */ 34/* Change USB protocol when there is a protocol change */
35static int otg_set_protocol(struct otg_fsm *fsm, int protocol) 35static int otg_set_protocol(struct otg_fsm *fsm, int protocol)
diff --git a/drivers/xen/events.c b/drivers/xen/events.c
index a58ac435a9a4..5e8be462aed5 100644
--- a/drivers/xen/events.c
+++ b/drivers/xen/events.c
@@ -348,7 +348,7 @@ static void init_evtchn_cpu_bindings(void)
348 348
349 for_each_possible_cpu(i) 349 for_each_possible_cpu(i)
350 memset(per_cpu(cpu_evtchn_mask, i), 350 memset(per_cpu(cpu_evtchn_mask, i),
351 (i == 0) ? ~0 : 0, sizeof(*per_cpu(cpu_evtchn_mask, i))); 351 (i == 0) ? ~0 : 0, NR_EVENT_CHANNELS/8);
352} 352}
353 353
354static inline void clear_evtchn(int port) 354static inline void clear_evtchn(int port)
@@ -1493,8 +1493,10 @@ void rebind_evtchn_irq(int evtchn, int irq)
1493/* Rebind an evtchn so that it gets delivered to a specific cpu */ 1493/* Rebind an evtchn so that it gets delivered to a specific cpu */
1494static int rebind_irq_to_cpu(unsigned irq, unsigned tcpu) 1494static int rebind_irq_to_cpu(unsigned irq, unsigned tcpu)
1495{ 1495{
1496 struct shared_info *s = HYPERVISOR_shared_info;
1496 struct evtchn_bind_vcpu bind_vcpu; 1497 struct evtchn_bind_vcpu bind_vcpu;
1497 int evtchn = evtchn_from_irq(irq); 1498 int evtchn = evtchn_from_irq(irq);
1499 int masked;
1498 1500
1499 if (!VALID_EVTCHN(evtchn)) 1501 if (!VALID_EVTCHN(evtchn))
1500 return -1; 1502 return -1;
@@ -1511,6 +1513,12 @@ static int rebind_irq_to_cpu(unsigned irq, unsigned tcpu)
1511 bind_vcpu.vcpu = tcpu; 1513 bind_vcpu.vcpu = tcpu;
1512 1514
1513 /* 1515 /*
 1516 * Mask the event while changing the VCPU binding to prevent
 1517 * it from being delivered on an unexpected VCPU.

1518 */
1519 masked = sync_test_and_set_bit(evtchn, BM(s->evtchn_mask));
1520
1521 /*
1514 * If this fails, it usually just indicates that we're dealing with a 1522 * If this fails, it usually just indicates that we're dealing with a
1515 * virq or IPI channel, which don't actually need to be rebound. Ignore 1523 * virq or IPI channel, which don't actually need to be rebound. Ignore
1516 * it, but don't do the xenlinux-level rebind in that case. 1524 * it, but don't do the xenlinux-level rebind in that case.
@@ -1518,6 +1526,9 @@ static int rebind_irq_to_cpu(unsigned irq, unsigned tcpu)
1518 if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_vcpu, &bind_vcpu) >= 0) 1526 if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_vcpu, &bind_vcpu) >= 0)
1519 bind_evtchn_to_cpu(evtchn, tcpu); 1527 bind_evtchn_to_cpu(evtchn, tcpu);
1520 1528
1529 if (!masked)
1530 unmask_evtchn(evtchn);
1531
1521 return 0; 1532 return 0;
1522} 1533}
1523 1534
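
rebind_irq_to_cpu() above masks the event channel before changing its VCPU binding and only unmasks it afterwards if it was not already masked, so a mask set elsewhere is preserved. The same save-and-conditionally-restore idea in stand-alone form, with the event channel reduced to one atomic flag:

/*
 * Userspace analogue of the mask/rebind/unmask dance in
 * rebind_irq_to_cpu(): remember whether the event was already masked,
 * mask it while rebinding, and only unmask it if we were the ones who
 * masked it. All names are illustrative.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_bool evtchn_masked;

static void rebind_event(void)
{
        /* returns the previous value, i.e. "was it already masked?" */
        bool masked = atomic_exchange(&evtchn_masked, true);

        /* ... change the binding while no events can be delivered ... */

        if (!masked)
                atomic_store(&evtchn_masked, false);
}

int main(void)
{
        rebind_event();
        printf("masked after rebind: %d\n", atomic_load(&evtchn_masked));
        return 0;
}
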
diff --git a/fs/bfs/inode.c b/fs/bfs/inode.c
index 5e376bb93419..8defc6b3f9a2 100644
--- a/fs/bfs/inode.c
+++ b/fs/bfs/inode.c
@@ -40,7 +40,7 @@ struct inode *bfs_iget(struct super_block *sb, unsigned long ino)
40 int block, off; 40 int block, off;
41 41
42 inode = iget_locked(sb, ino); 42 inode = iget_locked(sb, ino);
43 if (IS_ERR(inode)) 43 if (!inode)
44 return ERR_PTR(-ENOMEM); 44 return ERR_PTR(-ENOMEM);
45 if (!(inode->i_state & I_NEW)) 45 if (!(inode->i_state & I_NEW))
46 return inode; 46 return inode;
diff --git a/fs/bio.c b/fs/bio.c
index 94bbc04dba77..c5eae7251490 100644
--- a/fs/bio.c
+++ b/fs/bio.c
@@ -1045,12 +1045,22 @@ static int __bio_copy_iov(struct bio *bio, struct bio_vec *iovecs,
1045int bio_uncopy_user(struct bio *bio) 1045int bio_uncopy_user(struct bio *bio)
1046{ 1046{
1047 struct bio_map_data *bmd = bio->bi_private; 1047 struct bio_map_data *bmd = bio->bi_private;
1048 int ret = 0; 1048 struct bio_vec *bvec;
1049 int ret = 0, i;
1049 1050
1050 if (!bio_flagged(bio, BIO_NULL_MAPPED)) 1051 if (!bio_flagged(bio, BIO_NULL_MAPPED)) {
1051 ret = __bio_copy_iov(bio, bmd->iovecs, bmd->sgvecs, 1052 /*
1052 bmd->nr_sgvecs, bio_data_dir(bio) == READ, 1053 * if we're in a workqueue, the request is orphaned, so
1053 0, bmd->is_our_pages); 1054 * don't copy into a random user address space, just free.
1055 */
1056 if (current->mm)
1057 ret = __bio_copy_iov(bio, bmd->iovecs, bmd->sgvecs,
1058 bmd->nr_sgvecs, bio_data_dir(bio) == READ,
1059 0, bmd->is_our_pages);
1060 else if (bmd->is_our_pages)
1061 bio_for_each_segment_all(bvec, bio, i)
1062 __free_page(bvec->bv_page);
1063 }
1054 bio_free_map_data(bmd); 1064 bio_free_map_data(bmd);
1055 bio_put(bio); 1065 bio_put(bio);
1056 return ret; 1066 return ret;
diff --git a/fs/dcache.c b/fs/dcache.c
index 87bdb5329c3c..b949af850cd6 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -229,7 +229,7 @@ static void __d_free(struct rcu_head *head)
229 */ 229 */
230static void d_free(struct dentry *dentry) 230static void d_free(struct dentry *dentry)
231{ 231{
232 BUG_ON(dentry->d_count); 232 BUG_ON(dentry->d_lockref.count);
233 this_cpu_dec(nr_dentry); 233 this_cpu_dec(nr_dentry);
234 if (dentry->d_op && dentry->d_op->d_release) 234 if (dentry->d_op && dentry->d_op->d_release)
235 dentry->d_op->d_release(dentry); 235 dentry->d_op->d_release(dentry);
@@ -467,7 +467,7 @@ relock:
467 } 467 }
468 468
469 if (ref) 469 if (ref)
470 dentry->d_count--; 470 dentry->d_lockref.count--;
471 /* 471 /*
472 * inform the fs via d_prune that this dentry is about to be 472 * inform the fs via d_prune that this dentry is about to be
473 * unhashed and destroyed. 473 * unhashed and destroyed.
@@ -513,15 +513,10 @@ void dput(struct dentry *dentry)
513 return; 513 return;
514 514
515repeat: 515repeat:
516 if (dentry->d_count == 1) 516 if (dentry->d_lockref.count == 1)
517 might_sleep(); 517 might_sleep();
518 spin_lock(&dentry->d_lock); 518 if (lockref_put_or_lock(&dentry->d_lockref))
519 BUG_ON(!dentry->d_count);
520 if (dentry->d_count > 1) {
521 dentry->d_count--;
522 spin_unlock(&dentry->d_lock);
523 return; 519 return;
524 }
525 520
526 if (dentry->d_flags & DCACHE_OP_DELETE) { 521 if (dentry->d_flags & DCACHE_OP_DELETE) {
527 if (dentry->d_op->d_delete(dentry)) 522 if (dentry->d_op->d_delete(dentry))
@@ -535,7 +530,7 @@ repeat:
535 dentry->d_flags |= DCACHE_REFERENCED; 530 dentry->d_flags |= DCACHE_REFERENCED;
536 dentry_lru_add(dentry); 531 dentry_lru_add(dentry);
537 532
538 dentry->d_count--; 533 dentry->d_lockref.count--;
539 spin_unlock(&dentry->d_lock); 534 spin_unlock(&dentry->d_lock);
540 return; 535 return;
541 536
@@ -590,7 +585,7 @@ int d_invalidate(struct dentry * dentry)
590 * We also need to leave mountpoints alone, 585 * We also need to leave mountpoints alone,
591 * directory or not. 586 * directory or not.
592 */ 587 */
593 if (dentry->d_count > 1 && dentry->d_inode) { 588 if (dentry->d_lockref.count > 1 && dentry->d_inode) {
594 if (S_ISDIR(dentry->d_inode->i_mode) || d_mountpoint(dentry)) { 589 if (S_ISDIR(dentry->d_inode->i_mode) || d_mountpoint(dentry)) {
595 spin_unlock(&dentry->d_lock); 590 spin_unlock(&dentry->d_lock);
596 return -EBUSY; 591 return -EBUSY;
@@ -606,14 +601,12 @@ EXPORT_SYMBOL(d_invalidate);
606/* This must be called with d_lock held */ 601/* This must be called with d_lock held */
607static inline void __dget_dlock(struct dentry *dentry) 602static inline void __dget_dlock(struct dentry *dentry)
608{ 603{
609 dentry->d_count++; 604 dentry->d_lockref.count++;
610} 605}
611 606
612static inline void __dget(struct dentry *dentry) 607static inline void __dget(struct dentry *dentry)
613{ 608{
614 spin_lock(&dentry->d_lock); 609 lockref_get(&dentry->d_lockref);
615 __dget_dlock(dentry);
616 spin_unlock(&dentry->d_lock);
617} 610}
618 611
619struct dentry *dget_parent(struct dentry *dentry) 612struct dentry *dget_parent(struct dentry *dentry)
@@ -634,8 +627,8 @@ repeat:
634 goto repeat; 627 goto repeat;
635 } 628 }
636 rcu_read_unlock(); 629 rcu_read_unlock();
637 BUG_ON(!ret->d_count); 630 BUG_ON(!ret->d_lockref.count);
638 ret->d_count++; 631 ret->d_lockref.count++;
639 spin_unlock(&ret->d_lock); 632 spin_unlock(&ret->d_lock);
640 return ret; 633 return ret;
641} 634}
@@ -718,7 +711,7 @@ restart:
718 spin_lock(&inode->i_lock); 711 spin_lock(&inode->i_lock);
719 hlist_for_each_entry(dentry, &inode->i_dentry, d_alias) { 712 hlist_for_each_entry(dentry, &inode->i_dentry, d_alias) {
720 spin_lock(&dentry->d_lock); 713 spin_lock(&dentry->d_lock);
721 if (!dentry->d_count) { 714 if (!dentry->d_lockref.count) {
722 __dget_dlock(dentry); 715 __dget_dlock(dentry);
723 __d_drop(dentry); 716 __d_drop(dentry);
724 spin_unlock(&dentry->d_lock); 717 spin_unlock(&dentry->d_lock);
@@ -763,12 +756,8 @@ static void try_prune_one_dentry(struct dentry *dentry)
763 /* Prune ancestors. */ 756 /* Prune ancestors. */
764 dentry = parent; 757 dentry = parent;
765 while (dentry) { 758 while (dentry) {
766 spin_lock(&dentry->d_lock); 759 if (lockref_put_or_lock(&dentry->d_lockref))
767 if (dentry->d_count > 1) {
768 dentry->d_count--;
769 spin_unlock(&dentry->d_lock);
770 return; 760 return;
771 }
772 dentry = dentry_kill(dentry, 1); 761 dentry = dentry_kill(dentry, 1);
773 } 762 }
774} 763}
@@ -793,7 +782,7 @@ static void shrink_dentry_list(struct list_head *list)
793 * the LRU because of laziness during lookup. Do not free 782 * the LRU because of laziness during lookup. Do not free
794 * it - just keep it off the LRU list. 783 * it - just keep it off the LRU list.
795 */ 784 */
796 if (dentry->d_count) { 785 if (dentry->d_lockref.count) {
797 dentry_lru_del(dentry); 786 dentry_lru_del(dentry);
798 spin_unlock(&dentry->d_lock); 787 spin_unlock(&dentry->d_lock);
799 continue; 788 continue;
@@ -913,7 +902,7 @@ static void shrink_dcache_for_umount_subtree(struct dentry *dentry)
913 dentry_lru_del(dentry); 902 dentry_lru_del(dentry);
914 __d_shrink(dentry); 903 __d_shrink(dentry);
915 904
916 if (dentry->d_count != 0) { 905 if (dentry->d_lockref.count != 0) {
917 printk(KERN_ERR 906 printk(KERN_ERR
918 "BUG: Dentry %p{i=%lx,n=%s}" 907 "BUG: Dentry %p{i=%lx,n=%s}"
919 " still in use (%d)" 908 " still in use (%d)"
@@ -922,7 +911,7 @@ static void shrink_dcache_for_umount_subtree(struct dentry *dentry)
922 dentry->d_inode ? 911 dentry->d_inode ?
923 dentry->d_inode->i_ino : 0UL, 912 dentry->d_inode->i_ino : 0UL,
924 dentry->d_name.name, 913 dentry->d_name.name,
925 dentry->d_count, 914 dentry->d_lockref.count,
926 dentry->d_sb->s_type->name, 915 dentry->d_sb->s_type->name,
927 dentry->d_sb->s_id); 916 dentry->d_sb->s_id);
928 BUG(); 917 BUG();
@@ -933,7 +922,7 @@ static void shrink_dcache_for_umount_subtree(struct dentry *dentry)
933 list_del(&dentry->d_u.d_child); 922 list_del(&dentry->d_u.d_child);
934 } else { 923 } else {
935 parent = dentry->d_parent; 924 parent = dentry->d_parent;
936 parent->d_count--; 925 parent->d_lockref.count--;
937 list_del(&dentry->d_u.d_child); 926 list_del(&dentry->d_u.d_child);
938 } 927 }
939 928
@@ -981,7 +970,7 @@ void shrink_dcache_for_umount(struct super_block *sb)
981 970
982 dentry = sb->s_root; 971 dentry = sb->s_root;
983 sb->s_root = NULL; 972 sb->s_root = NULL;
984 dentry->d_count--; 973 dentry->d_lockref.count--;
985 shrink_dcache_for_umount_subtree(dentry); 974 shrink_dcache_for_umount_subtree(dentry);
986 975
987 while (!hlist_bl_empty(&sb->s_anon)) { 976 while (!hlist_bl_empty(&sb->s_anon)) {
@@ -1147,7 +1136,7 @@ resume:
1147 * loop in shrink_dcache_parent() might not make any progress 1136 * loop in shrink_dcache_parent() might not make any progress
1148 * and loop forever. 1137 * and loop forever.
1149 */ 1138 */
1150 if (dentry->d_count) { 1139 if (dentry->d_lockref.count) {
1151 dentry_lru_del(dentry); 1140 dentry_lru_del(dentry);
1152 } else if (!(dentry->d_flags & DCACHE_SHRINK_LIST)) { 1141 } else if (!(dentry->d_flags & DCACHE_SHRINK_LIST)) {
1153 dentry_lru_move_list(dentry, dispose); 1142 dentry_lru_move_list(dentry, dispose);
@@ -1269,7 +1258,7 @@ struct dentry *__d_alloc(struct super_block *sb, const struct qstr *name)
1269 smp_wmb(); 1258 smp_wmb();
1270 dentry->d_name.name = dname; 1259 dentry->d_name.name = dname;
1271 1260
1272 dentry->d_count = 1; 1261 dentry->d_lockref.count = 1;
1273 dentry->d_flags = 0; 1262 dentry->d_flags = 0;
1274 spin_lock_init(&dentry->d_lock); 1263 spin_lock_init(&dentry->d_lock);
1275 seqcount_init(&dentry->d_seq); 1264 seqcount_init(&dentry->d_seq);
@@ -1970,7 +1959,7 @@ struct dentry *__d_lookup(const struct dentry *parent, const struct qstr *name)
1970 goto next; 1959 goto next;
1971 } 1960 }
1972 1961
1973 dentry->d_count++; 1962 dentry->d_lockref.count++;
1974 found = dentry; 1963 found = dentry;
1975 spin_unlock(&dentry->d_lock); 1964 spin_unlock(&dentry->d_lock);
1976 break; 1965 break;
@@ -2069,7 +2058,7 @@ again:
2069 spin_lock(&dentry->d_lock); 2058 spin_lock(&dentry->d_lock);
2070 inode = dentry->d_inode; 2059 inode = dentry->d_inode;
2071 isdir = S_ISDIR(inode->i_mode); 2060 isdir = S_ISDIR(inode->i_mode);
2072 if (dentry->d_count == 1) { 2061 if (dentry->d_lockref.count == 1) {
2073 if (!spin_trylock(&inode->i_lock)) { 2062 if (!spin_trylock(&inode->i_lock)) {
2074 spin_unlock(&dentry->d_lock); 2063 spin_unlock(&dentry->d_lock);
2075 cpu_relax(); 2064 cpu_relax();
@@ -2724,6 +2713,17 @@ char *dynamic_dname(struct dentry *dentry, char *buffer, int buflen,
2724 return memcpy(buffer, temp, sz); 2713 return memcpy(buffer, temp, sz);
2725} 2714}
2726 2715
2716char *simple_dname(struct dentry *dentry, char *buffer, int buflen)
2717{
2718 char *end = buffer + buflen;
2719 /* these dentries are never renamed, so d_lock is not needed */
2720 if (prepend(&end, &buflen, " (deleted)", 11) ||
2721 prepend_name(&end, &buflen, &dentry->d_name) ||
2722 prepend(&end, &buflen, "/", 1))
2723 end = ERR_PTR(-ENAMETOOLONG);
2724 return end;
2725}
2726
2727/* 2727/*
2728 * Write full pathname from the root of the filesystem into the buffer. 2728 * Write full pathname from the root of the filesystem into the buffer.
2729 */ 2729 */
@@ -2937,7 +2937,7 @@ resume:
2937 } 2937 }
2938 if (!(dentry->d_flags & DCACHE_GENOCIDE)) { 2938 if (!(dentry->d_flags & DCACHE_GENOCIDE)) {
2939 dentry->d_flags |= DCACHE_GENOCIDE; 2939 dentry->d_flags |= DCACHE_GENOCIDE;
2940 dentry->d_count--; 2940 dentry->d_lockref.count--;
2941 } 2941 }
2942 spin_unlock(&dentry->d_lock); 2942 spin_unlock(&dentry->d_lock);
2943 } 2943 }
@@ -2945,7 +2945,7 @@ resume:
2945 struct dentry *child = this_parent; 2945 struct dentry *child = this_parent;
2946 if (!(this_parent->d_flags & DCACHE_GENOCIDE)) { 2946 if (!(this_parent->d_flags & DCACHE_GENOCIDE)) {
2947 this_parent->d_flags |= DCACHE_GENOCIDE; 2947 this_parent->d_flags |= DCACHE_GENOCIDE;
2948 this_parent->d_count--; 2948 this_parent->d_lockref.count--;
2949 } 2949 }
2950 this_parent = try_to_ascend(this_parent, locked, seq); 2950 this_parent = try_to_ascend(this_parent, locked, seq);
2951 if (!this_parent) 2951 if (!this_parent)
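
simple_dname() added above builds the name from the end of the caller's buffer towards the front, prepending "/" last, which is how the d_path helpers in fs/dcache.c work in general. A stand-alone sketch of that prepend-into-the-tail technique follows; it only mimics the idea and is not the kernel's prepend()/prepend_name().

/*
 * Sketch of the "build the name backwards from the end of the buffer"
 * technique that simple_dname() relies on. Standalone re-implementation
 * for illustration, not the kernel helper.
 */
#include <stdio.h>
#include <string.h>

static char *prepend(char **end, int *buflen, const char *str, int len)
{
        *buflen -= len;
        if (*buflen < 0)
                return NULL;            /* would not fit */
        *end -= len;
        memcpy(*end, str, len);
        return *end;
}

int main(void)
{
        char buf[64];
        int buflen = sizeof(buf);
        char *end = buf + buflen;
        const char *name = "anon_file";

        /* Same order as simple_dname(): suffix, then name, then "/". */
        prepend(&end, &buflen, " (deleted)", 11);       /* includes the NUL */
        prepend(&end, &buflen, name, strlen(name));
        prepend(&end, &buflen, "/", 1);
        printf("%s\n", end);            /* -> "/anon_file (deleted)" */
        return 0;
}

Note the 11 in the first call: like the kernel code, it copies the trailing NUL along with " (deleted)" so the finished string is already terminated.
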
diff --git a/fs/efs/inode.c b/fs/efs/inode.c
index f3913eb2c474..d15ccf20f1b3 100644
--- a/fs/efs/inode.c
+++ b/fs/efs/inode.c
@@ -57,7 +57,7 @@ struct inode *efs_iget(struct super_block *super, unsigned long ino)
57 struct inode *inode; 57 struct inode *inode;
58 58
59 inode = iget_locked(super, ino); 59 inode = iget_locked(super, ino);
60 if (IS_ERR(inode)) 60 if (!inode)
61 return ERR_PTR(-ENOMEM); 61 return ERR_PTR(-ENOMEM);
62 if (!(inode->i_state & I_NEW)) 62 if (!(inode->i_state & I_NEW))
63 return inode; 63 return inode;
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index 34423978b170..d19b30ababf1 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -926,14 +926,8 @@ static int get_hstate_idx(int page_size_log)
926 return h - hstates; 926 return h - hstates;
927} 927}
928 928
929static char *hugetlb_dname(struct dentry *dentry, char *buffer, int buflen)
930{
931 return dynamic_dname(dentry, buffer, buflen, "/%s (deleted)",
932 dentry->d_name.name);
933}
934
935static struct dentry_operations anon_ops = { 929static struct dentry_operations anon_ops = {
936 .d_dname = hugetlb_dname 930 .d_dname = simple_dname
937}; 931};
938 932
939/* 933/*
diff --git a/fs/jfs/jfs_dtree.c b/fs/jfs/jfs_dtree.c
index 8743ba9c6742..984c2bbf4f61 100644
--- a/fs/jfs/jfs_dtree.c
+++ b/fs/jfs/jfs_dtree.c
@@ -3047,6 +3047,14 @@ int jfs_readdir(struct file *file, struct dir_context *ctx)
3047 3047
3048 dir_index = (u32) ctx->pos; 3048 dir_index = (u32) ctx->pos;
3049 3049
3050 /*
3051 * NFSv4 reserves cookies 1 and 2 for . and .. so the value
3052 * we return to the vfs is one greater than the one we use
3053 * internally.
3054 */
3055 if (dir_index)
3056 dir_index--;
3057
3050 if (dir_index > 1) { 3058 if (dir_index > 1) {
3051 struct dir_table_slot dirtab_slot; 3059 struct dir_table_slot dirtab_slot;
3052 3060
@@ -3086,7 +3094,7 @@ int jfs_readdir(struct file *file, struct dir_context *ctx)
3086 if (p->header.flag & BT_INTERNAL) { 3094 if (p->header.flag & BT_INTERNAL) {
3087 jfs_err("jfs_readdir: bad index table"); 3095 jfs_err("jfs_readdir: bad index table");
3088 DT_PUTPAGE(mp); 3096 DT_PUTPAGE(mp);
3089 ctx->pos = -1; 3097 ctx->pos = DIREND;
3090 return 0; 3098 return 0;
3091 } 3099 }
3092 } else { 3100 } else {
@@ -3094,14 +3102,14 @@ int jfs_readdir(struct file *file, struct dir_context *ctx)
3094 /* 3102 /*
3095 * self "." 3103 * self "."
3096 */ 3104 */
3097 ctx->pos = 0; 3105 ctx->pos = 1;
3098 if (!dir_emit(ctx, ".", 1, ip->i_ino, DT_DIR)) 3106 if (!dir_emit(ctx, ".", 1, ip->i_ino, DT_DIR))
3099 return 0; 3107 return 0;
3100 } 3108 }
3101 /* 3109 /*
3102 * parent ".." 3110 * parent ".."
3103 */ 3111 */
3104 ctx->pos = 1; 3112 ctx->pos = 2;
3105 if (!dir_emit(ctx, "..", 2, PARENT(ip), DT_DIR)) 3113 if (!dir_emit(ctx, "..", 2, PARENT(ip), DT_DIR))
3106 return 0; 3114 return 0;
3107 3115
@@ -3122,22 +3130,23 @@ int jfs_readdir(struct file *file, struct dir_context *ctx)
3122 /* 3130 /*
3123 * Legacy filesystem - OS/2 & Linux JFS < 0.3.6 3131 * Legacy filesystem - OS/2 & Linux JFS < 0.3.6
3124 * 3132 *
3125 * pn = index = 0: First entry "." 3133 * pn = 0; index = 1: First entry "."
3126 * pn = 0; index = 1: Second entry ".." 3134 * pn = 0; index = 2: Second entry ".."
3127 * pn > 0: Real entries, pn=1 -> leftmost page 3135 * pn > 0: Real entries, pn=1 -> leftmost page
3128 * pn = index = -1: No more entries 3136 * pn = index = -1: No more entries
3129 */ 3137 */
3130 dtpos = ctx->pos; 3138 dtpos = ctx->pos;
3131 if (dtpos == 0) { 3139 if (dtpos < 2) {
3132 /* build "." entry */ 3140 /* build "." entry */
3141 ctx->pos = 1;
3133 if (!dir_emit(ctx, ".", 1, ip->i_ino, DT_DIR)) 3142 if (!dir_emit(ctx, ".", 1, ip->i_ino, DT_DIR))
3134 return 0; 3143 return 0;
3135 dtoffset->index = 1; 3144 dtoffset->index = 2;
3136 ctx->pos = dtpos; 3145 ctx->pos = dtpos;
3137 } 3146 }
3138 3147
3139 if (dtoffset->pn == 0) { 3148 if (dtoffset->pn == 0) {
3140 if (dtoffset->index == 1) { 3149 if (dtoffset->index == 2) {
3141 /* build ".." entry */ 3150 /* build ".." entry */
3142 if (!dir_emit(ctx, "..", 2, PARENT(ip), DT_DIR)) 3151 if (!dir_emit(ctx, "..", 2, PARENT(ip), DT_DIR))
3143 return 0; 3152 return 0;
@@ -3228,6 +3237,12 @@ int jfs_readdir(struct file *file, struct dir_context *ctx)
3228 } 3237 }
3229 jfs_dirent->position = unique_pos++; 3238 jfs_dirent->position = unique_pos++;
3230 } 3239 }
3240 /*
 3241 * We add 1 to the index because we may
 3242 * use a value of 2 internally, and NFSv4
 3243 * reserves cookies 1 and 2 for . and ..
3244 */
3245 jfs_dirent->position++;
3231 } else { 3246 } else {
3232 jfs_dirent->position = dtpos; 3247 jfs_dirent->position = dtpos;
3233 len = min(d_namleft, DTLHDRDATALEN_LEGACY); 3248 len = min(d_namleft, DTLHDRDATALEN_LEGACY);
diff --git a/fs/namei.c b/fs/namei.c
index 89a612e392eb..7720fbd5277b 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -536,8 +536,8 @@ static int unlazy_walk(struct nameidata *nd, struct dentry *dentry)
536 * a reference at this point. 536 * a reference at this point.
537 */ 537 */
538 BUG_ON(!IS_ROOT(dentry) && dentry->d_parent != parent); 538 BUG_ON(!IS_ROOT(dentry) && dentry->d_parent != parent);
539 BUG_ON(!parent->d_count); 539 BUG_ON(!parent->d_lockref.count);
540 parent->d_count++; 540 parent->d_lockref.count++;
541 spin_unlock(&dentry->d_lock); 541 spin_unlock(&dentry->d_lock);
542 } 542 }
543 spin_unlock(&parent->d_lock); 543 spin_unlock(&parent->d_lock);
@@ -3327,7 +3327,7 @@ void dentry_unhash(struct dentry *dentry)
3327{ 3327{
3328 shrink_dcache_parent(dentry); 3328 shrink_dcache_parent(dentry);
3329 spin_lock(&dentry->d_lock); 3329 spin_lock(&dentry->d_lock);
3330 if (dentry->d_count == 1) 3330 if (dentry->d_lockref.count == 1)
3331 __d_drop(dentry); 3331 __d_drop(dentry);
3332 spin_unlock(&dentry->d_lock); 3332 spin_unlock(&dentry->d_lock);
3333} 3333}
@@ -3671,11 +3671,15 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
3671 if ((flags & ~(AT_SYMLINK_FOLLOW | AT_EMPTY_PATH)) != 0) 3671 if ((flags & ~(AT_SYMLINK_FOLLOW | AT_EMPTY_PATH)) != 0)
3672 return -EINVAL; 3672 return -EINVAL;
3673 /* 3673 /*
3674 * Using empty names is equivalent to using AT_SYMLINK_FOLLOW 3674 * To use null names we require CAP_DAC_READ_SEARCH
3675 * on /proc/self/fd/<fd>. 3675 * This ensures that not everyone will be able to create
 3676 * a hard link using the passed file descriptor.
3676 */ 3677 */
3677 if (flags & AT_EMPTY_PATH) 3678 if (flags & AT_EMPTY_PATH) {
3679 if (!capable(CAP_DAC_READ_SEARCH))
3680 return -ENOENT;
3678 how = LOOKUP_EMPTY; 3681 how = LOOKUP_EMPTY;
3682 }
3679 3683
3680 if (flags & AT_SYMLINK_FOLLOW) 3684 if (flags & AT_SYMLINK_FOLLOW)
3681 how |= LOOKUP_FOLLOW; 3685 how |= LOOKUP_FOLLOW;
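
For context on the linkat() change above, this is what the now-restricted call looks like from userspace: an empty oldpath plus AT_EMPTY_PATH links the open file descriptor itself. With the patch applied the caller needs CAP_DAC_READ_SEARCH, so an unprivileged run is expected to fail with ENOENT. The /tmp paths below are placeholders.

/*
 * Userspace view of the behaviour the linkat() hunk restricts: giving a
 * file an on-disk name from an open file descriptor via AT_EMPTY_PATH.
 * Paths are placeholders; expect ENOENT when run without
 * CAP_DAC_READ_SEARCH on a kernel with this patch.
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        int fd = open("/tmp/linkat-demo-src", O_CREAT | O_RDWR, 0600);

        if (fd < 0) {
                perror("open");
                return 1;
        }
        /* Empty oldpath + AT_EMPTY_PATH: link the fd itself. */
        if (linkat(fd, "", AT_FDCWD, "/tmp/linkat-demo-link",
                   AT_EMPTY_PATH) < 0)
                perror("linkat(AT_EMPTY_PATH)");
        else
                puts("hard link created from file descriptor");
        close(fd);
        return 0;
}
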
diff --git a/fs/namespace.c b/fs/namespace.c
index 7b1ca9ba0b0a..a45ba4f267fe 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -1429,7 +1429,7 @@ struct vfsmount *collect_mounts(struct path *path)
1429 CL_COPY_ALL | CL_PRIVATE); 1429 CL_COPY_ALL | CL_PRIVATE);
1430 namespace_unlock(); 1430 namespace_unlock();
1431 if (IS_ERR(tree)) 1431 if (IS_ERR(tree))
1432 return NULL; 1432 return ERR_CAST(tree);
1433 return &tree->mnt; 1433 return &tree->mnt;
1434} 1434}
1435 1435
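
The collect_mounts() fix above stops collapsing an error pointer into NULL and instead propagates it with ERR_CAST(). Here is a stand-alone re-implementation of the ERR_PTR()/IS_ERR() encoding it relies on, for illustration only; the real macros live in include/linux/err.h.

/*
 * Userspace re-implementation of the error-pointer convention, for
 * illustration: errors travel inside the pointer value instead of being
 * flattened to NULL.
 */
#include <stdio.h>
#include <stdint.h>

#define MAX_ERRNO       4095

static inline void *ERR_PTR(long error)
{
        return (void *)error;
}

static inline long PTR_ERR(const void *ptr)
{
        return (long)(intptr_t)ptr;
}

static inline int IS_ERR(const void *ptr)
{
        return (uintptr_t)ptr >= (uintptr_t)-MAX_ERRNO;
}

static void *copy_tree_demo(int fail)
{
        static int tree = 42;

        return fail ? ERR_PTR(-12 /* ENOMEM */) : &tree;
}

int main(void)
{
        void *tree = copy_tree_demo(1);

        if (IS_ERR(tree))
                /* propagate the encoded error instead of returning NULL */
                printf("propagating error %ld\n", PTR_ERR(tree));
        return 0;
}
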
diff --git a/fs/nilfs2/segbuf.c b/fs/nilfs2/segbuf.c
index dc9a913784ab..2d8be51f90dc 100644
--- a/fs/nilfs2/segbuf.c
+++ b/fs/nilfs2/segbuf.c
@@ -345,8 +345,7 @@ static void nilfs_end_bio_write(struct bio *bio, int err)
345 345
346 if (err == -EOPNOTSUPP) { 346 if (err == -EOPNOTSUPP) {
347 set_bit(BIO_EOPNOTSUPP, &bio->bi_flags); 347 set_bit(BIO_EOPNOTSUPP, &bio->bi_flags);
348 bio_put(bio); 348 /* to be detected by nilfs_segbuf_submit_bio() */
349 /* to be detected by submit_seg_bio() */
350 } 349 }
351 350
352 if (!uptodate) 351 if (!uptodate)
@@ -377,12 +376,12 @@ static int nilfs_segbuf_submit_bio(struct nilfs_segment_buffer *segbuf,
377 bio->bi_private = segbuf; 376 bio->bi_private = segbuf;
378 bio_get(bio); 377 bio_get(bio);
379 submit_bio(mode, bio); 378 submit_bio(mode, bio);
379 segbuf->sb_nbio++;
380 if (bio_flagged(bio, BIO_EOPNOTSUPP)) { 380 if (bio_flagged(bio, BIO_EOPNOTSUPP)) {
381 bio_put(bio); 381 bio_put(bio);
382 err = -EOPNOTSUPP; 382 err = -EOPNOTSUPP;
383 goto failed; 383 goto failed;
384 } 384 }
385 segbuf->sb_nbio++;
386 bio_put(bio); 385 bio_put(bio);
387 386
388 wi->bio = NULL; 387 wi->bio = NULL;
diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
index 854d80955bf8..121da2dc3be8 100644
--- a/fs/ocfs2/super.c
+++ b/fs/ocfs2/super.c
@@ -1022,7 +1022,7 @@ static int ocfs2_fill_super(struct super_block *sb, void *data, int silent)
1022 struct inode *inode = NULL; 1022 struct inode *inode = NULL;
1023 struct ocfs2_super *osb = NULL; 1023 struct ocfs2_super *osb = NULL;
1024 struct buffer_head *bh = NULL; 1024 struct buffer_head *bh = NULL;
1025 char nodestr[8]; 1025 char nodestr[12];
1026 struct ocfs2_blockcheck_stats stats; 1026 struct ocfs2_blockcheck_stats stats;
1027 1027
1028 trace_ocfs2_fill_super(sb, data, silent); 1028 trace_ocfs2_fill_super(sb, data, silent);
diff --git a/fs/proc/fd.c b/fs/proc/fd.c
index 75f2890abbd8..0ff80f9b930f 100644
--- a/fs/proc/fd.c
+++ b/fs/proc/fd.c
@@ -230,8 +230,6 @@ static int proc_readfd_common(struct file *file, struct dir_context *ctx,
230 230
231 if (!dir_emit_dots(file, ctx)) 231 if (!dir_emit_dots(file, ctx))
232 goto out; 232 goto out;
233 if (!dir_emit_dots(file, ctx))
234 goto out;
235 files = get_files_struct(p); 233 files = get_files_struct(p);
236 if (!files) 234 if (!files)
237 goto out; 235 goto out;
diff --git a/include/linux/dcache.h b/include/linux/dcache.h
index b90337c9d468..efdc94434c30 100644
--- a/include/linux/dcache.h
+++ b/include/linux/dcache.h
@@ -9,6 +9,7 @@
9#include <linux/seqlock.h> 9#include <linux/seqlock.h>
10#include <linux/cache.h> 10#include <linux/cache.h>
11#include <linux/rcupdate.h> 11#include <linux/rcupdate.h>
12#include <linux/lockref.h>
12 13
13struct nameidata; 14struct nameidata;
14struct path; 15struct path;
@@ -100,6 +101,8 @@ extern unsigned int full_name_hash(const unsigned char *, unsigned int);
100# endif 101# endif
101#endif 102#endif
102 103
104#define d_lock d_lockref.lock
105
103struct dentry { 106struct dentry {
104 /* RCU lookup touched fields */ 107 /* RCU lookup touched fields */
105 unsigned int d_flags; /* protected by d_lock */ 108 unsigned int d_flags; /* protected by d_lock */
@@ -112,8 +115,7 @@ struct dentry {
112 unsigned char d_iname[DNAME_INLINE_LEN]; /* small names */ 115 unsigned char d_iname[DNAME_INLINE_LEN]; /* small names */
113 116
114 /* Ref lookup also touches following */ 117 /* Ref lookup also touches following */
115 unsigned int d_count; /* protected by d_lock */ 118 struct lockref d_lockref; /* per-dentry lock and refcount */
116 spinlock_t d_lock; /* per dentry lock */
117 const struct dentry_operations *d_op; 119 const struct dentry_operations *d_op;
118 struct super_block *d_sb; /* The root of the dentry tree */ 120 struct super_block *d_sb; /* The root of the dentry tree */
119 unsigned long d_time; /* used by d_revalidate */ 121 unsigned long d_time; /* used by d_revalidate */
@@ -318,7 +320,7 @@ static inline int __d_rcu_to_refcount(struct dentry *dentry, unsigned seq)
318 assert_spin_locked(&dentry->d_lock); 320 assert_spin_locked(&dentry->d_lock);
319 if (!read_seqcount_retry(&dentry->d_seq, seq)) { 321 if (!read_seqcount_retry(&dentry->d_seq, seq)) {
320 ret = 1; 322 ret = 1;
321 dentry->d_count++; 323 dentry->d_lockref.count++;
322 } 324 }
323 325
324 return ret; 326 return ret;
@@ -326,7 +328,7 @@ static inline int __d_rcu_to_refcount(struct dentry *dentry, unsigned seq)
326 328
327static inline unsigned d_count(const struct dentry *dentry) 329static inline unsigned d_count(const struct dentry *dentry)
328{ 330{
329 return dentry->d_count; 331 return dentry->d_lockref.count;
330} 332}
331 333
332/* validate "insecure" dentry pointer */ 334/* validate "insecure" dentry pointer */
@@ -336,6 +338,7 @@ extern int d_validate(struct dentry *, struct dentry *);
336 * helper function for dentry_operations.d_dname() members 338 * helper function for dentry_operations.d_dname() members
337 */ 339 */
338extern char *dynamic_dname(struct dentry *, char *, int, const char *, ...); 340extern char *dynamic_dname(struct dentry *, char *, int, const char *, ...);
341extern char *simple_dname(struct dentry *, char *, int);
339 342
340extern char *__d_path(const struct path *, const struct path *, char *, int); 343extern char *__d_path(const struct path *, const struct path *, char *, int);
341extern char *d_absolute_path(const struct path *, char *, int); 344extern char *d_absolute_path(const struct path *, char *, int);
@@ -356,17 +359,14 @@ extern char *dentry_path(struct dentry *, char *, int);
356static inline struct dentry *dget_dlock(struct dentry *dentry) 359static inline struct dentry *dget_dlock(struct dentry *dentry)
357{ 360{
358 if (dentry) 361 if (dentry)
359 dentry->d_count++; 362 dentry->d_lockref.count++;
360 return dentry; 363 return dentry;
361} 364}
362 365
363static inline struct dentry *dget(struct dentry *dentry) 366static inline struct dentry *dget(struct dentry *dentry)
364{ 367{
365 if (dentry) { 368 if (dentry)
366 spin_lock(&dentry->d_lock); 369 lockref_get(&dentry->d_lockref);
367 dget_dlock(dentry);
368 spin_unlock(&dentry->d_lock);
369 }
370 return dentry; 370 return dentry;
371} 371}
372 372
diff --git a/include/linux/inetdevice.h b/include/linux/inetdevice.h
index b99cd23f3474..79640e015a86 100644
--- a/include/linux/inetdevice.h
+++ b/include/linux/inetdevice.h
@@ -5,45 +5,13 @@
5 5
6#include <linux/bitmap.h> 6#include <linux/bitmap.h>
7#include <linux/if.h> 7#include <linux/if.h>
8#include <linux/ip.h>
8#include <linux/netdevice.h> 9#include <linux/netdevice.h>
9#include <linux/rcupdate.h> 10#include <linux/rcupdate.h>
10#include <linux/timer.h> 11#include <linux/timer.h>
11#include <linux/sysctl.h> 12#include <linux/sysctl.h>
12#include <linux/rtnetlink.h> 13#include <linux/rtnetlink.h>
13 14
14enum
15{
16 IPV4_DEVCONF_FORWARDING=1,
17 IPV4_DEVCONF_MC_FORWARDING,
18 IPV4_DEVCONF_PROXY_ARP,
19 IPV4_DEVCONF_ACCEPT_REDIRECTS,
20 IPV4_DEVCONF_SECURE_REDIRECTS,
21 IPV4_DEVCONF_SEND_REDIRECTS,
22 IPV4_DEVCONF_SHARED_MEDIA,
23 IPV4_DEVCONF_RP_FILTER,
24 IPV4_DEVCONF_ACCEPT_SOURCE_ROUTE,
25 IPV4_DEVCONF_BOOTP_RELAY,
26 IPV4_DEVCONF_LOG_MARTIANS,
27 IPV4_DEVCONF_TAG,
28 IPV4_DEVCONF_ARPFILTER,
29 IPV4_DEVCONF_MEDIUM_ID,
30 IPV4_DEVCONF_NOXFRM,
31 IPV4_DEVCONF_NOPOLICY,
32 IPV4_DEVCONF_FORCE_IGMP_VERSION,
33 IPV4_DEVCONF_ARP_ANNOUNCE,
34 IPV4_DEVCONF_ARP_IGNORE,
35 IPV4_DEVCONF_PROMOTE_SECONDARIES,
36 IPV4_DEVCONF_ARP_ACCEPT,
37 IPV4_DEVCONF_ARP_NOTIFY,
38 IPV4_DEVCONF_ACCEPT_LOCAL,
39 IPV4_DEVCONF_SRC_VMARK,
40 IPV4_DEVCONF_PROXY_ARP_PVLAN,
41 IPV4_DEVCONF_ROUTE_LOCALNET,
42 __IPV4_DEVCONF_MAX
43};
44
45#define IPV4_DEVCONF_MAX (__IPV4_DEVCONF_MAX - 1)
46
47struct ipv4_devconf { 15struct ipv4_devconf {
48 void *sysctl; 16 void *sysctl;
49 int data[IPV4_DEVCONF_MAX]; 17 int data[IPV4_DEVCONF_MAX];
diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h
index 850e95bc766c..b8b7dc755752 100644
--- a/include/linux/ipv6.h
+++ b/include/linux/ipv6.h
@@ -101,6 +101,7 @@ struct inet6_skb_parm {
101#define IP6SKB_FORWARDED 2 101#define IP6SKB_FORWARDED 2
102#define IP6SKB_REROUTED 4 102#define IP6SKB_REROUTED 4
103#define IP6SKB_ROUTERALERT 8 103#define IP6SKB_ROUTERALERT 8
104#define IP6SKB_FRAGMENTED 16
104}; 105};
105 106
106#define IP6CB(skb) ((struct inet6_skb_parm*)((skb)->cb)) 107#define IP6CB(skb) ((struct inet6_skb_parm*)((skb)->cb))
diff --git a/include/linux/lockref.h b/include/linux/lockref.h
new file mode 100644
index 000000000000..01233e01627a
--- /dev/null
+++ b/include/linux/lockref.h
@@ -0,0 +1,71 @@
1#ifndef __LINUX_LOCKREF_H
2#define __LINUX_LOCKREF_H
3
4/*
5 * Locked reference counts.
6 *
7 * These are different from just plain atomic refcounts in that they
8 * are atomic with respect to the spinlock that goes with them. In
9 * particular, there can be implementations that don't actually get
10 * the spinlock for the common decrement/increment operations, but they
11 * still have to check that the operation is done semantically as if
12 * the spinlock had been taken (using a cmpxchg operation that covers
13 * both the lock and the count word, or using memory transactions, for
14 * example).
15 */
16
17#include <linux/spinlock.h>
18
19struct lockref {
20 spinlock_t lock;
21 unsigned int count;
22};
23
24/**
25 * lockref_get - Increments reference count unconditionally
 26 * @lockref: pointer to lockref structure
27 *
28 * This operation is only valid if you already hold a reference
29 * to the object, so you know the count cannot be zero.
30 */
31static inline void lockref_get(struct lockref *lockref)
32{
33 spin_lock(&lockref->lock);
34 lockref->count++;
35 spin_unlock(&lockref->lock);
36}
37
38/**
39 * lockref_get_not_zero - Increments count unless the count is 0
 40 * @lockref: pointer to lockref structure
41 * Return: 1 if count updated successfully or 0 if count is 0
42 */
43static inline int lockref_get_not_zero(struct lockref *lockref)
44{
45 int retval = 0;
46
47 spin_lock(&lockref->lock);
48 if (lockref->count) {
49 lockref->count++;
50 retval = 1;
51 }
52 spin_unlock(&lockref->lock);
53 return retval;
54}
55
56/**
 57 * lockref_put_or_lock - decrements count unless count is <= 1 before the decrement
 58 * @lockref: pointer to lockref structure
 59 * Return: 1 if the count was decremented, 0 if count was <= 1 and the lock was taken
60 */
61static inline int lockref_put_or_lock(struct lockref *lockref)
62{
63 spin_lock(&lockref->lock);
64 if (lockref->count <= 1)
65 return 0;
66 lockref->count--;
67 spin_unlock(&lockref->lock);
68 return 1;
69}
70
71#endif /* __LINUX_LOCKREF_H */
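
What the dcache conversion earlier in this diff buys is easiest to see in the release path: lockref_put_or_lock() either drops a reference, or hands the caller the lock so the final teardown can happen without a separate re-check. Below is a userspace sketch of that calling convention with a pthread mutex standing in for the spinlock; struct obj, put() and put_or_lock() are invented names.

/*
 * Userspace sketch of the lockref usage pattern dcache switches to.
 * Invented types and helpers, pthread mutex in place of a spinlock.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct obj {
        pthread_mutex_t lock;
        unsigned int count;
};

/* Returns 1 if it dropped a reference, 0 if the lock is now held. */
static int put_or_lock(struct obj *o)
{
        pthread_mutex_lock(&o->lock);
        if (o->count <= 1)
                return 0;               /* caller must unlock (or free) */
        o->count--;
        pthread_mutex_unlock(&o->lock);
        return 1;
}

static void put(struct obj *o)
{
        if (put_or_lock(o))
                return;                 /* fast path: just dropped a ref */
        /* last reference: still holding the lock, tear the object down */
        pthread_mutex_unlock(&o->lock);
        pthread_mutex_destroy(&o->lock);
        free(o);
        printf("object freed\n");
}

int main(void)
{
        struct obj *o = malloc(sizeof(*o));

        pthread_mutex_init(&o->lock, NULL);
        o->count = 2;                   /* two references outstanding */
        put(o);                         /* drops to 1 */
        put(o);                         /* last put frees the object */
        return 0;
}

The real lockref can additionally do the common-case update without ever taking the lock (a cmpxchg over the combined lock-and-count word), which is the motivation spelled out in the header comment above.
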
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index fb425aa16c01..faf4b7c1ad12 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -332,6 +332,7 @@ struct mm_struct {
332 unsigned long pgoff, unsigned long flags); 332 unsigned long pgoff, unsigned long flags);
333#endif 333#endif
334 unsigned long mmap_base; /* base of mmap area */ 334 unsigned long mmap_base; /* base of mmap area */
335 unsigned long mmap_legacy_base; /* base of mmap area in bottom-up allocations */
335 unsigned long task_size; /* size of task vm space */ 336 unsigned long task_size; /* size of task vm space */
336 unsigned long highest_vm_end; /* highest vma end address */ 337 unsigned long highest_vm_end; /* highest vma end address */
337 pgd_t * pgd; 338 pgd_t * pgd;
diff --git a/include/linux/nsproxy.h b/include/linux/nsproxy.h
index 10e5947491c7..b4ec59d159ac 100644
--- a/include/linux/nsproxy.h
+++ b/include/linux/nsproxy.h
@@ -14,6 +14,10 @@ struct fs_struct;
14 * A structure to contain pointers to all per-process 14 * A structure to contain pointers to all per-process
15 * namespaces - fs (mount), uts, network, sysvipc, etc. 15 * namespaces - fs (mount), uts, network, sysvipc, etc.
16 * 16 *
 17 * The pid namespace is an exception -- it's accessed using
 18 * task_active_pid_ns(). The pid namespace here is the
 19 * namespace that this task's children will use.
20 *
17 * 'count' is the number of tasks holding a reference. 21 * 'count' is the number of tasks holding a reference.
18 * The count for each namespace, then, will be the number 22 * The count for each namespace, then, will be the number
19 * of nsproxies pointing to it, not the number of tasks. 23 * of nsproxies pointing to it, not the number of tasks.
@@ -27,7 +31,7 @@ struct nsproxy {
27 struct uts_namespace *uts_ns; 31 struct uts_namespace *uts_ns;
28 struct ipc_namespace *ipc_ns; 32 struct ipc_namespace *ipc_ns;
29 struct mnt_namespace *mnt_ns; 33 struct mnt_namespace *mnt_ns;
30 struct pid_namespace *pid_ns; 34 struct pid_namespace *pid_ns_for_children;
31 struct net *net_ns; 35 struct net *net_ns;
32}; 36};
33extern struct nsproxy init_nsproxy; 37extern struct nsproxy init_nsproxy;
diff --git a/include/linux/regmap.h b/include/linux/regmap.h
index 580a5320cc96..6d91fcb4c5cb 100644
--- a/include/linux/regmap.h
+++ b/include/linux/regmap.h
@@ -16,6 +16,7 @@
16#include <linux/list.h> 16#include <linux/list.h>
17#include <linux/rbtree.h> 17#include <linux/rbtree.h>
18#include <linux/err.h> 18#include <linux/err.h>
19#include <linux/bug.h>
19 20
20struct module; 21struct module;
21struct device; 22struct device;
diff --git a/include/linux/sched.h b/include/linux/sched.h
index e9995eb5985c..078066daffd4 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -314,7 +314,6 @@ struct nsproxy;
314struct user_namespace; 314struct user_namespace;
315 315
316#ifdef CONFIG_MMU 316#ifdef CONFIG_MMU
317extern unsigned long mmap_legacy_base(void);
318extern void arch_pick_mmap_layout(struct mm_struct *mm); 317extern void arch_pick_mmap_layout(struct mm_struct *mm);
319extern unsigned long 318extern unsigned long
320arch_get_unmapped_area(struct file *, unsigned long, unsigned long, 319arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
diff --git a/include/linux/wait.h b/include/linux/wait.h
index f487a4750b7f..a67fc1635592 100644
--- a/include/linux/wait.h
+++ b/include/linux/wait.h
@@ -811,6 +811,63 @@ do { \
811 __ret; \ 811 __ret; \
812}) 812})
813 813
814#define __wait_event_interruptible_lock_irq_timeout(wq, condition, \
815 lock, ret) \
816do { \
817 DEFINE_WAIT(__wait); \
818 \
819 for (;;) { \
820 prepare_to_wait(&wq, &__wait, TASK_INTERRUPTIBLE); \
821 if (condition) \
822 break; \
823 if (signal_pending(current)) { \
824 ret = -ERESTARTSYS; \
825 break; \
826 } \
827 spin_unlock_irq(&lock); \
828 ret = schedule_timeout(ret); \
829 spin_lock_irq(&lock); \
830 if (!ret) \
831 break; \
832 } \
833 finish_wait(&wq, &__wait); \
834} while (0)
835
836/**
 837 * wait_event_interruptible_lock_irq_timeout - sleep until a condition becomes true or a timeout elapses.
838 * The condition is checked under the lock. This is expected
839 * to be called with the lock taken.
840 * @wq: the waitqueue to wait on
841 * @condition: a C expression for the event to wait for
842 * @lock: a locked spinlock_t, which will be released before schedule()
843 * and reacquired afterwards.
844 * @timeout: timeout, in jiffies
845 *
846 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 847 * @condition evaluates to true or a signal is received. The @condition is
848 * checked each time the waitqueue @wq is woken up.
849 *
850 * wake_up() has to be called after changing any variable that could
851 * change the result of the wait condition.
852 *
853 * This is supposed to be called while holding the lock. The lock is
854 * dropped before going to sleep and is reacquired afterwards.
855 *
856 * The function returns 0 if the @timeout elapsed, -ERESTARTSYS if it
 857 * was interrupted by a signal, and the remaining jiffies if the
 858 * condition evaluated to true before the timeout elapsed.
859 */
860#define wait_event_interruptible_lock_irq_timeout(wq, condition, lock, \
861 timeout) \
862({ \
863 int __ret = timeout; \
864 \
865 if (!(condition)) \
866 __wait_event_interruptible_lock_irq_timeout( \
867 wq, condition, lock, __ret); \
868 __ret; \
869})
870
814 871
815/* 872/*
816 * These are the old interfaces to sleep waiting for an event. 873 * These are the old interfaces to sleep waiting for an event.
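A minimal usage sketch of the new helper documented above, under assumed names (struct my_dev, dev->lock, dev->wq and dev->done are hypothetical and not part of this patch); it only restates the contract the kernel-doc spells out:

static int wait_for_done(struct my_dev *dev)
{
	int ret;

	/* the macro expects the lock to be held on entry */
	spin_lock_irq(&dev->lock);
	ret = wait_event_interruptible_lock_irq_timeout(dev->wq, dev->done,
							dev->lock,
							msecs_to_jiffies(500));
	spin_unlock_irq(&dev->lock);

	if (ret == 0)
		return -ETIMEDOUT;	/* timeout elapsed, condition still false */
	if (ret == -ERESTARTSYS)
		return ret;		/* interrupted by a signal */
	return 0;			/* condition became true in time */
}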
diff --git a/include/net/busy_poll.h b/include/net/busy_poll.h
index 8a358a2c97e6..829627d7b846 100644
--- a/include/net/busy_poll.h
+++ b/include/net/busy_poll.h
@@ -123,6 +123,7 @@ static inline bool sk_busy_loop(struct sock *sk, int nonblock)
123 /* local bh are disabled so it is ok to use _BH */ 123 /* local bh are disabled so it is ok to use _BH */
124 NET_ADD_STATS_BH(sock_net(sk), 124 NET_ADD_STATS_BH(sock_net(sk),
125 LINUX_MIB_BUSYPOLLRXPACKETS, rc); 125 LINUX_MIB_BUSYPOLLRXPACKETS, rc);
126 cpu_relax();
126 127
127 } while (!nonblock && skb_queue_empty(&sk->sk_receive_queue) && 128 } while (!nonblock && skb_queue_empty(&sk->sk_receive_queue) &&
128 !need_resched() && !busy_loop_timeout(end_time)); 129 !need_resched() && !busy_loop_timeout(end_time));
diff --git a/include/net/genetlink.h b/include/net/genetlink.h
index 93024a47e0e2..8e0b6c856a13 100644
--- a/include/net/genetlink.h
+++ b/include/net/genetlink.h
@@ -61,6 +61,7 @@ struct genl_family {
61 struct list_head ops_list; /* private */ 61 struct list_head ops_list; /* private */
62 struct list_head family_list; /* private */ 62 struct list_head family_list; /* private */
63 struct list_head mcast_groups; /* private */ 63 struct list_head mcast_groups; /* private */
64 struct module *module;
64}; 65};
65 66
66/** 67/**
@@ -121,9 +122,24 @@ struct genl_ops {
121 struct list_head ops_list; 122 struct list_head ops_list;
122}; 123};
123 124
124extern int genl_register_family(struct genl_family *family); 125extern int __genl_register_family(struct genl_family *family);
125extern int genl_register_family_with_ops(struct genl_family *family, 126
127static inline int genl_register_family(struct genl_family *family)
128{
129 family->module = THIS_MODULE;
130 return __genl_register_family(family);
131}
132
133extern int __genl_register_family_with_ops(struct genl_family *family,
126 struct genl_ops *ops, size_t n_ops); 134 struct genl_ops *ops, size_t n_ops);
135
136static inline int genl_register_family_with_ops(struct genl_family *family,
137 struct genl_ops *ops, size_t n_ops)
138{
139 family->module = THIS_MODULE;
140 return __genl_register_family_with_ops(family, ops, n_ops);
141}
142
127extern int genl_unregister_family(struct genl_family *family); 143extern int genl_unregister_family(struct genl_family *family);
128extern int genl_register_ops(struct genl_family *, struct genl_ops *ops); 144extern int genl_register_ops(struct genl_family *, struct genl_ops *ops);
129extern int genl_unregister_ops(struct genl_family *, struct genl_ops *ops); 145extern int genl_unregister_ops(struct genl_family *, struct genl_ops *ops);
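The registration entry points become static inlines so that THIS_MODULE is evaluated in the caller and recorded in family->module, presumably letting the genetlink core pin the owning module while its requests or dumps are in flight. Illustration only, with a hypothetical family definition:

/*
 * Sketch, not part of this patch: registering a generic netlink family.
 * Because genl_register_family() is now an inline wrapper, THIS_MODULE
 * below resolves to the calling module and is stashed in family->module.
 */
static struct genl_family my_genl_family = {
	.id	 = GENL_ID_GENERATE,
	.name	 = "MY_EXAMPLE",
	.version = 1,
	.maxattr = 0,
};

static int __init my_genl_init(void)
{
	return genl_register_family(&my_genl_family);	/* records THIS_MODULE */
}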
diff --git a/include/net/ip6_route.h b/include/net/ip6_route.h
index 260f83f16bcf..f667248202b6 100644
--- a/include/net/ip6_route.h
+++ b/include/net/ip6_route.h
@@ -135,6 +135,8 @@ extern void ip6_update_pmtu(struct sk_buff *skb, struct net *net, __be32 mtu,
135extern void ip6_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, 135extern void ip6_sk_update_pmtu(struct sk_buff *skb, struct sock *sk,
136 __be32 mtu); 136 __be32 mtu);
137extern void ip6_redirect(struct sk_buff *skb, struct net *net, int oif, u32 mark); 137extern void ip6_redirect(struct sk_buff *skb, struct net *net, int oif, u32 mark);
138extern void ip6_redirect_no_header(struct sk_buff *skb, struct net *net, int oif,
139 u32 mark);
138extern void ip6_sk_redirect(struct sk_buff *skb, struct sock *sk); 140extern void ip6_sk_redirect(struct sk_buff *skb, struct sock *sk);
139 141
140struct netlink_callback; 142struct netlink_callback;
diff --git a/include/net/mac80211.h b/include/net/mac80211.h
index 5b7a3dadadde..551ba6a6a073 100644
--- a/include/net/mac80211.h
+++ b/include/net/mac80211.h
@@ -1499,6 +1499,7 @@ enum ieee80211_hw_flags {
1499 IEEE80211_HW_SUPPORTS_RC_TABLE = 1<<24, 1499 IEEE80211_HW_SUPPORTS_RC_TABLE = 1<<24,
1500 IEEE80211_HW_P2P_DEV_ADDR_FOR_INTF = 1<<25, 1500 IEEE80211_HW_P2P_DEV_ADDR_FOR_INTF = 1<<25,
1501 IEEE80211_HW_TIMING_BEACON_ONLY = 1<<26, 1501 IEEE80211_HW_TIMING_BEACON_ONLY = 1<<26,
1502 IEEE80211_HW_SUPPORTS_HT_CCK_RATES = 1<<27,
1502}; 1503};
1503 1504
1504/** 1505/**
diff --git a/include/net/route.h b/include/net/route.h
index 2ea40c1b5e00..afdeeb5bec25 100644
--- a/include/net/route.h
+++ b/include/net/route.h
@@ -317,4 +317,12 @@ static inline int ip4_dst_hoplimit(const struct dst_entry *dst)
317 return hoplimit; 317 return hoplimit;
318} 318}
319 319
320static inline int ip_skb_dst_mtu(struct sk_buff *skb)
321{
322 struct inet_sock *inet = skb->sk ? inet_sk(skb->sk) : NULL;
323
324 return (inet && inet->pmtudisc == IP_PMTUDISC_PROBE) ?
325 skb_dst(skb)->dev->mtu : dst_mtu(skb_dst(skb));
326}
327
320#endif /* _ROUTE_H */ 328#endif /* _ROUTE_H */
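The new ip_skb_dst_mtu() helper selects the device MTU for sockets using IP_PMTUDISC_PROBE and the cached path MTU otherwise. A hypothetical output-path fragment showing how such a helper is typically consumed (the "output" callback and surrounding names are assumptions, not code from this patch):

	/* compare the packet against the MTU ip_skb_dst_mtu() picks */
	unsigned int mtu = ip_skb_dst_mtu(skb);

	if (skb->len > mtu && !skb_is_gso(skb) &&
	    !(ip_hdr(skb)->frag_off & htons(IP_DF)))
		return ip_fragment(skb, output);	/* "output" is an assumed callback */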
diff --git a/include/net/xfrm.h b/include/net/xfrm.h
index 94ce082b29dc..e823786e7c66 100644
--- a/include/net/xfrm.h
+++ b/include/net/xfrm.h
@@ -341,10 +341,13 @@ struct xfrm_state_afinfo {
341 struct sk_buff *skb); 341 struct sk_buff *skb);
342 int (*transport_finish)(struct sk_buff *skb, 342 int (*transport_finish)(struct sk_buff *skb,
343 int async); 343 int async);
344 void (*local_error)(struct sk_buff *skb, u32 mtu);
344}; 345};
345 346
346extern int xfrm_state_register_afinfo(struct xfrm_state_afinfo *afinfo); 347extern int xfrm_state_register_afinfo(struct xfrm_state_afinfo *afinfo);
347extern int xfrm_state_unregister_afinfo(struct xfrm_state_afinfo *afinfo); 348extern int xfrm_state_unregister_afinfo(struct xfrm_state_afinfo *afinfo);
349extern struct xfrm_state_afinfo *xfrm_state_get_afinfo(unsigned int family);
350extern void xfrm_state_put_afinfo(struct xfrm_state_afinfo *afinfo);
348 351
349extern void xfrm_state_delete_tunnel(struct xfrm_state *x); 352extern void xfrm_state_delete_tunnel(struct xfrm_state *x);
350 353
@@ -1477,6 +1480,7 @@ extern int xfrm_input_resume(struct sk_buff *skb, int nexthdr);
1477extern int xfrm_output_resume(struct sk_buff *skb, int err); 1480extern int xfrm_output_resume(struct sk_buff *skb, int err);
1478extern int xfrm_output(struct sk_buff *skb); 1481extern int xfrm_output(struct sk_buff *skb);
1479extern int xfrm_inner_extract_output(struct xfrm_state *x, struct sk_buff *skb); 1482extern int xfrm_inner_extract_output(struct xfrm_state *x, struct sk_buff *skb);
1483extern void xfrm_local_error(struct sk_buff *skb, int mtu);
1480extern int xfrm4_extract_header(struct sk_buff *skb); 1484extern int xfrm4_extract_header(struct sk_buff *skb);
1481extern int xfrm4_extract_input(struct xfrm_state *x, struct sk_buff *skb); 1485extern int xfrm4_extract_input(struct xfrm_state *x, struct sk_buff *skb);
1482extern int xfrm4_rcv_encap(struct sk_buff *skb, int nexthdr, __be32 spi, 1486extern int xfrm4_rcv_encap(struct sk_buff *skb, int nexthdr, __be32 spi,
@@ -1497,6 +1501,7 @@ extern int xfrm4_tunnel_register(struct xfrm_tunnel *handler, unsigned short fam
1497extern int xfrm4_tunnel_deregister(struct xfrm_tunnel *handler, unsigned short family); 1501extern int xfrm4_tunnel_deregister(struct xfrm_tunnel *handler, unsigned short family);
1498extern int xfrm4_mode_tunnel_input_register(struct xfrm_tunnel *handler); 1502extern int xfrm4_mode_tunnel_input_register(struct xfrm_tunnel *handler);
1499extern int xfrm4_mode_tunnel_input_deregister(struct xfrm_tunnel *handler); 1503extern int xfrm4_mode_tunnel_input_deregister(struct xfrm_tunnel *handler);
1504extern void xfrm4_local_error(struct sk_buff *skb, u32 mtu);
1500extern int xfrm6_extract_header(struct sk_buff *skb); 1505extern int xfrm6_extract_header(struct sk_buff *skb);
1501extern int xfrm6_extract_input(struct xfrm_state *x, struct sk_buff *skb); 1506extern int xfrm6_extract_input(struct xfrm_state *x, struct sk_buff *skb);
1502extern int xfrm6_rcv_spi(struct sk_buff *skb, int nexthdr, __be32 spi); 1507extern int xfrm6_rcv_spi(struct sk_buff *skb, int nexthdr, __be32 spi);
@@ -1514,6 +1519,7 @@ extern int xfrm6_output(struct sk_buff *skb);
1514extern int xfrm6_output_finish(struct sk_buff *skb); 1519extern int xfrm6_output_finish(struct sk_buff *skb);
1515extern int xfrm6_find_1stfragopt(struct xfrm_state *x, struct sk_buff *skb, 1520extern int xfrm6_find_1stfragopt(struct xfrm_state *x, struct sk_buff *skb,
1516 u8 **prevhdr); 1521 u8 **prevhdr);
1522extern void xfrm6_local_error(struct sk_buff *skb, u32 mtu);
1517 1523
1518#ifdef CONFIG_XFRM 1524#ifdef CONFIG_XFRM
1519extern int xfrm4_udp_encap_rcv(struct sock *sk, struct sk_buff *skb); 1525extern int xfrm4_udp_encap_rcv(struct sock *sk, struct sk_buff *skb);
diff --git a/include/uapi/linux/cm4000_cs.h b/include/uapi/linux/cm4000_cs.h
index bc51f77db918..1217f751a1bc 100644
--- a/include/uapi/linux/cm4000_cs.h
+++ b/include/uapi/linux/cm4000_cs.h
@@ -2,6 +2,7 @@
2#define _UAPI_CM4000_H_ 2#define _UAPI_CM4000_H_
3 3
4#include <linux/types.h> 4#include <linux/types.h>
5#include <linux/ioctl.h>
5 6
6#define MAX_ATR 33 7#define MAX_ATR 33
7 8
diff --git a/include/uapi/linux/ip.h b/include/uapi/linux/ip.h
index 6cf06bfd841b..2fee45bdec0a 100644
--- a/include/uapi/linux/ip.h
+++ b/include/uapi/linux/ip.h
@@ -133,4 +133,38 @@ struct ip_beet_phdr {
133 __u8 reserved; 133 __u8 reserved;
134}; 134};
135 135
136/* index values for the variables in ipv4_devconf */
137enum
138{
139 IPV4_DEVCONF_FORWARDING=1,
140 IPV4_DEVCONF_MC_FORWARDING,
141 IPV4_DEVCONF_PROXY_ARP,
142 IPV4_DEVCONF_ACCEPT_REDIRECTS,
143 IPV4_DEVCONF_SECURE_REDIRECTS,
144 IPV4_DEVCONF_SEND_REDIRECTS,
145 IPV4_DEVCONF_SHARED_MEDIA,
146 IPV4_DEVCONF_RP_FILTER,
147 IPV4_DEVCONF_ACCEPT_SOURCE_ROUTE,
148 IPV4_DEVCONF_BOOTP_RELAY,
149 IPV4_DEVCONF_LOG_MARTIANS,
150 IPV4_DEVCONF_TAG,
151 IPV4_DEVCONF_ARPFILTER,
152 IPV4_DEVCONF_MEDIUM_ID,
153 IPV4_DEVCONF_NOXFRM,
154 IPV4_DEVCONF_NOPOLICY,
155 IPV4_DEVCONF_FORCE_IGMP_VERSION,
156 IPV4_DEVCONF_ARP_ANNOUNCE,
157 IPV4_DEVCONF_ARP_IGNORE,
158 IPV4_DEVCONF_PROMOTE_SECONDARIES,
159 IPV4_DEVCONF_ARP_ACCEPT,
160 IPV4_DEVCONF_ARP_NOTIFY,
161 IPV4_DEVCONF_ACCEPT_LOCAL,
162 IPV4_DEVCONF_SRC_VMARK,
163 IPV4_DEVCONF_PROXY_ARP_PVLAN,
164 IPV4_DEVCONF_ROUTE_LOCALNET,
165 __IPV4_DEVCONF_MAX
166};
167
168#define IPV4_DEVCONF_MAX (__IPV4_DEVCONF_MAX - 1)
169
136#endif /* _UAPI_LINUX_IP_H */ 170#endif /* _UAPI_LINUX_IP_H */
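The IPV4_DEVCONF_* indices, previously kernel-internal, are now exported to userspace; each names a per-device IPv4 knob matching a file under /proc/sys/net/ipv4/conf/<dev>/. A hypothetical helper, purely to illustrate the mapping:

/* Sketch, not part of this patch: map a few exported indices to their
 * /proc/sys/net/ipv4/conf/<dev>/ names. */
#include <linux/ip.h>

static const char *ipv4_devconf_name(int idx)
{
	switch (idx) {
	case IPV4_DEVCONF_FORWARDING:		return "forwarding";
	case IPV4_DEVCONF_RP_FILTER:		return "rp_filter";
	case IPV4_DEVCONF_SEND_REDIRECTS:	return "send_redirects";
	case IPV4_DEVCONF_ACCEPT_LOCAL:		return "accept_local";
	default:				return "?";
	}
}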
diff --git a/init/Kconfig b/init/Kconfig
index 247084be0590..fed81b576f29 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -955,7 +955,7 @@ config MEMCG_SWAP_ENABLED
955 Memory Resource Controller Swap Extension comes with its price in 955 Memory Resource Controller Swap Extension comes with its price in
956 a bigger memory consumption. General purpose distribution kernels 956 a bigger memory consumption. General purpose distribution kernels
957 which want to enable the feature but keep it disabled by default 957 which want to enable the feature but keep it disabled by default
958 and let the user enable it by swapaccount boot command line 958 and let the user enable it by swapaccount=1 boot command line
959 parameter should have this option unselected. 959 parameter should have this option unselected.
960 For those who want to have the feature enabled by default should 960 For those who want to have the feature enabled by default should
961 select this option (if, for some reason, they need to disable it 961 select this option (if, for some reason, they need to disable it
diff --git a/ipc/msg.c b/ipc/msg.c
index bd60d7e159e8..9f29d9e89bac 100644
--- a/ipc/msg.c
+++ b/ipc/msg.c
@@ -839,7 +839,7 @@ static inline void free_copy(struct msg_msg *copy)
839 839
840static struct msg_msg *find_msg(struct msg_queue *msq, long *msgtyp, int mode) 840static struct msg_msg *find_msg(struct msg_queue *msq, long *msgtyp, int mode)
841{ 841{
842 struct msg_msg *msg; 842 struct msg_msg *msg, *found = NULL;
843 long count = 0; 843 long count = 0;
844 844
845 list_for_each_entry(msg, &msq->q_messages, m_list) { 845 list_for_each_entry(msg, &msq->q_messages, m_list) {
@@ -848,6 +848,7 @@ static struct msg_msg *find_msg(struct msg_queue *msq, long *msgtyp, int mode)
848 *msgtyp, mode)) { 848 *msgtyp, mode)) {
849 if (mode == SEARCH_LESSEQUAL && msg->m_type != 1) { 849 if (mode == SEARCH_LESSEQUAL && msg->m_type != 1) {
850 *msgtyp = msg->m_type - 1; 850 *msgtyp = msg->m_type - 1;
851 found = msg;
851 } else if (mode == SEARCH_NUMBER) { 852 } else if (mode == SEARCH_NUMBER) {
852 if (*msgtyp == count) 853 if (*msgtyp == count)
853 return msg; 854 return msg;
@@ -857,7 +858,7 @@ static struct msg_msg *find_msg(struct msg_queue *msq, long *msgtyp, int mode)
857 } 858 }
858 } 859 }
859 860
860 return ERR_PTR(-EAGAIN); 861 return found ?: ERR_PTR(-EAGAIN);
861} 862}
862 863
863long do_msgrcv(int msqid, void __user *buf, size_t bufsz, long msgtyp, int msgflg, 864long do_msgrcv(int msqid, void __user *buf, size_t bufsz, long msgtyp, int msgflg,
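The find_msg() change matters for msgrcv() callers that pass a negative msgtyp, which asks for the message with the lowest type not exceeding |msgtyp|: the best candidate seen so far is now remembered instead of being lost when no lower-typed message follows. A hypothetical userspace illustration of the visible semantics:

/* Sketch, not part of this patch. */
#include <sys/msg.h>

struct msgbuf64 {
	long mtype;
	char mtext[64];
};

static ssize_t read_lowest_type_up_to_3(int qid)
{
	struct msgbuf64 m;

	/* e.g. with types 2 and 3 queued, this must return the type-2 message */
	return msgrcv(qid, &m, sizeof(m.mtext), -3, 0);
}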
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index 781845a013ab..e91963302c0d 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -4480,6 +4480,7 @@ static int cgroup_destroy_locked(struct cgroup *cgrp)
4480 struct dentry *d = cgrp->dentry; 4480 struct dentry *d = cgrp->dentry;
4481 struct cgroup_event *event, *tmp; 4481 struct cgroup_event *event, *tmp;
4482 struct cgroup_subsys *ss; 4482 struct cgroup_subsys *ss;
4483 struct cgroup *child;
4483 bool empty; 4484 bool empty;
4484 4485
4485 lockdep_assert_held(&d->d_inode->i_mutex); 4486 lockdep_assert_held(&d->d_inode->i_mutex);
@@ -4490,12 +4491,28 @@ static int cgroup_destroy_locked(struct cgroup *cgrp)
4490 * @cgrp from being removed while __put_css_set() is in progress. 4491 * @cgrp from being removed while __put_css_set() is in progress.
4491 */ 4492 */
4492 read_lock(&css_set_lock); 4493 read_lock(&css_set_lock);
4493 empty = list_empty(&cgrp->cset_links) && list_empty(&cgrp->children); 4494 empty = list_empty(&cgrp->cset_links);
4494 read_unlock(&css_set_lock); 4495 read_unlock(&css_set_lock);
4495 if (!empty) 4496 if (!empty)
4496 return -EBUSY; 4497 return -EBUSY;
4497 4498
4498 /* 4499 /*
 4500	 * Make sure there are no live children. We can't test ->children
4501 * emptiness as dead children linger on it while being destroyed;
4502 * otherwise, "rmdir parent/child parent" may fail with -EBUSY.
4503 */
4504 empty = true;
4505 rcu_read_lock();
4506 list_for_each_entry_rcu(child, &cgrp->children, sibling) {
4507 empty = cgroup_is_dead(child);
4508 if (!empty)
4509 break;
4510 }
4511 rcu_read_unlock();
4512 if (!empty)
4513 return -EBUSY;
4514
4515 /*
4499 * Block new css_tryget() by killing css refcnts. cgroup core 4516 * Block new css_tryget() by killing css refcnts. cgroup core
4500 * guarantees that, by the time ->css_offline() is invoked, no new 4517 * guarantees that, by the time ->css_offline() is invoked, no new
4501 * css reference will be given out via css_tryget(). We can't 4518 * css reference will be given out via css_tryget(). We can't
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index 010a0083c0ae..ea1966db34f2 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -475,13 +475,17 @@ static int validate_change(const struct cpuset *cur, const struct cpuset *trial)
475 475
476 /* 476 /*
477 * Cpusets with tasks - existing or newly being attached - can't 477 * Cpusets with tasks - existing or newly being attached - can't
478 * have empty cpus_allowed or mems_allowed. 478 * be changed to have empty cpus_allowed or mems_allowed.
479 */ 479 */
480 ret = -ENOSPC; 480 ret = -ENOSPC;
481 if ((cgroup_task_count(cur->css.cgroup) || cur->attach_in_progress) && 481 if ((cgroup_task_count(cur->css.cgroup) || cur->attach_in_progress)) {
482 (cpumask_empty(trial->cpus_allowed) && 482 if (!cpumask_empty(cur->cpus_allowed) &&
483 nodes_empty(trial->mems_allowed))) 483 cpumask_empty(trial->cpus_allowed))
484 goto out; 484 goto out;
485 if (!nodes_empty(cur->mems_allowed) &&
486 nodes_empty(trial->mems_allowed))
487 goto out;
488 }
485 489
486 ret = 0; 490 ret = 0;
487out: 491out:
diff --git a/kernel/fork.c b/kernel/fork.c
index e23bb19e2a3e..bf46287c91a4 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -1177,7 +1177,8 @@ static struct task_struct *copy_process(unsigned long clone_flags,
1177 * don't allow the creation of threads. 1177 * don't allow the creation of threads.
1178 */ 1178 */
1179 if ((clone_flags & (CLONE_VM|CLONE_NEWPID)) && 1179 if ((clone_flags & (CLONE_VM|CLONE_NEWPID)) &&
1180 (task_active_pid_ns(current) != current->nsproxy->pid_ns)) 1180 (task_active_pid_ns(current) !=
1181 current->nsproxy->pid_ns_for_children))
1181 return ERR_PTR(-EINVAL); 1182 return ERR_PTR(-EINVAL);
1182 1183
1183 retval = security_task_create(clone_flags); 1184 retval = security_task_create(clone_flags);
@@ -1351,7 +1352,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
1351 1352
1352 if (pid != &init_struct_pid) { 1353 if (pid != &init_struct_pid) {
1353 retval = -ENOMEM; 1354 retval = -ENOMEM;
1354 pid = alloc_pid(p->nsproxy->pid_ns); 1355 pid = alloc_pid(p->nsproxy->pid_ns_for_children);
1355 if (!pid) 1356 if (!pid)
1356 goto bad_fork_cleanup_io; 1357 goto bad_fork_cleanup_io;
1357 } 1358 }
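The pid_ns → pid_ns_for_children rename in this series encodes what the two call sites above already rely on: a task's own pid namespace is reached via task_active_pid_ns(), while nsproxy only records the namespace that future children will be allocated in (e.g. after setns()). A hypothetical userspace sketch of that behaviour:

/* Sketch, not part of this patch: after setns() the caller keeps its own
 * pid; only children forked afterwards are created in the target
 * namespace (which is what pid_ns_for_children holds). */
#define _GNU_SOURCE
#include <fcntl.h>
#include <sched.h>
#include <sys/types.h>
#include <unistd.h>

static pid_t fork_into_pidns(const char *ns_path)	/* e.g. "/proc/1234/ns/pid" */
{
	int fd = open(ns_path, O_RDONLY);

	if (fd < 0 || setns(fd, CLONE_NEWPID) < 0)
		return -1;
	return fork();	/* the child starts life in the target pid namespace */
}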
diff --git a/kernel/nsproxy.c b/kernel/nsproxy.c
index 364ceab15f0c..997cbb951a3b 100644
--- a/kernel/nsproxy.c
+++ b/kernel/nsproxy.c
@@ -29,15 +29,15 @@
29static struct kmem_cache *nsproxy_cachep; 29static struct kmem_cache *nsproxy_cachep;
30 30
31struct nsproxy init_nsproxy = { 31struct nsproxy init_nsproxy = {
32 .count = ATOMIC_INIT(1), 32 .count = ATOMIC_INIT(1),
33 .uts_ns = &init_uts_ns, 33 .uts_ns = &init_uts_ns,
34#if defined(CONFIG_POSIX_MQUEUE) || defined(CONFIG_SYSVIPC) 34#if defined(CONFIG_POSIX_MQUEUE) || defined(CONFIG_SYSVIPC)
35 .ipc_ns = &init_ipc_ns, 35 .ipc_ns = &init_ipc_ns,
36#endif 36#endif
37 .mnt_ns = NULL, 37 .mnt_ns = NULL,
38 .pid_ns = &init_pid_ns, 38 .pid_ns_for_children = &init_pid_ns,
39#ifdef CONFIG_NET 39#ifdef CONFIG_NET
40 .net_ns = &init_net, 40 .net_ns = &init_net,
41#endif 41#endif
42}; 42};
43 43
@@ -85,9 +85,10 @@ static struct nsproxy *create_new_namespaces(unsigned long flags,
85 goto out_ipc; 85 goto out_ipc;
86 } 86 }
87 87
88 new_nsp->pid_ns = copy_pid_ns(flags, user_ns, tsk->nsproxy->pid_ns); 88 new_nsp->pid_ns_for_children =
89 if (IS_ERR(new_nsp->pid_ns)) { 89 copy_pid_ns(flags, user_ns, tsk->nsproxy->pid_ns_for_children);
90 err = PTR_ERR(new_nsp->pid_ns); 90 if (IS_ERR(new_nsp->pid_ns_for_children)) {
91 err = PTR_ERR(new_nsp->pid_ns_for_children);
91 goto out_pid; 92 goto out_pid;
92 } 93 }
93 94
@@ -100,8 +101,8 @@ static struct nsproxy *create_new_namespaces(unsigned long flags,
100 return new_nsp; 101 return new_nsp;
101 102
102out_net: 103out_net:
103 if (new_nsp->pid_ns) 104 if (new_nsp->pid_ns_for_children)
104 put_pid_ns(new_nsp->pid_ns); 105 put_pid_ns(new_nsp->pid_ns_for_children);
105out_pid: 106out_pid:
106 if (new_nsp->ipc_ns) 107 if (new_nsp->ipc_ns)
107 put_ipc_ns(new_nsp->ipc_ns); 108 put_ipc_ns(new_nsp->ipc_ns);
@@ -174,8 +175,8 @@ void free_nsproxy(struct nsproxy *ns)
174 put_uts_ns(ns->uts_ns); 175 put_uts_ns(ns->uts_ns);
175 if (ns->ipc_ns) 176 if (ns->ipc_ns)
176 put_ipc_ns(ns->ipc_ns); 177 put_ipc_ns(ns->ipc_ns);
177 if (ns->pid_ns) 178 if (ns->pid_ns_for_children)
178 put_pid_ns(ns->pid_ns); 179 put_pid_ns(ns->pid_ns_for_children);
179 put_net(ns->net_ns); 180 put_net(ns->net_ns);
180 kmem_cache_free(nsproxy_cachep, ns); 181 kmem_cache_free(nsproxy_cachep, ns);
181} 182}
diff --git a/kernel/pid_namespace.c b/kernel/pid_namespace.c
index 6917e8edb48e..601bb361c235 100644
--- a/kernel/pid_namespace.c
+++ b/kernel/pid_namespace.c
@@ -349,8 +349,8 @@ static int pidns_install(struct nsproxy *nsproxy, void *ns)
349 if (ancestor != active) 349 if (ancestor != active)
350 return -EINVAL; 350 return -EINVAL;
351 351
352 put_pid_ns(nsproxy->pid_ns); 352 put_pid_ns(nsproxy->pid_ns_for_children);
353 nsproxy->pid_ns = get_pid_ns(new); 353 nsproxy->pid_ns_for_children = get_pid_ns(new);
354 return 0; 354 return 0;
355} 355}
356 356
diff --git a/kernel/time/timer_list.c b/kernel/time/timer_list.c
index 3bdf28323012..61ed862cdd37 100644
--- a/kernel/time/timer_list.c
+++ b/kernel/time/timer_list.c
@@ -265,10 +265,9 @@ static inline void timer_list_header(struct seq_file *m, u64 now)
265static int timer_list_show(struct seq_file *m, void *v) 265static int timer_list_show(struct seq_file *m, void *v)
266{ 266{
267 struct timer_list_iter *iter = v; 267 struct timer_list_iter *iter = v;
268 u64 now = ktime_to_ns(ktime_get());
269 268
270 if (iter->cpu == -1 && !iter->second_pass) 269 if (iter->cpu == -1 && !iter->second_pass)
271 timer_list_header(m, now); 270 timer_list_header(m, iter->now);
272 else if (!iter->second_pass) 271 else if (!iter->second_pass)
273 print_cpu(m, iter->cpu, iter->now); 272 print_cpu(m, iter->cpu, iter->now);
274#ifdef CONFIG_GENERIC_CLOCKEVENTS 273#ifdef CONFIG_GENERIC_CLOCKEVENTS
@@ -298,33 +297,41 @@ void sysrq_timer_list_show(void)
298 return; 297 return;
299} 298}
300 299
301static void *timer_list_start(struct seq_file *file, loff_t *offset) 300static void *move_iter(struct timer_list_iter *iter, loff_t offset)
302{ 301{
303 struct timer_list_iter *iter = file->private; 302 for (; offset; offset--) {
304 303 iter->cpu = cpumask_next(iter->cpu, cpu_online_mask);
305 if (!*offset) { 304 if (iter->cpu >= nr_cpu_ids) {
306 iter->cpu = -1;
307 iter->now = ktime_to_ns(ktime_get());
308 } else if (iter->cpu >= nr_cpu_ids) {
309#ifdef CONFIG_GENERIC_CLOCKEVENTS 305#ifdef CONFIG_GENERIC_CLOCKEVENTS
310 if (!iter->second_pass) { 306 if (!iter->second_pass) {
311 iter->cpu = -1; 307 iter->cpu = -1;
312 iter->second_pass = true; 308 iter->second_pass = true;
313 } else 309 } else
314 return NULL; 310 return NULL;
315#else 311#else
316 return NULL; 312 return NULL;
317#endif 313#endif
314 }
318 } 315 }
319 return iter; 316 return iter;
320} 317}
321 318
319static void *timer_list_start(struct seq_file *file, loff_t *offset)
320{
321 struct timer_list_iter *iter = file->private;
322
323 if (!*offset)
324 iter->now = ktime_to_ns(ktime_get());
325 iter->cpu = -1;
326 iter->second_pass = false;
327 return move_iter(iter, *offset);
328}
329
322static void *timer_list_next(struct seq_file *file, void *v, loff_t *offset) 330static void *timer_list_next(struct seq_file *file, void *v, loff_t *offset)
323{ 331{
324 struct timer_list_iter *iter = file->private; 332 struct timer_list_iter *iter = file->private;
325 iter->cpu = cpumask_next(iter->cpu, cpu_online_mask);
326 ++*offset; 333 ++*offset;
327 return timer_list_start(file, offset); 334 return move_iter(iter, 1);
328} 335}
329 336
330static void timer_list_stop(struct seq_file *seq, void *v) 337static void timer_list_stop(struct seq_file *seq, void *v)
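The rework above folds cursor advancement into a single move_iter() helper shared by the seq_file ->start() and ->next() callbacks, and samples iter->now once per read so the header and per-CPU dumps report a consistent timestamp. For context, the (unchanged) wiring is roughly the following; treat it as a sketch rather than a quote of the file:

static const struct seq_operations timer_list_sops = {
	.start = timer_list_start,	/* position the cursor for *offset */
	.next  = timer_list_next,	/* advance by one entry */
	.stop  = timer_list_stop,
	.show  = timer_list_show,
};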
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 7f5d4be22034..e93f7b9067d8 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -2201,6 +2201,15 @@ __acquires(&pool->lock)
2201 dump_stack(); 2201 dump_stack();
2202 } 2202 }
2203 2203
2204 /*
2205 * The following prevents a kworker from hogging CPU on !PREEMPT
2206 * kernels, where a requeueing work item waiting for something to
 2207	 * happen could deadlock with stop_machine as such a work item could
2208 * indefinitely requeue itself while all other CPUs are trapped in
2209 * stop_machine.
2210 */
2211 cond_resched();
2212
2204 spin_lock_irq(&pool->lock); 2213 spin_lock_irq(&pool->lock);
2205 2214
2206 /* clear cpu intensive status */ 2215 /* clear cpu intensive status */
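The cond_resched() added above targets work items of the following (hypothetical) shape, which on a !PREEMPT kernel would otherwise let a kworker monopolize its CPU and stall stop_machine:

/* Sketch, not part of this patch: a self-requeueing work item.
 * my_hw_ready() is a hypothetical helper. */
static void my_poll_work_fn(struct work_struct *work)
{
	if (!my_hw_ready())
		queue_work(system_wq, work);	/* try again immediately */
}
static DECLARE_WORK(my_poll_work, my_poll_work_fn);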
diff --git a/lib/lz4/lz4_compress.c b/lib/lz4/lz4_compress.c
index fd94058bd7f9..28321d8f75ef 100644
--- a/lib/lz4/lz4_compress.c
+++ b/lib/lz4/lz4_compress.c
@@ -437,7 +437,7 @@ int lz4_compress(const unsigned char *src, size_t src_len,
437exit: 437exit:
438 return ret; 438 return ret;
439} 439}
440EXPORT_SYMBOL_GPL(lz4_compress); 440EXPORT_SYMBOL(lz4_compress);
441 441
442MODULE_LICENSE("GPL"); 442MODULE_LICENSE("Dual BSD/GPL");
443MODULE_DESCRIPTION("LZ4 compressor"); 443MODULE_DESCRIPTION("LZ4 compressor");
diff --git a/lib/lz4/lz4_decompress.c b/lib/lz4/lz4_decompress.c
index d3414eae73a1..411be80ddb46 100644
--- a/lib/lz4/lz4_decompress.c
+++ b/lib/lz4/lz4_decompress.c
@@ -299,7 +299,7 @@ exit_0:
299 return ret; 299 return ret;
300} 300}
301#ifndef STATIC 301#ifndef STATIC
302EXPORT_SYMBOL_GPL(lz4_decompress); 302EXPORT_SYMBOL(lz4_decompress);
303#endif 303#endif
304 304
305int lz4_decompress_unknownoutputsize(const char *src, size_t src_len, 305int lz4_decompress_unknownoutputsize(const char *src, size_t src_len,
@@ -319,8 +319,8 @@ exit_0:
319 return ret; 319 return ret;
320} 320}
321#ifndef STATIC 321#ifndef STATIC
322EXPORT_SYMBOL_GPL(lz4_decompress_unknownoutputsize); 322EXPORT_SYMBOL(lz4_decompress_unknownoutputsize);
323 323
324MODULE_LICENSE("GPL"); 324MODULE_LICENSE("Dual BSD/GPL");
325MODULE_DESCRIPTION("LZ4 Decompressor"); 325MODULE_DESCRIPTION("LZ4 Decompressor");
326#endif 326#endif
diff --git a/lib/lz4/lz4hc_compress.c b/lib/lz4/lz4hc_compress.c
index eb1a74f5e368..f344f76b6559 100644
--- a/lib/lz4/lz4hc_compress.c
+++ b/lib/lz4/lz4hc_compress.c
@@ -533,7 +533,7 @@ int lz4hc_compress(const unsigned char *src, size_t src_len,
533exit: 533exit:
534 return ret; 534 return ret;
535} 535}
536EXPORT_SYMBOL_GPL(lz4hc_compress); 536EXPORT_SYMBOL(lz4hc_compress);
537 537
538MODULE_LICENSE("GPL"); 538MODULE_LICENSE("Dual BSD/GPL");
539MODULE_DESCRIPTION("LZ4HC compressor"); 539MODULE_DESCRIPTION("LZ4HC compressor");
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index c5792a5d87ce..0878ff7c26a9 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -6969,7 +6969,6 @@ struct cgroup_subsys mem_cgroup_subsys = {
6969#ifdef CONFIG_MEMCG_SWAP 6969#ifdef CONFIG_MEMCG_SWAP
6970static int __init enable_swap_account(char *s) 6970static int __init enable_swap_account(char *s)
6971{ 6971{
6972 /* consider enabled if no parameter or 1 is given */
6973 if (!strcmp(s, "1")) 6972 if (!strcmp(s, "1"))
6974 really_do_swap_account = 1; 6973 really_do_swap_account = 1;
6975 else if (!strcmp(s, "0")) 6974 else if (!strcmp(s, "0"))
diff --git a/mm/mremap.c b/mm/mremap.c
index 457d34ef3bf2..0843feb66f3d 100644
--- a/mm/mremap.c
+++ b/mm/mremap.c
@@ -15,6 +15,7 @@
15#include <linux/swap.h> 15#include <linux/swap.h>
16#include <linux/capability.h> 16#include <linux/capability.h>
17#include <linux/fs.h> 17#include <linux/fs.h>
18#include <linux/swapops.h>
18#include <linux/highmem.h> 19#include <linux/highmem.h>
19#include <linux/security.h> 20#include <linux/security.h>
20#include <linux/syscalls.h> 21#include <linux/syscalls.h>
@@ -69,6 +70,23 @@ static pmd_t *alloc_new_pmd(struct mm_struct *mm, struct vm_area_struct *vma,
69 return pmd; 70 return pmd;
70} 71}
71 72
73static pte_t move_soft_dirty_pte(pte_t pte)
74{
75 /*
 76	 * Set the soft-dirty bit so userspace can
 77	 * notice that the ptes were moved.
78 */
79#ifdef CONFIG_MEM_SOFT_DIRTY
80 if (pte_present(pte))
81 pte = pte_mksoft_dirty(pte);
82 else if (is_swap_pte(pte))
83 pte = pte_swp_mksoft_dirty(pte);
84 else if (pte_file(pte))
85 pte = pte_file_mksoft_dirty(pte);
86#endif
87 return pte;
88}
89
72static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd, 90static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
73 unsigned long old_addr, unsigned long old_end, 91 unsigned long old_addr, unsigned long old_end,
74 struct vm_area_struct *new_vma, pmd_t *new_pmd, 92 struct vm_area_struct *new_vma, pmd_t *new_pmd,
@@ -126,7 +144,8 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
126 continue; 144 continue;
127 pte = ptep_get_and_clear(mm, old_addr, old_pte); 145 pte = ptep_get_and_clear(mm, old_addr, old_pte);
128 pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr); 146 pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
129 set_pte_at(mm, new_addr, new_pte, pte_mksoft_dirty(pte)); 147 pte = move_soft_dirty_pte(pte);
148 set_pte_at(mm, new_addr, new_pte, pte);
130 } 149 }
131 150
132 arch_leave_lazy_mmu_mode(); 151 arch_leave_lazy_mmu_mode();
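For background on move_soft_dirty_pte(): the soft-dirty mechanism lets a checkpointing tool clear the bits through /proc/<pid>/clear_refs and later read them back from /proc/<pid>/pagemap (bit 55) to see which pages changed; marking moved PTEs soft-dirty keeps mremap() from hiding such pages. A hypothetical sketch of the clear step:

/* Sketch, not part of this patch. */
#include <fcntl.h>
#include <stdio.h>
#include <sys/types.h>
#include <unistd.h>

static int clear_soft_dirty(pid_t pid)
{
	char path[64];
	int fd;

	snprintf(path, sizeof(path), "/proc/%d/clear_refs", pid);
	fd = open(path, O_WRONLY);
	if (fd < 0)
		return -1;
	write(fd, "4", 1);	/* "4" clears the soft-dirty bits */
	close(fd);
	return 0;
}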
diff --git a/mm/shmem.c b/mm/shmem.c
index 8335dbd3fc35..e43dc555069d 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -2909,14 +2909,8 @@ EXPORT_SYMBOL_GPL(shmem_truncate_range);
2909 2909
2910/* common code */ 2910/* common code */
2911 2911
2912static char *shmem_dname(struct dentry *dentry, char *buffer, int buflen)
2913{
2914 return dynamic_dname(dentry, buffer, buflen, "/%s (deleted)",
2915 dentry->d_name.name);
2916}
2917
2918static struct dentry_operations anon_ops = { 2912static struct dentry_operations anon_ops = {
2919 .d_dname = shmem_dname 2913 .d_dname = simple_dname
2920}; 2914};
2921 2915
2922/** 2916/**
diff --git a/mm/slab.h b/mm/slab.h
index 620ceeddbe1a..a535033f7e9a 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -162,6 +162,8 @@ static inline const char *cache_name(struct kmem_cache *s)
162 162
163static inline struct kmem_cache *cache_from_memcg(struct kmem_cache *s, int idx) 163static inline struct kmem_cache *cache_from_memcg(struct kmem_cache *s, int idx)
164{ 164{
165 if (!s->memcg_params)
166 return NULL;
165 return s->memcg_params->memcg_caches[idx]; 167 return s->memcg_params->memcg_caches[idx];
166} 168}
167 169
diff --git a/net/batman-adv/unicast.c b/net/batman-adv/unicast.c
index 688a0419756b..857e1b8349ee 100644
--- a/net/batman-adv/unicast.c
+++ b/net/batman-adv/unicast.c
@@ -432,12 +432,16 @@ find_router:
432 432
433 switch (packet_type) { 433 switch (packet_type) {
434 case BATADV_UNICAST: 434 case BATADV_UNICAST:
435 batadv_unicast_prepare_skb(skb, orig_node); 435 if (!batadv_unicast_prepare_skb(skb, orig_node))
436 goto out;
437
436 header_len = sizeof(struct batadv_unicast_packet); 438 header_len = sizeof(struct batadv_unicast_packet);
437 break; 439 break;
438 case BATADV_UNICAST_4ADDR: 440 case BATADV_UNICAST_4ADDR:
439 batadv_unicast_4addr_prepare_skb(bat_priv, skb, orig_node, 441 if (!batadv_unicast_4addr_prepare_skb(bat_priv, skb, orig_node,
440 packet_subtype); 442 packet_subtype))
443 goto out;
444
441 header_len = sizeof(struct batadv_unicast_4addr_packet); 445 header_len = sizeof(struct batadv_unicast_4addr_packet);
442 break; 446 break;
443 default: 447 default:
diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c
index 69363bd37f64..89659d4ed1f9 100644
--- a/net/bridge/br_device.c
+++ b/net/bridge/br_device.c
@@ -71,7 +71,7 @@ netdev_tx_t br_dev_xmit(struct sk_buff *skb, struct net_device *dev)
71 71
72 mdst = br_mdb_get(br, skb, vid); 72 mdst = br_mdb_get(br, skb, vid);
73 if ((mdst || BR_INPUT_SKB_CB_MROUTERS_ONLY(skb)) && 73 if ((mdst || BR_INPUT_SKB_CB_MROUTERS_ONLY(skb)) &&
74 br_multicast_querier_exists(br)) 74 br_multicast_querier_exists(br, eth_hdr(skb)))
75 br_multicast_deliver(mdst, skb); 75 br_multicast_deliver(mdst, skb);
76 else 76 else
77 br_flood_deliver(br, skb, false); 77 br_flood_deliver(br, skb, false);
diff --git a/net/bridge/br_fdb.c b/net/bridge/br_fdb.c
index 60aca9109a50..ffd5874f2592 100644
--- a/net/bridge/br_fdb.c
+++ b/net/bridge/br_fdb.c
@@ -161,7 +161,7 @@ void br_fdb_change_mac_address(struct net_bridge *br, const u8 *newaddr)
161 if (!pv) 161 if (!pv)
162 return; 162 return;
163 163
164 for_each_set_bit_from(vid, pv->vlan_bitmap, BR_VLAN_BITMAP_LEN) { 164 for_each_set_bit_from(vid, pv->vlan_bitmap, VLAN_N_VID) {
165 f = __br_fdb_get(br, br->dev->dev_addr, vid); 165 f = __br_fdb_get(br, br->dev->dev_addr, vid);
166 if (f && f->is_local && !f->dst) 166 if (f && f->is_local && !f->dst)
167 fdb_delete(br, f); 167 fdb_delete(br, f);
@@ -730,7 +730,7 @@ int br_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
730 /* VID was specified, so use it. */ 730 /* VID was specified, so use it. */
731 err = __br_fdb_add(ndm, p, addr, nlh_flags, vid); 731 err = __br_fdb_add(ndm, p, addr, nlh_flags, vid);
732 } else { 732 } else {
733 if (!pv || bitmap_empty(pv->vlan_bitmap, BR_VLAN_BITMAP_LEN)) { 733 if (!pv || bitmap_empty(pv->vlan_bitmap, VLAN_N_VID)) {
734 err = __br_fdb_add(ndm, p, addr, nlh_flags, 0); 734 err = __br_fdb_add(ndm, p, addr, nlh_flags, 0);
735 goto out; 735 goto out;
736 } 736 }
@@ -739,7 +739,7 @@ int br_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
739 * specify a VLAN. To be nice, add/update entry for every 739 * specify a VLAN. To be nice, add/update entry for every
740 * vlan on this port. 740 * vlan on this port.
741 */ 741 */
742 for_each_set_bit(vid, pv->vlan_bitmap, BR_VLAN_BITMAP_LEN) { 742 for_each_set_bit(vid, pv->vlan_bitmap, VLAN_N_VID) {
743 err = __br_fdb_add(ndm, p, addr, nlh_flags, vid); 743 err = __br_fdb_add(ndm, p, addr, nlh_flags, vid);
744 if (err) 744 if (err)
745 goto out; 745 goto out;
@@ -817,7 +817,7 @@ int br_fdb_delete(struct ndmsg *ndm, struct nlattr *tb[],
817 817
818 err = __br_fdb_delete(p, addr, vid); 818 err = __br_fdb_delete(p, addr, vid);
819 } else { 819 } else {
820 if (!pv || bitmap_empty(pv->vlan_bitmap, BR_VLAN_BITMAP_LEN)) { 820 if (!pv || bitmap_empty(pv->vlan_bitmap, VLAN_N_VID)) {
821 err = __br_fdb_delete(p, addr, 0); 821 err = __br_fdb_delete(p, addr, 0);
822 goto out; 822 goto out;
823 } 823 }
@@ -827,7 +827,7 @@ int br_fdb_delete(struct ndmsg *ndm, struct nlattr *tb[],
827 * vlan on this port. 827 * vlan on this port.
828 */ 828 */
829 err = -ENOENT; 829 err = -ENOENT;
830 for_each_set_bit(vid, pv->vlan_bitmap, BR_VLAN_BITMAP_LEN) { 830 for_each_set_bit(vid, pv->vlan_bitmap, VLAN_N_VID) {
831 err &= __br_fdb_delete(p, addr, vid); 831 err &= __br_fdb_delete(p, addr, vid);
832 } 832 }
833 } 833 }
diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c
index 8c561c0aa636..a2fd37ec35f7 100644
--- a/net/bridge/br_input.c
+++ b/net/bridge/br_input.c
@@ -102,7 +102,7 @@ int br_handle_frame_finish(struct sk_buff *skb)
102 } else if (is_multicast_ether_addr(dest)) { 102 } else if (is_multicast_ether_addr(dest)) {
103 mdst = br_mdb_get(br, skb, vid); 103 mdst = br_mdb_get(br, skb, vid);
104 if ((mdst || BR_INPUT_SKB_CB_MROUTERS_ONLY(skb)) && 104 if ((mdst || BR_INPUT_SKB_CB_MROUTERS_ONLY(skb)) &&
105 br_multicast_querier_exists(br)) { 105 br_multicast_querier_exists(br, eth_hdr(skb))) {
106 if ((mdst && mdst->mglist) || 106 if ((mdst && mdst->mglist) ||
107 br_multicast_is_router(br)) 107 br_multicast_is_router(br))
108 skb2 = skb; 108 skb2 = skb;
diff --git a/net/bridge/br_mdb.c b/net/bridge/br_mdb.c
index 0daae3ec2355..6319c4333c39 100644
--- a/net/bridge/br_mdb.c
+++ b/net/bridge/br_mdb.c
@@ -414,16 +414,20 @@ static int __br_mdb_del(struct net_bridge *br, struct br_mdb_entry *entry)
414 if (!netif_running(br->dev) || br->multicast_disabled) 414 if (!netif_running(br->dev) || br->multicast_disabled)
415 return -EINVAL; 415 return -EINVAL;
416 416
417 if (timer_pending(&br->multicast_querier_timer))
418 return -EBUSY;
419
420 ip.proto = entry->addr.proto; 417 ip.proto = entry->addr.proto;
421 if (ip.proto == htons(ETH_P_IP)) 418 if (ip.proto == htons(ETH_P_IP)) {
419 if (timer_pending(&br->ip4_querier.timer))
420 return -EBUSY;
421
422 ip.u.ip4 = entry->addr.u.ip4; 422 ip.u.ip4 = entry->addr.u.ip4;
423#if IS_ENABLED(CONFIG_IPV6) 423#if IS_ENABLED(CONFIG_IPV6)
424 else 424 } else {
425 if (timer_pending(&br->ip6_querier.timer))
426 return -EBUSY;
427
425 ip.u.ip6 = entry->addr.u.ip6; 428 ip.u.ip6 = entry->addr.u.ip6;
426#endif 429#endif
430 }
427 431
428 spin_lock_bh(&br->multicast_lock); 432 spin_lock_bh(&br->multicast_lock);
429 mdb = mlock_dereference(br->mdb, br); 433 mdb = mlock_dereference(br->mdb, br);
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
index 08e576ada0b2..bbcb43582496 100644
--- a/net/bridge/br_multicast.c
+++ b/net/bridge/br_multicast.c
@@ -33,7 +33,8 @@
33 33
34#include "br_private.h" 34#include "br_private.h"
35 35
36static void br_multicast_start_querier(struct net_bridge *br); 36static void br_multicast_start_querier(struct net_bridge *br,
37 struct bridge_mcast_query *query);
37unsigned int br_mdb_rehash_seq; 38unsigned int br_mdb_rehash_seq;
38 39
39static inline int br_ip_equal(const struct br_ip *a, const struct br_ip *b) 40static inline int br_ip_equal(const struct br_ip *a, const struct br_ip *b)
@@ -755,20 +756,35 @@ static void br_multicast_local_router_expired(unsigned long data)
755{ 756{
756} 757}
757 758
758static void br_multicast_querier_expired(unsigned long data) 759static void br_multicast_querier_expired(struct net_bridge *br,
760 struct bridge_mcast_query *query)
759{ 761{
760 struct net_bridge *br = (void *)data;
761
762 spin_lock(&br->multicast_lock); 762 spin_lock(&br->multicast_lock);
763 if (!netif_running(br->dev) || br->multicast_disabled) 763 if (!netif_running(br->dev) || br->multicast_disabled)
764 goto out; 764 goto out;
765 765
766 br_multicast_start_querier(br); 766 br_multicast_start_querier(br, query);
767 767
768out: 768out:
769 spin_unlock(&br->multicast_lock); 769 spin_unlock(&br->multicast_lock);
770} 770}
771 771
772static void br_ip4_multicast_querier_expired(unsigned long data)
773{
774 struct net_bridge *br = (void *)data;
775
776 br_multicast_querier_expired(br, &br->ip4_query);
777}
778
779#if IS_ENABLED(CONFIG_IPV6)
780static void br_ip6_multicast_querier_expired(unsigned long data)
781{
782 struct net_bridge *br = (void *)data;
783
784 br_multicast_querier_expired(br, &br->ip6_query);
785}
786#endif
787
772static void __br_multicast_send_query(struct net_bridge *br, 788static void __br_multicast_send_query(struct net_bridge *br,
773 struct net_bridge_port *port, 789 struct net_bridge_port *port,
774 struct br_ip *ip) 790 struct br_ip *ip)
@@ -789,37 +805,45 @@ static void __br_multicast_send_query(struct net_bridge *br,
789} 805}
790 806
791static void br_multicast_send_query(struct net_bridge *br, 807static void br_multicast_send_query(struct net_bridge *br,
792 struct net_bridge_port *port, u32 sent) 808 struct net_bridge_port *port,
809 struct bridge_mcast_query *query)
793{ 810{
794 unsigned long time; 811 unsigned long time;
795 struct br_ip br_group; 812 struct br_ip br_group;
813 struct bridge_mcast_querier *querier = NULL;
796 814
797 if (!netif_running(br->dev) || br->multicast_disabled || 815 if (!netif_running(br->dev) || br->multicast_disabled ||
798 !br->multicast_querier || 816 !br->multicast_querier)
799 timer_pending(&br->multicast_querier_timer))
800 return; 817 return;
801 818
802 memset(&br_group.u, 0, sizeof(br_group.u)); 819 memset(&br_group.u, 0, sizeof(br_group.u));
803 820
804 br_group.proto = htons(ETH_P_IP); 821 if (port ? (query == &port->ip4_query) :
805 __br_multicast_send_query(br, port, &br_group); 822 (query == &br->ip4_query)) {
806 823 querier = &br->ip4_querier;
824 br_group.proto = htons(ETH_P_IP);
807#if IS_ENABLED(CONFIG_IPV6) 825#if IS_ENABLED(CONFIG_IPV6)
808 br_group.proto = htons(ETH_P_IPV6); 826 } else {
809 __br_multicast_send_query(br, port, &br_group); 827 querier = &br->ip6_querier;
828 br_group.proto = htons(ETH_P_IPV6);
810#endif 829#endif
830 }
831
832 if (!querier || timer_pending(&querier->timer))
833 return;
834
835 __br_multicast_send_query(br, port, &br_group);
811 836
812 time = jiffies; 837 time = jiffies;
813 time += sent < br->multicast_startup_query_count ? 838 time += query->startup_sent < br->multicast_startup_query_count ?
814 br->multicast_startup_query_interval : 839 br->multicast_startup_query_interval :
815 br->multicast_query_interval; 840 br->multicast_query_interval;
816 mod_timer(port ? &port->multicast_query_timer : 841 mod_timer(&query->timer, time);
817 &br->multicast_query_timer, time);
818} 842}
819 843
820static void br_multicast_port_query_expired(unsigned long data) 844static void br_multicast_port_query_expired(struct net_bridge_port *port,
845 struct bridge_mcast_query *query)
821{ 846{
822 struct net_bridge_port *port = (void *)data;
823 struct net_bridge *br = port->br; 847 struct net_bridge *br = port->br;
824 848
825 spin_lock(&br->multicast_lock); 849 spin_lock(&br->multicast_lock);
@@ -827,25 +851,43 @@ static void br_multicast_port_query_expired(unsigned long data)
827 port->state == BR_STATE_BLOCKING) 851 port->state == BR_STATE_BLOCKING)
828 goto out; 852 goto out;
829 853
830 if (port->multicast_startup_queries_sent < 854 if (query->startup_sent < br->multicast_startup_query_count)
831 br->multicast_startup_query_count) 855 query->startup_sent++;
832 port->multicast_startup_queries_sent++;
833 856
834 br_multicast_send_query(port->br, port, 857 br_multicast_send_query(port->br, port, query);
835 port->multicast_startup_queries_sent);
836 858
837out: 859out:
838 spin_unlock(&br->multicast_lock); 860 spin_unlock(&br->multicast_lock);
839} 861}
840 862
863static void br_ip4_multicast_port_query_expired(unsigned long data)
864{
865 struct net_bridge_port *port = (void *)data;
866
867 br_multicast_port_query_expired(port, &port->ip4_query);
868}
869
870#if IS_ENABLED(CONFIG_IPV6)
871static void br_ip6_multicast_port_query_expired(unsigned long data)
872{
873 struct net_bridge_port *port = (void *)data;
874
875 br_multicast_port_query_expired(port, &port->ip6_query);
876}
877#endif
878
841void br_multicast_add_port(struct net_bridge_port *port) 879void br_multicast_add_port(struct net_bridge_port *port)
842{ 880{
843 port->multicast_router = 1; 881 port->multicast_router = 1;
844 882
845 setup_timer(&port->multicast_router_timer, br_multicast_router_expired, 883 setup_timer(&port->multicast_router_timer, br_multicast_router_expired,
846 (unsigned long)port); 884 (unsigned long)port);
847 setup_timer(&port->multicast_query_timer, 885 setup_timer(&port->ip4_query.timer, br_ip4_multicast_port_query_expired,
848 br_multicast_port_query_expired, (unsigned long)port); 886 (unsigned long)port);
887#if IS_ENABLED(CONFIG_IPV6)
888 setup_timer(&port->ip6_query.timer, br_ip6_multicast_port_query_expired,
889 (unsigned long)port);
890#endif
849} 891}
850 892
851void br_multicast_del_port(struct net_bridge_port *port) 893void br_multicast_del_port(struct net_bridge_port *port)
@@ -853,13 +895,13 @@ void br_multicast_del_port(struct net_bridge_port *port)
853 del_timer_sync(&port->multicast_router_timer); 895 del_timer_sync(&port->multicast_router_timer);
854} 896}
855 897
856static void __br_multicast_enable_port(struct net_bridge_port *port) 898static void br_multicast_enable(struct bridge_mcast_query *query)
857{ 899{
858 port->multicast_startup_queries_sent = 0; 900 query->startup_sent = 0;
859 901
860 if (try_to_del_timer_sync(&port->multicast_query_timer) >= 0 || 902 if (try_to_del_timer_sync(&query->timer) >= 0 ||
861 del_timer(&port->multicast_query_timer)) 903 del_timer(&query->timer))
862 mod_timer(&port->multicast_query_timer, jiffies); 904 mod_timer(&query->timer, jiffies);
863} 905}
864 906
865void br_multicast_enable_port(struct net_bridge_port *port) 907void br_multicast_enable_port(struct net_bridge_port *port)
@@ -870,7 +912,10 @@ void br_multicast_enable_port(struct net_bridge_port *port)
870 if (br->multicast_disabled || !netif_running(br->dev)) 912 if (br->multicast_disabled || !netif_running(br->dev))
871 goto out; 913 goto out;
872 914
873 __br_multicast_enable_port(port); 915 br_multicast_enable(&port->ip4_query);
916#if IS_ENABLED(CONFIG_IPV6)
917 br_multicast_enable(&port->ip6_query);
918#endif
874 919
875out: 920out:
876 spin_unlock(&br->multicast_lock); 921 spin_unlock(&br->multicast_lock);
@@ -889,7 +934,10 @@ void br_multicast_disable_port(struct net_bridge_port *port)
889 if (!hlist_unhashed(&port->rlist)) 934 if (!hlist_unhashed(&port->rlist))
890 hlist_del_init_rcu(&port->rlist); 935 hlist_del_init_rcu(&port->rlist);
891 del_timer(&port->multicast_router_timer); 936 del_timer(&port->multicast_router_timer);
892 del_timer(&port->multicast_query_timer); 937 del_timer(&port->ip4_query.timer);
938#if IS_ENABLED(CONFIG_IPV6)
939 del_timer(&port->ip6_query.timer);
940#endif
893 spin_unlock(&br->multicast_lock); 941 spin_unlock(&br->multicast_lock);
894} 942}
895 943
@@ -1014,14 +1062,15 @@ static int br_ip6_multicast_mld2_report(struct net_bridge *br,
1014} 1062}
1015#endif 1063#endif
1016 1064
1017static void br_multicast_update_querier_timer(struct net_bridge *br, 1065static void
1018 unsigned long max_delay) 1066br_multicast_update_querier_timer(struct net_bridge *br,
1067 struct bridge_mcast_querier *querier,
1068 unsigned long max_delay)
1019{ 1069{
1020 if (!timer_pending(&br->multicast_querier_timer)) 1070 if (!timer_pending(&querier->timer))
1021 br->multicast_querier_delay_time = jiffies + max_delay; 1071 querier->delay_time = jiffies + max_delay;
1022 1072
1023 mod_timer(&br->multicast_querier_timer, 1073 mod_timer(&querier->timer, jiffies + br->multicast_querier_interval);
1024 jiffies + br->multicast_querier_interval);
1025} 1074}
1026 1075
1027/* 1076/*
@@ -1074,12 +1123,13 @@ timer:
1074 1123
1075static void br_multicast_query_received(struct net_bridge *br, 1124static void br_multicast_query_received(struct net_bridge *br,
1076 struct net_bridge_port *port, 1125 struct net_bridge_port *port,
1126 struct bridge_mcast_querier *querier,
1077 int saddr, 1127 int saddr,
1078 unsigned long max_delay) 1128 unsigned long max_delay)
1079{ 1129{
1080 if (saddr) 1130 if (saddr)
1081 br_multicast_update_querier_timer(br, max_delay); 1131 br_multicast_update_querier_timer(br, querier, max_delay);
1082 else if (timer_pending(&br->multicast_querier_timer)) 1132 else if (timer_pending(&querier->timer))
1083 return; 1133 return;
1084 1134
1085 br_multicast_mark_router(br, port); 1135 br_multicast_mark_router(br, port);
@@ -1129,7 +1179,8 @@ static int br_ip4_multicast_query(struct net_bridge *br,
1129 IGMPV3_MRC(ih3->code) * (HZ / IGMP_TIMER_SCALE) : 1; 1179 IGMPV3_MRC(ih3->code) * (HZ / IGMP_TIMER_SCALE) : 1;
1130 } 1180 }
1131 1181
1132 br_multicast_query_received(br, port, !!iph->saddr, max_delay); 1182 br_multicast_query_received(br, port, &br->ip4_querier, !!iph->saddr,
1183 max_delay);
1133 1184
1134 if (!group) 1185 if (!group)
1135 goto out; 1186 goto out;
@@ -1203,11 +1254,12 @@ static int br_ip6_multicast_query(struct net_bridge *br,
1203 mld2q = (struct mld2_query *)icmp6_hdr(skb); 1254 mld2q = (struct mld2_query *)icmp6_hdr(skb);
1204 if (!mld2q->mld2q_nsrcs) 1255 if (!mld2q->mld2q_nsrcs)
1205 group = &mld2q->mld2q_mca; 1256 group = &mld2q->mld2q_mca;
1206 max_delay = mld2q->mld2q_mrc ? MLDV2_MRC(ntohs(mld2q->mld2q_mrc)) : 1; 1257
1258 max_delay = max(msecs_to_jiffies(MLDV2_MRC(ntohs(mld2q->mld2q_mrc))), 1UL);
1207 } 1259 }
1208 1260
1209 br_multicast_query_received(br, port, !ipv6_addr_any(&ip6h->saddr), 1261 br_multicast_query_received(br, port, &br->ip6_querier,
1210 max_delay); 1262 !ipv6_addr_any(&ip6h->saddr), max_delay);
1211 1263
1212 if (!group) 1264 if (!group)
1213 goto out; 1265 goto out;
@@ -1244,7 +1296,9 @@ out:
1244 1296
1245static void br_multicast_leave_group(struct net_bridge *br, 1297static void br_multicast_leave_group(struct net_bridge *br,
1246 struct net_bridge_port *port, 1298 struct net_bridge_port *port,
1247 struct br_ip *group) 1299 struct br_ip *group,
1300 struct bridge_mcast_querier *querier,
1301 struct bridge_mcast_query *query)
1248{ 1302{
1249 struct net_bridge_mdb_htable *mdb; 1303 struct net_bridge_mdb_htable *mdb;
1250 struct net_bridge_mdb_entry *mp; 1304 struct net_bridge_mdb_entry *mp;
@@ -1255,7 +1309,7 @@ static void br_multicast_leave_group(struct net_bridge *br,
1255 spin_lock(&br->multicast_lock); 1309 spin_lock(&br->multicast_lock);
1256 if (!netif_running(br->dev) || 1310 if (!netif_running(br->dev) ||
1257 (port && port->state == BR_STATE_DISABLED) || 1311 (port && port->state == BR_STATE_DISABLED) ||
1258 timer_pending(&br->multicast_querier_timer)) 1312 timer_pending(&querier->timer))
1259 goto out; 1313 goto out;
1260 1314
1261 mdb = mlock_dereference(br->mdb, br); 1315 mdb = mlock_dereference(br->mdb, br);
@@ -1263,14 +1317,13 @@ static void br_multicast_leave_group(struct net_bridge *br,
1263 if (!mp) 1317 if (!mp)
1264 goto out; 1318 goto out;
1265 1319
1266 if (br->multicast_querier && 1320 if (br->multicast_querier) {
1267 !timer_pending(&br->multicast_querier_timer)) {
1268 __br_multicast_send_query(br, port, &mp->addr); 1321 __br_multicast_send_query(br, port, &mp->addr);
1269 1322
1270 time = jiffies + br->multicast_last_member_count * 1323 time = jiffies + br->multicast_last_member_count *
1271 br->multicast_last_member_interval; 1324 br->multicast_last_member_interval;
1272 mod_timer(port ? &port->multicast_query_timer : 1325
1273 &br->multicast_query_timer, time); 1326 mod_timer(&query->timer, time);
1274 1327
1275 for (p = mlock_dereference(mp->ports, br); 1328 for (p = mlock_dereference(mp->ports, br);
1276 p != NULL; 1329 p != NULL;
@@ -1323,7 +1376,6 @@ static void br_multicast_leave_group(struct net_bridge *br,
1323 mod_timer(&mp->timer, time); 1376 mod_timer(&mp->timer, time);
1324 } 1377 }
1325 } 1378 }
1326
1327out: 1379out:
1328 spin_unlock(&br->multicast_lock); 1380 spin_unlock(&br->multicast_lock);
1329} 1381}
@@ -1334,6 +1386,8 @@ static void br_ip4_multicast_leave_group(struct net_bridge *br,
1334 __u16 vid) 1386 __u16 vid)
1335{ 1387{
1336 struct br_ip br_group; 1388 struct br_ip br_group;
1389 struct bridge_mcast_query *query = port ? &port->ip4_query :
1390 &br->ip4_query;
1337 1391
1338 if (ipv4_is_local_multicast(group)) 1392 if (ipv4_is_local_multicast(group))
1339 return; 1393 return;
@@ -1342,7 +1396,7 @@ static void br_ip4_multicast_leave_group(struct net_bridge *br,
1342 br_group.proto = htons(ETH_P_IP); 1396 br_group.proto = htons(ETH_P_IP);
1343 br_group.vid = vid; 1397 br_group.vid = vid;
1344 1398
1345 br_multicast_leave_group(br, port, &br_group); 1399 br_multicast_leave_group(br, port, &br_group, &br->ip4_querier, query);
1346} 1400}
1347 1401
1348#if IS_ENABLED(CONFIG_IPV6) 1402#if IS_ENABLED(CONFIG_IPV6)
@@ -1352,6 +1406,9 @@ static void br_ip6_multicast_leave_group(struct net_bridge *br,
1352 __u16 vid) 1406 __u16 vid)
1353{ 1407{
1354 struct br_ip br_group; 1408 struct br_ip br_group;
1409 struct bridge_mcast_query *query = port ? &port->ip6_query :
1410 &br->ip6_query;
1411
1355 1412
1356 if (!ipv6_is_transient_multicast(group)) 1413 if (!ipv6_is_transient_multicast(group))
1357 return; 1414 return;
@@ -1360,7 +1417,7 @@ static void br_ip6_multicast_leave_group(struct net_bridge *br,
1360 br_group.proto = htons(ETH_P_IPV6); 1417 br_group.proto = htons(ETH_P_IPV6);
1361 br_group.vid = vid; 1418 br_group.vid = vid;
1362 1419
1363 br_multicast_leave_group(br, port, &br_group); 1420 br_multicast_leave_group(br, port, &br_group, &br->ip6_querier, query);
1364} 1421}
1365#endif 1422#endif
1366 1423
@@ -1622,19 +1679,32 @@ int br_multicast_rcv(struct net_bridge *br, struct net_bridge_port *port,
1622 return 0; 1679 return 0;
1623} 1680}
1624 1681
1625static void br_multicast_query_expired(unsigned long data) 1682static void br_multicast_query_expired(struct net_bridge *br,
1683 struct bridge_mcast_query *query)
1684{
1685 spin_lock(&br->multicast_lock);
1686 if (query->startup_sent < br->multicast_startup_query_count)
1687 query->startup_sent++;
1688
1689 br_multicast_send_query(br, NULL, query);
1690 spin_unlock(&br->multicast_lock);
1691}
1692
1693static void br_ip4_multicast_query_expired(unsigned long data)
1626{ 1694{
1627 struct net_bridge *br = (void *)data; 1695 struct net_bridge *br = (void *)data;
1628 1696
1629 spin_lock(&br->multicast_lock); 1697 br_multicast_query_expired(br, &br->ip4_query);
1630 if (br->multicast_startup_queries_sent < 1698}
1631 br->multicast_startup_query_count)
1632 br->multicast_startup_queries_sent++;
1633 1699
1634 br_multicast_send_query(br, NULL, br->multicast_startup_queries_sent); 1700#if IS_ENABLED(CONFIG_IPV6)
1701static void br_ip6_multicast_query_expired(unsigned long data)
1702{
1703 struct net_bridge *br = (void *)data;
1635 1704
1636 spin_unlock(&br->multicast_lock); 1705 br_multicast_query_expired(br, &br->ip6_query);
1637} 1706}
1707#endif
1638 1708
1639void br_multicast_init(struct net_bridge *br) 1709void br_multicast_init(struct net_bridge *br)
1640{ 1710{
@@ -1654,25 +1724,43 @@ void br_multicast_init(struct net_bridge *br)
1654 br->multicast_querier_interval = 255 * HZ; 1724 br->multicast_querier_interval = 255 * HZ;
1655 br->multicast_membership_interval = 260 * HZ; 1725 br->multicast_membership_interval = 260 * HZ;
1656 1726
1657 br->multicast_querier_delay_time = 0; 1727 br->ip4_querier.delay_time = 0;
1728#if IS_ENABLED(CONFIG_IPV6)
1729 br->ip6_querier.delay_time = 0;
1730#endif
1658 1731
1659 spin_lock_init(&br->multicast_lock); 1732 spin_lock_init(&br->multicast_lock);
1660 setup_timer(&br->multicast_router_timer, 1733 setup_timer(&br->multicast_router_timer,
1661 br_multicast_local_router_expired, 0); 1734 br_multicast_local_router_expired, 0);
1662 setup_timer(&br->multicast_querier_timer, 1735 setup_timer(&br->ip4_querier.timer, br_ip4_multicast_querier_expired,
1663 br_multicast_querier_expired, (unsigned long)br); 1736 (unsigned long)br);
1664 setup_timer(&br->multicast_query_timer, br_multicast_query_expired, 1737 setup_timer(&br->ip4_query.timer, br_ip4_multicast_query_expired,
1665 (unsigned long)br); 1738 (unsigned long)br);
1739#if IS_ENABLED(CONFIG_IPV6)
1740 setup_timer(&br->ip6_querier.timer, br_ip6_multicast_querier_expired,
1741 (unsigned long)br);
1742 setup_timer(&br->ip6_query.timer, br_ip6_multicast_query_expired,
1743 (unsigned long)br);
1744#endif
1666} 1745}
1667 1746
1668void br_multicast_open(struct net_bridge *br) 1747static void __br_multicast_open(struct net_bridge *br,
1748 struct bridge_mcast_query *query)
1669{ 1749{
1670 br->multicast_startup_queries_sent = 0; 1750 query->startup_sent = 0;
1671 1751
1672 if (br->multicast_disabled) 1752 if (br->multicast_disabled)
1673 return; 1753 return;
1674 1754
1675 mod_timer(&br->multicast_query_timer, jiffies); 1755 mod_timer(&query->timer, jiffies);
1756}
1757
1758void br_multicast_open(struct net_bridge *br)
1759{
1760 __br_multicast_open(br, &br->ip4_query);
1761#if IS_ENABLED(CONFIG_IPV6)
1762 __br_multicast_open(br, &br->ip6_query);
1763#endif
1676} 1764}
1677 1765
1678void br_multicast_stop(struct net_bridge *br) 1766void br_multicast_stop(struct net_bridge *br)
@@ -1684,8 +1772,12 @@ void br_multicast_stop(struct net_bridge *br)
1684 int i; 1772 int i;
1685 1773
1686 del_timer_sync(&br->multicast_router_timer); 1774 del_timer_sync(&br->multicast_router_timer);
1687 del_timer_sync(&br->multicast_querier_timer); 1775 del_timer_sync(&br->ip4_querier.timer);
1688 del_timer_sync(&br->multicast_query_timer); 1776 del_timer_sync(&br->ip4_query.timer);
1777#if IS_ENABLED(CONFIG_IPV6)
1778 del_timer_sync(&br->ip6_querier.timer);
1779 del_timer_sync(&br->ip6_query.timer);
1780#endif
1689 1781
1690 spin_lock_bh(&br->multicast_lock); 1782 spin_lock_bh(&br->multicast_lock);
1691 mdb = mlock_dereference(br->mdb, br); 1783 mdb = mlock_dereference(br->mdb, br);
@@ -1788,18 +1880,24 @@ unlock:
1788 return err; 1880 return err;
1789} 1881}
1790 1882
1791static void br_multicast_start_querier(struct net_bridge *br) 1883static void br_multicast_start_querier(struct net_bridge *br,
1884 struct bridge_mcast_query *query)
1792{ 1885{
1793 struct net_bridge_port *port; 1886 struct net_bridge_port *port;
1794 1887
1795 br_multicast_open(br); 1888 __br_multicast_open(br, query);
1796 1889
1797 list_for_each_entry(port, &br->port_list, list) { 1890 list_for_each_entry(port, &br->port_list, list) {
1798 if (port->state == BR_STATE_DISABLED || 1891 if (port->state == BR_STATE_DISABLED ||
1799 port->state == BR_STATE_BLOCKING) 1892 port->state == BR_STATE_BLOCKING)
1800 continue; 1893 continue;
1801 1894
1802 __br_multicast_enable_port(port); 1895 if (query == &br->ip4_query)
1896 br_multicast_enable(&port->ip4_query);
1897#if IS_ENABLED(CONFIG_IPV6)
1898 else
1899 br_multicast_enable(&port->ip6_query);
1900#endif
1803 } 1901 }
1804} 1902}
1805 1903
@@ -1834,7 +1932,10 @@ rollback:
1834 goto rollback; 1932 goto rollback;
1835 } 1933 }
1836 1934
1837 br_multicast_start_querier(br); 1935 br_multicast_start_querier(br, &br->ip4_query);
1936#if IS_ENABLED(CONFIG_IPV6)
1937 br_multicast_start_querier(br, &br->ip6_query);
1938#endif
1838 1939
1839unlock: 1940unlock:
1840 spin_unlock_bh(&br->multicast_lock); 1941 spin_unlock_bh(&br->multicast_lock);
@@ -1857,10 +1958,18 @@ int br_multicast_set_querier(struct net_bridge *br, unsigned long val)
1857 goto unlock; 1958 goto unlock;
1858 1959
1859 max_delay = br->multicast_query_response_interval; 1960 max_delay = br->multicast_query_response_interval;
1860 if (!timer_pending(&br->multicast_querier_timer))
1861 br->multicast_querier_delay_time = jiffies + max_delay;
1862 1961
1863 br_multicast_start_querier(br); 1962 if (!timer_pending(&br->ip4_querier.timer))
1963 br->ip4_querier.delay_time = jiffies + max_delay;
1964
1965 br_multicast_start_querier(br, &br->ip4_query);
1966
1967#if IS_ENABLED(CONFIG_IPV6)
1968 if (!timer_pending(&br->ip6_querier.timer))
1969 br->ip6_querier.delay_time = jiffies + max_delay;
1970
1971 br_multicast_start_querier(br, &br->ip6_query);
1972#endif
1864 1973
1865unlock: 1974unlock:
1866 spin_unlock_bh(&br->multicast_lock); 1975 spin_unlock_bh(&br->multicast_lock);
diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c
index 1fc30abd3a52..b9259efa636e 100644
--- a/net/bridge/br_netlink.c
+++ b/net/bridge/br_netlink.c
@@ -132,7 +132,7 @@ static int br_fill_ifinfo(struct sk_buff *skb,
132 else 132 else
133 pv = br_get_vlan_info(br); 133 pv = br_get_vlan_info(br);
134 134
135 if (!pv || bitmap_empty(pv->vlan_bitmap, BR_VLAN_BITMAP_LEN)) 135 if (!pv || bitmap_empty(pv->vlan_bitmap, VLAN_N_VID))
136 goto done; 136 goto done;
137 137
138 af = nla_nest_start(skb, IFLA_AF_SPEC); 138 af = nla_nest_start(skb, IFLA_AF_SPEC);
@@ -140,7 +140,7 @@ static int br_fill_ifinfo(struct sk_buff *skb,
140 goto nla_put_failure; 140 goto nla_put_failure;
141 141
142 pvid = br_get_pvid(pv); 142 pvid = br_get_pvid(pv);
143 for_each_set_bit(vid, pv->vlan_bitmap, BR_VLAN_BITMAP_LEN) { 143 for_each_set_bit(vid, pv->vlan_bitmap, VLAN_N_VID) {
144 vinfo.vid = vid; 144 vinfo.vid = vid;
145 vinfo.flags = 0; 145 vinfo.flags = 0;
146 if (vid == pvid) 146 if (vid == pvid)
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
index 2f7da41851bf..263ba9034468 100644
--- a/net/bridge/br_private.h
+++ b/net/bridge/br_private.h
@@ -66,6 +66,20 @@ struct br_ip
66 __u16 vid; 66 __u16 vid;
67}; 67};
68 68
69#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
70/* our own querier */
71struct bridge_mcast_query {
72 struct timer_list timer;
73 u32 startup_sent;
74};
75
76/* other querier */
77struct bridge_mcast_querier {
78 struct timer_list timer;
79 unsigned long delay_time;
80};
81#endif
82
69struct net_port_vlans { 83struct net_port_vlans {
70 u16 port_idx; 84 u16 port_idx;
71 u16 pvid; 85 u16 pvid;
@@ -162,10 +176,12 @@ struct net_bridge_port
162#define BR_FLOOD 0x00000040 176#define BR_FLOOD 0x00000040
163 177
164#ifdef CONFIG_BRIDGE_IGMP_SNOOPING 178#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
165 u32 multicast_startup_queries_sent; 179 struct bridge_mcast_query ip4_query;
180#if IS_ENABLED(CONFIG_IPV6)
181 struct bridge_mcast_query ip6_query;
182#endif /* IS_ENABLED(CONFIG_IPV6) */
166 unsigned char multicast_router; 183 unsigned char multicast_router;
167 struct timer_list multicast_router_timer; 184 struct timer_list multicast_router_timer;
168 struct timer_list multicast_query_timer;
169 struct hlist_head mglist; 185 struct hlist_head mglist;
170 struct hlist_node rlist; 186 struct hlist_node rlist;
171#endif 187#endif
@@ -258,7 +274,6 @@ struct net_bridge
258 u32 hash_max; 274 u32 hash_max;
259 275
260 u32 multicast_last_member_count; 276 u32 multicast_last_member_count;
261 u32 multicast_startup_queries_sent;
262 u32 multicast_startup_query_count; 277 u32 multicast_startup_query_count;
263 278
264 unsigned long multicast_last_member_interval; 279 unsigned long multicast_last_member_interval;
@@ -267,15 +282,18 @@ struct net_bridge
267 unsigned long multicast_query_interval; 282 unsigned long multicast_query_interval;
268 unsigned long multicast_query_response_interval; 283 unsigned long multicast_query_response_interval;
269 unsigned long multicast_startup_query_interval; 284 unsigned long multicast_startup_query_interval;
270 unsigned long multicast_querier_delay_time;
271 285
272 spinlock_t multicast_lock; 286 spinlock_t multicast_lock;
273 struct net_bridge_mdb_htable __rcu *mdb; 287 struct net_bridge_mdb_htable __rcu *mdb;
274 struct hlist_head router_list; 288 struct hlist_head router_list;
275 289
276 struct timer_list multicast_router_timer; 290 struct timer_list multicast_router_timer;
277 struct timer_list multicast_querier_timer; 291 struct bridge_mcast_querier ip4_querier;
278 struct timer_list multicast_query_timer; 292 struct bridge_mcast_query ip4_query;
293#if IS_ENABLED(CONFIG_IPV6)
294 struct bridge_mcast_querier ip6_querier;
295 struct bridge_mcast_query ip6_query;
296#endif /* IS_ENABLED(CONFIG_IPV6) */
279#endif 297#endif
280 298
281 struct timer_list hello_timer; 299 struct timer_list hello_timer;
@@ -503,11 +521,27 @@ static inline bool br_multicast_is_router(struct net_bridge *br)
503 timer_pending(&br->multicast_router_timer)); 521 timer_pending(&br->multicast_router_timer));
504} 522}
505 523
506static inline bool br_multicast_querier_exists(struct net_bridge *br) 524static inline bool
525__br_multicast_querier_exists(struct net_bridge *br,
526 struct bridge_mcast_querier *querier)
527{
528 return time_is_before_jiffies(querier->delay_time) &&
529 (br->multicast_querier || timer_pending(&querier->timer));
530}
531
532static inline bool br_multicast_querier_exists(struct net_bridge *br,
533 struct ethhdr *eth)
507{ 534{
508 return time_is_before_jiffies(br->multicast_querier_delay_time) && 535 switch (eth->h_proto) {
509 (br->multicast_querier || 536 case (htons(ETH_P_IP)):
510 timer_pending(&br->multicast_querier_timer)); 537 return __br_multicast_querier_exists(br, &br->ip4_querier);
538#if IS_ENABLED(CONFIG_IPV6)
539 case (htons(ETH_P_IPV6)):
540 return __br_multicast_querier_exists(br, &br->ip6_querier);
541#endif
542 default:
543 return false;
544 }
511} 545}
512#else 546#else
513static inline int br_multicast_rcv(struct net_bridge *br, 547static inline int br_multicast_rcv(struct net_bridge *br,
@@ -565,7 +599,8 @@ static inline bool br_multicast_is_router(struct net_bridge *br)
565{ 599{
566 return 0; 600 return 0;
567} 601}
568static inline bool br_multicast_querier_exists(struct net_bridge *br) 602static inline bool br_multicast_querier_exists(struct net_bridge *br,
603 struct ethhdr *eth)
569{ 604{
570 return false; 605 return false;
571} 606}
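
The net/bridge changes above split the single multicast querier state into per-protocol pairs (ip4_query/ip4_querier and, under CONFIG_IPV6, ip6_query/ip6_querier), and br_multicast_querier_exists() now takes the Ethernet header so it can pick the right one. As a rough, userspace-only sketch of that dispatch (simplified stand-in structs, not the kernel API):

/* Sketch only: simplified stand-ins for the kernel structures above. */
#include <stdbool.h>
#include <stdint.h>
#include <arpa/inet.h>          /* htons() */

#define ETH_P_IP        0x0800
#define ETH_P_IPV6      0x86DD

struct mcast_querier {          /* cf. struct bridge_mcast_querier */
        bool timer_pending;     /* stand-in for timer_pending(&timer) */
        long delay_time;        /* stand-in for the jiffies-based delay_time */
};

struct bridge {                 /* stand-in for struct net_bridge */
        bool multicast_querier;
        struct mcast_querier ip4_querier;
        struct mcast_querier ip6_querier;
};

static bool querier_exists(const struct bridge *br,
                           const struct mcast_querier *q, long now)
{
        /* the grace period has elapsed, and either we act as querier
         * ourselves or a foreign querier was heard recently */
        return now >= q->delay_time &&
               (br->multicast_querier || q->timer_pending);
}

/* per-protocol dispatch, mirroring the switch on eth->h_proto above */
static bool bridge_querier_exists(const struct bridge *br,
                                  uint16_t h_proto, long now)
{
        if (h_proto == htons(ETH_P_IP))
                return querier_exists(br, &br->ip4_querier, now);
        if (h_proto == htons(ETH_P_IPV6))
                return querier_exists(br, &br->ip6_querier, now);
        return false;
}

int main(void)
{
        struct bridge br = { .multicast_querier = true };

        return bridge_querier_exists(&br, htons(ETH_P_IP), 1) ? 0 : 1;
}

In the kernel the time check is time_is_before_jiffies(querier->delay_time) and the IPv6 branch exists only when IPv6 is enabled, but the shape of the test is the same.
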
diff --git a/net/bridge/br_vlan.c b/net/bridge/br_vlan.c
index bd58b45f5f90..9a9ffe7e4019 100644
--- a/net/bridge/br_vlan.c
+++ b/net/bridge/br_vlan.c
@@ -108,7 +108,7 @@ static int __vlan_del(struct net_port_vlans *v, u16 vid)
108 108
109 clear_bit(vid, v->vlan_bitmap); 109 clear_bit(vid, v->vlan_bitmap);
110 v->num_vlans--; 110 v->num_vlans--;
111 if (bitmap_empty(v->vlan_bitmap, BR_VLAN_BITMAP_LEN)) { 111 if (bitmap_empty(v->vlan_bitmap, VLAN_N_VID)) {
112 if (v->port_idx) 112 if (v->port_idx)
113 rcu_assign_pointer(v->parent.port->vlan_info, NULL); 113 rcu_assign_pointer(v->parent.port->vlan_info, NULL);
114 else 114 else
@@ -122,7 +122,7 @@ static void __vlan_flush(struct net_port_vlans *v)
122{ 122{
123 smp_wmb(); 123 smp_wmb();
124 v->pvid = 0; 124 v->pvid = 0;
125 bitmap_zero(v->vlan_bitmap, BR_VLAN_BITMAP_LEN); 125 bitmap_zero(v->vlan_bitmap, VLAN_N_VID);
126 if (v->port_idx) 126 if (v->port_idx)
127 rcu_assign_pointer(v->parent.port->vlan_info, NULL); 127 rcu_assign_pointer(v->parent.port->vlan_info, NULL);
128 else 128 else
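
The br_netlink.c and br_vlan.c hunks change the length passed to bitmap_empty()/for_each_set_bit() from BR_VLAN_BITMAP_LEN to VLAN_N_VID. Those helpers take a size in bits, and BR_VLAN_BITMAP_LEN is (assuming the usual BITS_TO_LONGS(VLAN_N_VID) definition) the array length in longs, so the old calls scanned only the first words of the 4096-bit VLAN map. A small standalone illustration of the difference:

/* Sketch: why the size argument matters. bitmap_empty()-style helpers take
 * a length in *bits*; passing the array length in longs scans only the
 * first word(s). */
#include <stdbool.h>
#include <stdio.h>

#define VLAN_N_VID      4096
#define BITS_PER_LONG   (8 * sizeof(unsigned long))
#define BITS_TO_LONGS(n) (((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)

static bool bitmap_empty(const unsigned long *map, unsigned int nbits)
{
        unsigned int i;

        for (i = 0; i < nbits; i++)
                if (map[i / BITS_PER_LONG] & (1UL << (i % BITS_PER_LONG)))
                        return false;
        return true;
}

int main(void)
{
        unsigned long vlan_bitmap[BITS_TO_LONGS(VLAN_N_VID)] = { 0 };

        vlan_bitmap[BITS_TO_LONGS(VLAN_N_VID) - 1] = 1; /* set a high VID */

        /* wrong: length in longs misses the set bit */
        printf("len in longs: %s\n",
               bitmap_empty(vlan_bitmap, BITS_TO_LONGS(VLAN_N_VID)) ? "empty" : "set");
        /* right: length in bits sees it */
        printf("len in bits : %s\n",
               bitmap_empty(vlan_bitmap, VLAN_N_VID) ? "empty" : "set");
        return 0;
}

On a 64-bit build the wrong length covers only VIDs 0-63, which is why higher VLAN IDs were being missed.
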
diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c
index b84a1b155bc1..d12e3a9a5356 100644
--- a/net/core/flow_dissector.c
+++ b/net/core/flow_dissector.c
@@ -346,14 +346,9 @@ u16 __netdev_pick_tx(struct net_device *dev, struct sk_buff *skb)
346 if (new_index < 0) 346 if (new_index < 0)
347 new_index = skb_tx_hash(dev, skb); 347 new_index = skb_tx_hash(dev, skb);
348 348
349 if (queue_index != new_index && sk) { 349 if (queue_index != new_index && sk &&
350 struct dst_entry *dst = 350 rcu_access_pointer(sk->sk_dst_cache))
351 rcu_dereference_check(sk->sk_dst_cache, 1); 351 sk_tx_queue_set(sk, queue_index);
352
353 if (dst && skb_dst(skb) == dst)
354 sk_tx_queue_set(sk, queue_index);
355
356 }
357 352
358 queue_index = new_index; 353 queue_index = new_index;
359 } 354 }
diff --git a/net/core/scm.c b/net/core/scm.c
index 03795d0147f2..b4da80b1cc07 100644
--- a/net/core/scm.c
+++ b/net/core/scm.c
@@ -54,7 +54,7 @@ static __inline__ int scm_check_creds(struct ucred *creds)
54 return -EINVAL; 54 return -EINVAL;
55 55
56 if ((creds->pid == task_tgid_vnr(current) || 56 if ((creds->pid == task_tgid_vnr(current) ||
57 ns_capable(current->nsproxy->pid_ns->user_ns, CAP_SYS_ADMIN)) && 57 ns_capable(task_active_pid_ns(current)->user_ns, CAP_SYS_ADMIN)) &&
58 ((uid_eq(uid, cred->uid) || uid_eq(uid, cred->euid) || 58 ((uid_eq(uid, cred->uid) || uid_eq(uid, cred->euid) ||
59 uid_eq(uid, cred->suid)) || nsown_capable(CAP_SETUID)) && 59 uid_eq(uid, cred->suid)) || nsown_capable(CAP_SETUID)) &&
60 ((gid_eq(gid, cred->gid) || gid_eq(gid, cred->egid) || 60 ((gid_eq(gid, cred->gid) || gid_eq(gid, cred->egid) ||
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index 4bcabf3ab4ca..9ee17e3d11c3 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -211,14 +211,6 @@ static inline int ip_finish_output2(struct sk_buff *skb)
211 return -EINVAL; 211 return -EINVAL;
212} 212}
213 213
214static inline int ip_skb_dst_mtu(struct sk_buff *skb)
215{
216 struct inet_sock *inet = skb->sk ? inet_sk(skb->sk) : NULL;
217
218 return (inet && inet->pmtudisc == IP_PMTUDISC_PROBE) ?
219 skb_dst(skb)->dev->mtu : dst_mtu(skb_dst(skb));
220}
221
222static int ip_finish_output(struct sk_buff *skb) 214static int ip_finish_output(struct sk_buff *skb)
223{ 215{
224#if defined(CONFIG_NETFILTER) && defined(CONFIG_XFRM) 216#if defined(CONFIG_NETFILTER) && defined(CONFIG_XFRM)
diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c
index 51fc2a1dcdd3..b3ac3c3f6219 100644
--- a/net/ipv4/ipip.c
+++ b/net/ipv4/ipip.c
@@ -190,15 +190,14 @@ static int ipip_rcv(struct sk_buff *skb)
190 struct ip_tunnel *tunnel; 190 struct ip_tunnel *tunnel;
191 const struct iphdr *iph; 191 const struct iphdr *iph;
192 192
193 if (iptunnel_pull_header(skb, 0, tpi.proto))
194 goto drop;
195
196 iph = ip_hdr(skb); 193 iph = ip_hdr(skb);
197 tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex, TUNNEL_NO_KEY, 194 tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex, TUNNEL_NO_KEY,
198 iph->saddr, iph->daddr, 0); 195 iph->saddr, iph->daddr, 0);
199 if (tunnel) { 196 if (tunnel) {
200 if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) 197 if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
201 goto drop; 198 goto drop;
199 if (iptunnel_pull_header(skb, 0, tpi.proto))
200 goto drop;
202 return ip_tunnel_rcv(tunnel, skb, &tpi, log_ecn_error); 201 return ip_tunnel_rcv(tunnel, skb, &tpi, log_ecn_error);
203 } 202 }
204 203
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
index dd44e0ab600c..61e60d67adca 100644
--- a/net/ipv4/raw.c
+++ b/net/ipv4/raw.c
@@ -571,7 +571,8 @@ static int raw_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
571 flowi4_init_output(&fl4, ipc.oif, sk->sk_mark, tos, 571 flowi4_init_output(&fl4, ipc.oif, sk->sk_mark, tos,
572 RT_SCOPE_UNIVERSE, 572 RT_SCOPE_UNIVERSE,
573 inet->hdrincl ? IPPROTO_RAW : sk->sk_protocol, 573 inet->hdrincl ? IPPROTO_RAW : sk->sk_protocol,
574 inet_sk_flowi_flags(sk) | FLOWI_FLAG_CAN_SLEEP, 574 inet_sk_flowi_flags(sk) | FLOWI_FLAG_CAN_SLEEP |
575 (inet->hdrincl ? FLOWI_FLAG_KNOWN_NH : 0),
575 daddr, saddr, 0, 0); 576 daddr, saddr, 0, 0);
576 577
577 if (!inet->hdrincl) { 578 if (!inet->hdrincl) {
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 5423223e93c2..b2f6c74861af 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -1121,6 +1121,13 @@ new_segment:
1121 goto wait_for_memory; 1121 goto wait_for_memory;
1122 1122
1123 /* 1123 /*
1124 * All packets are restored as if they have
1125 * already been sent.
1126 */
1127 if (tp->repair)
1128 TCP_SKB_CB(skb)->when = tcp_time_stamp;
1129
1130 /*
1124 * Check whether we can use HW checksum. 1131 * Check whether we can use HW checksum.
1125 */ 1132 */
1126 if (sk->sk_route_caps & NETIF_F_ALL_CSUM) 1133 if (sk->sk_route_caps & NETIF_F_ALL_CSUM)
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 28af45abe062..3ca2139a130b 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -3535,7 +3535,10 @@ static bool tcp_parse_aligned_timestamp(struct tcp_sock *tp, const struct tcphdr
3535 ++ptr; 3535 ++ptr;
3536 tp->rx_opt.rcv_tsval = ntohl(*ptr); 3536 tp->rx_opt.rcv_tsval = ntohl(*ptr);
3537 ++ptr; 3537 ++ptr;
3538 tp->rx_opt.rcv_tsecr = ntohl(*ptr) - tp->tsoffset; 3538 if (*ptr)
3539 tp->rx_opt.rcv_tsecr = ntohl(*ptr) - tp->tsoffset;
3540 else
3541 tp->rx_opt.rcv_tsecr = 0;
3539 return true; 3542 return true;
3540 } 3543 }
3541 return false; 3544 return false;
@@ -3560,7 +3563,7 @@ static bool tcp_fast_parse_options(const struct sk_buff *skb,
3560 } 3563 }
3561 3564
3562 tcp_parse_options(skb, &tp->rx_opt, 1, NULL); 3565 tcp_parse_options(skb, &tp->rx_opt, 1, NULL);
3563 if (tp->rx_opt.saw_tstamp) 3566 if (tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr)
3564 tp->rx_opt.rcv_tsecr -= tp->tsoffset; 3567 tp->rx_opt.rcv_tsecr -= tp->tsoffset;
3565 3568
3566 return true; 3569 return true;
@@ -5316,7 +5319,7 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
5316 int saved_clamp = tp->rx_opt.mss_clamp; 5319 int saved_clamp = tp->rx_opt.mss_clamp;
5317 5320
5318 tcp_parse_options(skb, &tp->rx_opt, 0, &foc); 5321 tcp_parse_options(skb, &tp->rx_opt, 0, &foc);
5319 if (tp->rx_opt.saw_tstamp) 5322 if (tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr)
5320 tp->rx_opt.rcv_tsecr -= tp->tsoffset; 5323 tp->rx_opt.rcv_tsecr -= tp->tsoffset;
5321 5324
5322 if (th->ack) { 5325 if (th->ack) {
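
The tcp_input.c hunks all encode the same rule: rcv_tsecr == 0 means the peer echoed no timestamp, so the per-connection offset tp->tsoffset must not be subtracted from it (doing so would turn 0 into a large bogus value). A one-line illustrative helper, not a kernel function:

#include <stdint.h>
#include <stdio.h>

/* Illustrative helper: undo the per-connection timestamp offset only when
 * the peer actually echoed a timestamp. */
static uint32_t tsecr_adjust(uint32_t rcv_tsecr, uint32_t tsoffset)
{
        /* tsecr_adjust(0, off) stays 0 instead of wrapping to 0 - off */
        return rcv_tsecr ? rcv_tsecr - tsoffset : 0;
}

int main(void)
{
        printf("%u %u\n", (unsigned)tsecr_adjust(1000, 7),
               (unsigned)tsecr_adjust(0, 7));   /* prints "993 0" */
        return 0;
}
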
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 92fde8d1aa82..170737a9d56d 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -2670,7 +2670,7 @@ struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst,
2670 int tcp_header_size; 2670 int tcp_header_size;
2671 int mss; 2671 int mss;
2672 2672
2673 skb = alloc_skb(MAX_TCP_HEADER + 15, sk_gfp_atomic(sk, GFP_ATOMIC)); 2673 skb = sock_wmalloc(sk, MAX_TCP_HEADER + 15, 1, GFP_ATOMIC);
2674 if (unlikely(!skb)) { 2674 if (unlikely(!skb)) {
2675 dst_release(dst); 2675 dst_release(dst);
2676 return NULL; 2676 return NULL;
@@ -2814,6 +2814,8 @@ void tcp_connect_init(struct sock *sk)
2814 2814
2815 if (likely(!tp->repair)) 2815 if (likely(!tp->repair))
2816 tp->rcv_nxt = 0; 2816 tp->rcv_nxt = 0;
2817 else
2818 tp->rcv_tstamp = tcp_time_stamp;
2817 tp->rcv_wup = tp->rcv_nxt; 2819 tp->rcv_wup = tp->rcv_nxt;
2818 tp->copied_seq = tp->rcv_nxt; 2820 tp->copied_seq = tp->rcv_nxt;
2819 2821
diff --git a/net/ipv4/xfrm4_output.c b/net/ipv4/xfrm4_output.c
index 327a617d594c..baa0f63731fd 100644
--- a/net/ipv4/xfrm4_output.c
+++ b/net/ipv4/xfrm4_output.c
@@ -21,7 +21,6 @@
21static int xfrm4_tunnel_check_size(struct sk_buff *skb) 21static int xfrm4_tunnel_check_size(struct sk_buff *skb)
22{ 22{
23 int mtu, ret = 0; 23 int mtu, ret = 0;
24 struct dst_entry *dst;
25 24
26 if (IPCB(skb)->flags & IPSKB_XFRM_TUNNEL_SIZE) 25 if (IPCB(skb)->flags & IPSKB_XFRM_TUNNEL_SIZE)
27 goto out; 26 goto out;
@@ -29,12 +28,10 @@ static int xfrm4_tunnel_check_size(struct sk_buff *skb)
29 if (!(ip_hdr(skb)->frag_off & htons(IP_DF)) || skb->local_df) 28 if (!(ip_hdr(skb)->frag_off & htons(IP_DF)) || skb->local_df)
30 goto out; 29 goto out;
31 30
32 dst = skb_dst(skb); 31 mtu = dst_mtu(skb_dst(skb));
33 mtu = dst_mtu(dst);
34 if (skb->len > mtu) { 32 if (skb->len > mtu) {
35 if (skb->sk) 33 if (skb->sk)
36 ip_local_error(skb->sk, EMSGSIZE, ip_hdr(skb)->daddr, 34 xfrm_local_error(skb, mtu);
37 inet_sk(skb->sk)->inet_dport, mtu);
38 else 35 else
39 icmp_send(skb, ICMP_DEST_UNREACH, 36 icmp_send(skb, ICMP_DEST_UNREACH,
40 ICMP_FRAG_NEEDED, htonl(mtu)); 37 ICMP_FRAG_NEEDED, htonl(mtu));
@@ -99,3 +96,12 @@ int xfrm4_output(struct sk_buff *skb)
99 x->outer_mode->afinfo->output_finish, 96 x->outer_mode->afinfo->output_finish,
100 !(IPCB(skb)->flags & IPSKB_REROUTED)); 97 !(IPCB(skb)->flags & IPSKB_REROUTED));
101} 98}
99
100void xfrm4_local_error(struct sk_buff *skb, u32 mtu)
101{
102 struct iphdr *hdr;
103
104 hdr = skb->encapsulation ? inner_ip_hdr(skb) : ip_hdr(skb);
105 ip_local_error(skb->sk, EMSGSIZE, hdr->daddr,
106 inet_sk(skb->sk)->inet_dport, mtu);
107}
diff --git a/net/ipv4/xfrm4_state.c b/net/ipv4/xfrm4_state.c
index 9258e751baba..0b2a0641526a 100644
--- a/net/ipv4/xfrm4_state.c
+++ b/net/ipv4/xfrm4_state.c
@@ -83,6 +83,7 @@ static struct xfrm_state_afinfo xfrm4_state_afinfo = {
83 .extract_input = xfrm4_extract_input, 83 .extract_input = xfrm4_extract_input,
84 .extract_output = xfrm4_extract_output, 84 .extract_output = xfrm4_extract_output,
85 .transport_finish = xfrm4_transport_finish, 85 .transport_finish = xfrm4_transport_finish,
86 .local_error = xfrm4_local_error,
86}; 87};
87 88
88void __init xfrm4_state_init(void) 89void __init xfrm4_state_init(void)
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index da4241c8c7da..498ea99194af 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -1126,12 +1126,10 @@ retry:
1126 if (ifp->flags & IFA_F_OPTIMISTIC) 1126 if (ifp->flags & IFA_F_OPTIMISTIC)
1127 addr_flags |= IFA_F_OPTIMISTIC; 1127 addr_flags |= IFA_F_OPTIMISTIC;
1128 1128
1129 ift = !max_addresses || 1129 ift = ipv6_add_addr(idev, &addr, NULL, tmp_plen,
1130 ipv6_count_addresses(idev) < max_addresses ? 1130 ipv6_addr_scope(&addr), addr_flags,
1131 ipv6_add_addr(idev, &addr, NULL, tmp_plen, 1131 tmp_valid_lft, tmp_prefered_lft);
1132 ipv6_addr_scope(&addr), addr_flags, 1132 if (IS_ERR(ift)) {
1133 tmp_valid_lft, tmp_prefered_lft) : NULL;
1134 if (IS_ERR_OR_NULL(ift)) {
1135 in6_ifa_put(ifp); 1133 in6_ifa_put(ifp);
1136 in6_dev_put(idev); 1134 in6_dev_put(idev);
1137 pr_info("%s: retry temporary address regeneration\n", __func__); 1135 pr_info("%s: retry temporary address regeneration\n", __func__);
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
index ecd60733e5e2..90747f1973fe 100644
--- a/net/ipv6/ip6_gre.c
+++ b/net/ipv6/ip6_gre.c
@@ -724,6 +724,11 @@ static netdev_tx_t ip6gre_xmit2(struct sk_buff *skb,
724 ipv6_push_nfrag_opts(skb, &opt.ops, &proto, NULL); 724 ipv6_push_nfrag_opts(skb, &opt.ops, &proto, NULL);
725 } 725 }
726 726
727 if (likely(!skb->encapsulation)) {
728 skb_reset_inner_headers(skb);
729 skb->encapsulation = 1;
730 }
731
727 skb_push(skb, gre_hlen); 732 skb_push(skb, gre_hlen);
728 skb_reset_network_header(skb); 733 skb_reset_network_header(skb);
729 skb_set_transport_header(skb, sizeof(*ipv6h)); 734 skb_set_transport_header(skb, sizeof(*ipv6h));
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index 6e3ddf806ec2..e7ceb6c871d1 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -238,6 +238,7 @@ int ip6_xmit(struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6,
238 hdr->saddr = fl6->saddr; 238 hdr->saddr = fl6->saddr;
239 hdr->daddr = *first_hop; 239 hdr->daddr = *first_hop;
240 240
241 skb->protocol = htons(ETH_P_IPV6);
241 skb->priority = sk->sk_priority; 242 skb->priority = sk->sk_priority;
242 skb->mark = sk->sk_mark; 243 skb->mark = sk->sk_mark;
243 244
@@ -1057,6 +1058,7 @@ static inline int ip6_ufo_append_data(struct sock *sk,
1057 /* initialize protocol header pointer */ 1058 /* initialize protocol header pointer */
1058 skb->transport_header = skb->network_header + fragheaderlen; 1059 skb->transport_header = skb->network_header + fragheaderlen;
1059 1060
1061 skb->protocol = htons(ETH_P_IPV6);
1060 skb->ip_summed = CHECKSUM_PARTIAL; 1062 skb->ip_summed = CHECKSUM_PARTIAL;
1061 skb->csum = 0; 1063 skb->csum = 0;
1062 } 1064 }
@@ -1359,6 +1361,7 @@ alloc_new_skb:
1359 /* 1361 /*
1360 * Fill in the control structures 1362 * Fill in the control structures
1361 */ 1363 */
1364 skb->protocol = htons(ETH_P_IPV6);
1362 skb->ip_summed = CHECKSUM_NONE; 1365 skb->ip_summed = CHECKSUM_NONE;
1363 skb->csum = 0; 1366 skb->csum = 0;
1364 /* reserve for fragmentation and ipsec header */ 1367 /* reserve for fragmentation and ipsec header */
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index 1e55866cead7..46ba243605a3 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -1027,6 +1027,12 @@ static int ip6_tnl_xmit2(struct sk_buff *skb,
1027 init_tel_txopt(&opt, encap_limit); 1027 init_tel_txopt(&opt, encap_limit);
1028 ipv6_push_nfrag_opts(skb, &opt.ops, &proto, NULL); 1028 ipv6_push_nfrag_opts(skb, &opt.ops, &proto, NULL);
1029 } 1029 }
1030
1031 if (likely(!skb->encapsulation)) {
1032 skb_reset_inner_headers(skb);
1033 skb->encapsulation = 1;
1034 }
1035
1030 skb_push(skb, sizeof(struct ipv6hdr)); 1036 skb_push(skb, sizeof(struct ipv6hdr));
1031 skb_reset_network_header(skb); 1037 skb_reset_network_header(skb);
1032 ipv6h = ipv6_hdr(skb); 1038 ipv6h = ipv6_hdr(skb);
diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
index 79aa9652ed86..04d31c2fbef1 100644
--- a/net/ipv6/ndisc.c
+++ b/net/ipv6/ndisc.c
@@ -1369,8 +1369,10 @@ static void ndisc_redirect_rcv(struct sk_buff *skb)
1369 if (!ndisc_parse_options(msg->opt, ndoptlen, &ndopts)) 1369 if (!ndisc_parse_options(msg->opt, ndoptlen, &ndopts))
1370 return; 1370 return;
1371 1371
1372 if (!ndopts.nd_opts_rh) 1372 if (!ndopts.nd_opts_rh) {
1373 ip6_redirect_no_header(skb, dev_net(skb->dev), 0, 0);
1373 return; 1374 return;
1375 }
1374 1376
1375 hdr = (u8 *)ndopts.nd_opts_rh; 1377 hdr = (u8 *)ndopts.nd_opts_rh;
1376 hdr += 8; 1378 hdr += 8;
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
index c45f7a5c36e9..cdaed47ba932 100644
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -628,6 +628,7 @@ static int rawv6_send_hdrinc(struct sock *sk, void *from, int length,
628 goto error; 628 goto error;
629 skb_reserve(skb, hlen); 629 skb_reserve(skb, hlen);
630 630
631 skb->protocol = htons(ETH_P_IPV6);
631 skb->priority = sk->sk_priority; 632 skb->priority = sk->sk_priority;
632 skb->mark = sk->sk_mark; 633 skb->mark = sk->sk_mark;
633 skb_dst_set(skb, &rt->dst); 634 skb_dst_set(skb, &rt->dst);
diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
index 790d9f4b8b0b..1aeb473b2cc6 100644
--- a/net/ipv6/reassembly.c
+++ b/net/ipv6/reassembly.c
@@ -490,6 +490,7 @@ static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
490 ipv6_hdr(head)->payload_len = htons(payload_len); 490 ipv6_hdr(head)->payload_len = htons(payload_len);
491 ipv6_change_dsfield(ipv6_hdr(head), 0xff, ecn); 491 ipv6_change_dsfield(ipv6_hdr(head), 0xff, ecn);
492 IP6CB(head)->nhoff = nhoff; 492 IP6CB(head)->nhoff = nhoff;
493 IP6CB(head)->flags |= IP6SKB_FRAGMENTED;
493 494
494 /* Yes, and fold redundant checksum back. 8) */ 495 /* Yes, and fold redundant checksum back. 8) */
495 if (head->ip_summed == CHECKSUM_COMPLETE) 496 if (head->ip_summed == CHECKSUM_COMPLETE)
@@ -524,6 +525,9 @@ static int ipv6_frag_rcv(struct sk_buff *skb)
524 struct net *net = dev_net(skb_dst(skb)->dev); 525 struct net *net = dev_net(skb_dst(skb)->dev);
525 int evicted; 526 int evicted;
526 527
528 if (IP6CB(skb)->flags & IP6SKB_FRAGMENTED)
529 goto fail_hdr;
530
527 IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_REASMREQDS); 531 IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_REASMREQDS);
528 532
529 /* Jumbo payload inhibits frag. header */ 533 /* Jumbo payload inhibits frag. header */
@@ -544,6 +548,7 @@ static int ipv6_frag_rcv(struct sk_buff *skb)
544 ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_REASMOKS); 548 ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_REASMOKS);
545 549
546 IP6CB(skb)->nhoff = (u8 *)fhdr - skb_network_header(skb); 550 IP6CB(skb)->nhoff = (u8 *)fhdr - skb_network_header(skb);
551 IP6CB(skb)->flags |= IP6SKB_FRAGMENTED;
547 return 1; 552 return 1;
548 } 553 }
549 554
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index b70f8979003b..8d9a93ed9c59 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -1178,6 +1178,27 @@ void ip6_redirect(struct sk_buff *skb, struct net *net, int oif, u32 mark)
1178} 1178}
1179EXPORT_SYMBOL_GPL(ip6_redirect); 1179EXPORT_SYMBOL_GPL(ip6_redirect);
1180 1180
1181void ip6_redirect_no_header(struct sk_buff *skb, struct net *net, int oif,
1182 u32 mark)
1183{
1184 const struct ipv6hdr *iph = ipv6_hdr(skb);
1185 const struct rd_msg *msg = (struct rd_msg *)icmp6_hdr(skb);
1186 struct dst_entry *dst;
1187 struct flowi6 fl6;
1188
1189 memset(&fl6, 0, sizeof(fl6));
1190 fl6.flowi6_oif = oif;
1191 fl6.flowi6_mark = mark;
1192 fl6.flowi6_flags = 0;
1193 fl6.daddr = msg->dest;
1194 fl6.saddr = iph->daddr;
1195
1196 dst = ip6_route_output(net, NULL, &fl6);
1197 if (!dst->error)
1198 rt6_do_redirect(dst, NULL, skb);
1199 dst_release(dst);
1200}
1201
1181void ip6_sk_redirect(struct sk_buff *skb, struct sock *sk) 1202void ip6_sk_redirect(struct sk_buff *skb, struct sock *sk)
1182{ 1203{
1183 ip6_redirect(skb, sock_net(sk), sk->sk_bound_dev_if, sk->sk_mark); 1204 ip6_redirect(skb, sock_net(sk), sk->sk_bound_dev_if, sk->sk_mark);
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
index a3437a4cd07e..21b25dd8466b 100644
--- a/net/ipv6/sit.c
+++ b/net/ipv6/sit.c
@@ -645,11 +645,7 @@ static int ipip_rcv(struct sk_buff *skb)
645 const struct iphdr *iph; 645 const struct iphdr *iph;
646 struct ip_tunnel *tunnel; 646 struct ip_tunnel *tunnel;
647 647
648 if (iptunnel_pull_header(skb, 0, tpi.proto))
649 goto drop;
650
651 iph = ip_hdr(skb); 648 iph = ip_hdr(skb);
652
653 tunnel = ipip6_tunnel_lookup(dev_net(skb->dev), skb->dev, 649 tunnel = ipip6_tunnel_lookup(dev_net(skb->dev), skb->dev,
654 iph->saddr, iph->daddr); 650 iph->saddr, iph->daddr);
655 if (tunnel != NULL) { 651 if (tunnel != NULL) {
@@ -659,6 +655,8 @@ static int ipip_rcv(struct sk_buff *skb)
659 655
660 if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) 656 if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
661 goto drop; 657 goto drop;
658 if (iptunnel_pull_header(skb, 0, tpi.proto))
659 goto drop;
662 return ip_tunnel_rcv(tunnel, skb, &tpi, log_ecn_error); 660 return ip_tunnel_rcv(tunnel, skb, &tpi, log_ecn_error);
663 } 661 }
664 662
@@ -888,6 +886,11 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb,
888 ttl = iph6->hop_limit; 886 ttl = iph6->hop_limit;
889 tos = INET_ECN_encapsulate(tos, ipv6_get_dsfield(iph6)); 887 tos = INET_ECN_encapsulate(tos, ipv6_get_dsfield(iph6));
890 888
889 if (likely(!skb->encapsulation)) {
890 skb_reset_inner_headers(skb);
891 skb->encapsulation = 1;
892 }
893
891 err = iptunnel_xmit(dev_net(dev), rt, skb, fl4.saddr, fl4.daddr, 894 err = iptunnel_xmit(dev_net(dev), rt, skb, fl4.saddr, fl4.daddr,
892 IPPROTO_IPV6, tos, ttl, df); 895 IPPROTO_IPV6, tos, ttl, df);
893 iptunnel_xmit_stats(err, &dev->stats, dev->tstats); 896 iptunnel_xmit_stats(err, &dev->stats, dev->tstats);
diff --git a/net/ipv6/xfrm6_output.c b/net/ipv6/xfrm6_output.c
index 8755a3079d0f..6cd625e37706 100644
--- a/net/ipv6/xfrm6_output.c
+++ b/net/ipv6/xfrm6_output.c
@@ -34,8 +34,10 @@ static int xfrm6_local_dontfrag(struct sk_buff *skb)
34 struct sock *sk = skb->sk; 34 struct sock *sk = skb->sk;
35 35
36 if (sk) { 36 if (sk) {
37 proto = sk->sk_protocol; 37 if (sk->sk_family != AF_INET6)
38 return 0;
38 39
40 proto = sk->sk_protocol;
39 if (proto == IPPROTO_UDP || proto == IPPROTO_RAW) 41 if (proto == IPPROTO_UDP || proto == IPPROTO_RAW)
40 return inet6_sk(sk)->dontfrag; 42 return inet6_sk(sk)->dontfrag;
41 } 43 }
@@ -54,13 +56,15 @@ static void xfrm6_local_rxpmtu(struct sk_buff *skb, u32 mtu)
54 ipv6_local_rxpmtu(sk, &fl6, mtu); 56 ipv6_local_rxpmtu(sk, &fl6, mtu);
55} 57}
56 58
57static void xfrm6_local_error(struct sk_buff *skb, u32 mtu) 59void xfrm6_local_error(struct sk_buff *skb, u32 mtu)
58{ 60{
59 struct flowi6 fl6; 61 struct flowi6 fl6;
62 const struct ipv6hdr *hdr;
60 struct sock *sk = skb->sk; 63 struct sock *sk = skb->sk;
61 64
65 hdr = skb->encapsulation ? inner_ipv6_hdr(skb) : ipv6_hdr(skb);
62 fl6.fl6_dport = inet_sk(sk)->inet_dport; 66 fl6.fl6_dport = inet_sk(sk)->inet_dport;
63 fl6.daddr = ipv6_hdr(skb)->daddr; 67 fl6.daddr = hdr->daddr;
64 68
65 ipv6_local_error(sk, EMSGSIZE, &fl6, mtu); 69 ipv6_local_error(sk, EMSGSIZE, &fl6, mtu);
66} 70}
@@ -80,7 +84,7 @@ static int xfrm6_tunnel_check_size(struct sk_buff *skb)
80 if (xfrm6_local_dontfrag(skb)) 84 if (xfrm6_local_dontfrag(skb))
81 xfrm6_local_rxpmtu(skb, mtu); 85 xfrm6_local_rxpmtu(skb, mtu);
82 else if (skb->sk) 86 else if (skb->sk)
83 xfrm6_local_error(skb, mtu); 87 xfrm_local_error(skb, mtu);
84 else 88 else
85 icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu); 89 icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
86 ret = -EMSGSIZE; 90 ret = -EMSGSIZE;
@@ -136,13 +140,18 @@ static int __xfrm6_output(struct sk_buff *skb)
136{ 140{
137 struct dst_entry *dst = skb_dst(skb); 141 struct dst_entry *dst = skb_dst(skb);
138 struct xfrm_state *x = dst->xfrm; 142 struct xfrm_state *x = dst->xfrm;
139 int mtu = ip6_skb_dst_mtu(skb); 143 int mtu;
144
145 if (skb->protocol == htons(ETH_P_IPV6))
146 mtu = ip6_skb_dst_mtu(skb);
147 else
148 mtu = dst_mtu(skb_dst(skb));
140 149
141 if (skb->len > mtu && xfrm6_local_dontfrag(skb)) { 150 if (skb->len > mtu && xfrm6_local_dontfrag(skb)) {
142 xfrm6_local_rxpmtu(skb, mtu); 151 xfrm6_local_rxpmtu(skb, mtu);
143 return -EMSGSIZE; 152 return -EMSGSIZE;
144 } else if (!skb->local_df && skb->len > mtu && skb->sk) { 153 } else if (!skb->local_df && skb->len > mtu && skb->sk) {
145 xfrm6_local_error(skb, mtu); 154 xfrm_local_error(skb, mtu);
146 return -EMSGSIZE; 155 return -EMSGSIZE;
147 } 156 }
148 157
diff --git a/net/ipv6/xfrm6_state.c b/net/ipv6/xfrm6_state.c
index d8c70b8efc24..3fc970135fc6 100644
--- a/net/ipv6/xfrm6_state.c
+++ b/net/ipv6/xfrm6_state.c
@@ -183,6 +183,7 @@ static struct xfrm_state_afinfo xfrm6_state_afinfo = {
183 .extract_input = xfrm6_extract_input, 183 .extract_input = xfrm6_extract_input,
184 .extract_output = xfrm6_extract_output, 184 .extract_output = xfrm6_extract_output,
185 .transport_finish = xfrm6_transport_finish, 185 .transport_finish = xfrm6_transport_finish,
186 .local_error = xfrm6_local_error,
186}; 187};
187 188
188int __init xfrm6_state_init(void) 189int __init xfrm6_state_init(void)
diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c
index ea7b9c2c7e66..2d45643c964e 100644
--- a/net/mac80211/ibss.c
+++ b/net/mac80211/ibss.c
@@ -36,7 +36,7 @@
36 36
37static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata, 37static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
38 const u8 *bssid, const int beacon_int, 38 const u8 *bssid, const int beacon_int,
39 struct ieee80211_channel *chan, 39 struct cfg80211_chan_def *req_chandef,
40 const u32 basic_rates, 40 const u32 basic_rates,
41 const u16 capability, u64 tsf, 41 const u16 capability, u64 tsf,
42 bool creator) 42 bool creator)
@@ -51,6 +51,7 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
51 u32 bss_change; 51 u32 bss_change;
52 u8 supp_rates[IEEE80211_MAX_SUPP_RATES]; 52 u8 supp_rates[IEEE80211_MAX_SUPP_RATES];
53 struct cfg80211_chan_def chandef; 53 struct cfg80211_chan_def chandef;
54 struct ieee80211_channel *chan;
54 struct beacon_data *presp; 55 struct beacon_data *presp;
55 int frame_len; 56 int frame_len;
56 57
@@ -81,7 +82,9 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
81 82
82 sdata->drop_unencrypted = capability & WLAN_CAPABILITY_PRIVACY ? 1 : 0; 83 sdata->drop_unencrypted = capability & WLAN_CAPABILITY_PRIVACY ? 1 : 0;
83 84
84 chandef = ifibss->chandef; 85 /* make a copy of the chandef, it could be modified below. */
86 chandef = *req_chandef;
87 chan = chandef.chan;
85 if (!cfg80211_reg_can_beacon(local->hw.wiphy, &chandef)) { 88 if (!cfg80211_reg_can_beacon(local->hw.wiphy, &chandef)) {
86 chandef.width = NL80211_CHAN_WIDTH_20; 89 chandef.width = NL80211_CHAN_WIDTH_20;
87 chandef.center_freq1 = chan->center_freq; 90 chandef.center_freq1 = chan->center_freq;
@@ -259,10 +262,12 @@ static void ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
259 struct cfg80211_bss *cbss = 262 struct cfg80211_bss *cbss =
260 container_of((void *)bss, struct cfg80211_bss, priv); 263 container_of((void *)bss, struct cfg80211_bss, priv);
261 struct ieee80211_supported_band *sband; 264 struct ieee80211_supported_band *sband;
265 struct cfg80211_chan_def chandef;
262 u32 basic_rates; 266 u32 basic_rates;
263 int i, j; 267 int i, j;
264 u16 beacon_int = cbss->beacon_interval; 268 u16 beacon_int = cbss->beacon_interval;
265 const struct cfg80211_bss_ies *ies; 269 const struct cfg80211_bss_ies *ies;
270 enum nl80211_channel_type chan_type;
266 u64 tsf; 271 u64 tsf;
267 272
268 sdata_assert_lock(sdata); 273 sdata_assert_lock(sdata);
@@ -270,6 +275,26 @@ static void ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
270 if (beacon_int < 10) 275 if (beacon_int < 10)
271 beacon_int = 10; 276 beacon_int = 10;
272 277
278 switch (sdata->u.ibss.chandef.width) {
279 case NL80211_CHAN_WIDTH_20_NOHT:
280 case NL80211_CHAN_WIDTH_20:
281 case NL80211_CHAN_WIDTH_40:
282 chan_type = cfg80211_get_chandef_type(&sdata->u.ibss.chandef);
283 cfg80211_chandef_create(&chandef, cbss->channel, chan_type);
284 break;
285 case NL80211_CHAN_WIDTH_5:
286 case NL80211_CHAN_WIDTH_10:
287 cfg80211_chandef_create(&chandef, cbss->channel,
288 NL80211_CHAN_WIDTH_20_NOHT);
289 chandef.width = sdata->u.ibss.chandef.width;
290 break;
291 default:
292 /* fall back to 20 MHz for unsupported modes */
293 cfg80211_chandef_create(&chandef, cbss->channel,
294 NL80211_CHAN_WIDTH_20_NOHT);
295 break;
296 }
297
273 sband = sdata->local->hw.wiphy->bands[cbss->channel->band]; 298 sband = sdata->local->hw.wiphy->bands[cbss->channel->band];
274 299
275 basic_rates = 0; 300 basic_rates = 0;
@@ -294,7 +319,7 @@ static void ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
294 319
295 __ieee80211_sta_join_ibss(sdata, cbss->bssid, 320 __ieee80211_sta_join_ibss(sdata, cbss->bssid,
296 beacon_int, 321 beacon_int,
297 cbss->channel, 322 &chandef,
298 basic_rates, 323 basic_rates,
299 cbss->capability, 324 cbss->capability,
300 tsf, false); 325 tsf, false);
@@ -736,7 +761,7 @@ static void ieee80211_sta_create_ibss(struct ieee80211_sub_if_data *sdata)
736 sdata->drop_unencrypted = 0; 761 sdata->drop_unencrypted = 0;
737 762
738 __ieee80211_sta_join_ibss(sdata, bssid, sdata->vif.bss_conf.beacon_int, 763 __ieee80211_sta_join_ibss(sdata, bssid, sdata->vif.bss_conf.beacon_int,
739 ifibss->chandef.chan, ifibss->basic_rates, 764 &ifibss->chandef, ifibss->basic_rates,
740 capability, 0, true); 765 capability, 0, true);
741} 766}
742 767
@@ -1138,6 +1163,7 @@ int ieee80211_ibss_leave(struct ieee80211_sub_if_data *sdata)
1138 clear_bit(SDATA_STATE_OFFCHANNEL_BEACON_STOPPED, &sdata->state); 1163 clear_bit(SDATA_STATE_OFFCHANNEL_BEACON_STOPPED, &sdata->state);
1139 ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON_ENABLED | 1164 ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON_ENABLED |
1140 BSS_CHANGED_IBSS); 1165 BSS_CHANGED_IBSS);
1166 ieee80211_vif_release_channel(sdata);
1141 synchronize_rcu(); 1167 synchronize_rcu();
1142 kfree(presp); 1168 kfree(presp);
1143 1169
diff --git a/net/mac80211/rc80211_minstrel_ht.c b/net/mac80211/rc80211_minstrel_ht.c
index f5aed963b22e..f3bbea1eb9e7 100644
--- a/net/mac80211/rc80211_minstrel_ht.c
+++ b/net/mac80211/rc80211_minstrel_ht.c
@@ -828,6 +828,9 @@ minstrel_ht_update_cck(struct minstrel_priv *mp, struct minstrel_ht_sta *mi,
828 if (sband->band != IEEE80211_BAND_2GHZ) 828 if (sband->band != IEEE80211_BAND_2GHZ)
829 return; 829 return;
830 830
831 if (!(mp->hw->flags & IEEE80211_HW_SUPPORTS_HT_CCK_RATES))
832 return;
833
831 mi->cck_supported = 0; 834 mi->cck_supported = 0;
832 mi->cck_supported_short = 0; 835 mi->cck_supported_short = 0;
833 for (i = 0; i < 4; i++) { 836 for (i = 0; i < 4; i++) {
diff --git a/net/netlink/genetlink.c b/net/netlink/genetlink.c
index f85f8a2ad6cf..0c741cec4d0d 100644
--- a/net/netlink/genetlink.c
+++ b/net/netlink/genetlink.c
@@ -364,7 +364,7 @@ int genl_unregister_ops(struct genl_family *family, struct genl_ops *ops)
364EXPORT_SYMBOL(genl_unregister_ops); 364EXPORT_SYMBOL(genl_unregister_ops);
365 365
366/** 366/**
367 * genl_register_family - register a generic netlink family 367 * __genl_register_family - register a generic netlink family
368 * @family: generic netlink family 368 * @family: generic netlink family
369 * 369 *
370 * Registers the specified family after validating it first. Only one 370 * Registers the specified family after validating it first. Only one
@@ -374,7 +374,7 @@ EXPORT_SYMBOL(genl_unregister_ops);
374 * 374 *
375 * Return 0 on success or a negative error code. 375 * Return 0 on success or a negative error code.
376 */ 376 */
377int genl_register_family(struct genl_family *family) 377int __genl_register_family(struct genl_family *family)
378{ 378{
379 int err = -EINVAL; 379 int err = -EINVAL;
380 380
@@ -430,10 +430,10 @@ errout_locked:
430errout: 430errout:
431 return err; 431 return err;
432} 432}
433EXPORT_SYMBOL(genl_register_family); 433EXPORT_SYMBOL(__genl_register_family);
434 434
435/** 435/**
436 * genl_register_family_with_ops - register a generic netlink family 436 * __genl_register_family_with_ops - register a generic netlink family
437 * @family: generic netlink family 437 * @family: generic netlink family
438 * @ops: operations to be registered 438 * @ops: operations to be registered
439 * @n_ops: number of elements to register 439 * @n_ops: number of elements to register
@@ -457,12 +457,12 @@ EXPORT_SYMBOL(genl_register_family);
457 * 457 *
458 * Return 0 on success or a negative error code. 458 * Return 0 on success or a negative error code.
459 */ 459 */
460int genl_register_family_with_ops(struct genl_family *family, 460int __genl_register_family_with_ops(struct genl_family *family,
461 struct genl_ops *ops, size_t n_ops) 461 struct genl_ops *ops, size_t n_ops)
462{ 462{
463 int err, i; 463 int err, i;
464 464
465 err = genl_register_family(family); 465 err = __genl_register_family(family);
466 if (err) 466 if (err)
467 return err; 467 return err;
468 468
@@ -476,7 +476,7 @@ err_out:
476 genl_unregister_family(family); 476 genl_unregister_family(family);
477 return err; 477 return err;
478} 478}
479EXPORT_SYMBOL(genl_register_family_with_ops); 479EXPORT_SYMBOL(__genl_register_family_with_ops);
480 480
481/** 481/**
482 * genl_unregister_family - unregister generic netlink family 482 * genl_unregister_family - unregister generic netlink family
@@ -544,6 +544,30 @@ void *genlmsg_put(struct sk_buff *skb, u32 portid, u32 seq,
544} 544}
545EXPORT_SYMBOL(genlmsg_put); 545EXPORT_SYMBOL(genlmsg_put);
546 546
547static int genl_lock_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
548{
549 struct genl_ops *ops = cb->data;
550 int rc;
551
552 genl_lock();
553 rc = ops->dumpit(skb, cb);
554 genl_unlock();
555 return rc;
556}
557
558static int genl_lock_done(struct netlink_callback *cb)
559{
560 struct genl_ops *ops = cb->data;
561 int rc = 0;
562
563 if (ops->done) {
564 genl_lock();
565 rc = ops->done(cb);
566 genl_unlock();
567 }
568 return rc;
569}
570
547static int genl_family_rcv_msg(struct genl_family *family, 571static int genl_family_rcv_msg(struct genl_family *family,
548 struct sk_buff *skb, 572 struct sk_buff *skb,
549 struct nlmsghdr *nlh) 573 struct nlmsghdr *nlh)
@@ -572,15 +596,34 @@ static int genl_family_rcv_msg(struct genl_family *family,
572 return -EPERM; 596 return -EPERM;
573 597
574 if ((nlh->nlmsg_flags & NLM_F_DUMP) == NLM_F_DUMP) { 598 if ((nlh->nlmsg_flags & NLM_F_DUMP) == NLM_F_DUMP) {
575 struct netlink_dump_control c = { 599 int rc;
576 .dump = ops->dumpit,
577 .done = ops->done,
578 };
579 600
580 if (ops->dumpit == NULL) 601 if (ops->dumpit == NULL)
581 return -EOPNOTSUPP; 602 return -EOPNOTSUPP;
582 603
583 return netlink_dump_start(net->genl_sock, skb, nlh, &c); 604 if (!family->parallel_ops) {
605 struct netlink_dump_control c = {
606 .module = family->module,
607 .data = ops,
608 .dump = genl_lock_dumpit,
609 .done = genl_lock_done,
610 };
611
612 genl_unlock();
613 rc = __netlink_dump_start(net->genl_sock, skb, nlh, &c);
614 genl_lock();
615
616 } else {
617 struct netlink_dump_control c = {
618 .module = family->module,
619 .dump = ops->dumpit,
620 .done = ops->done,
621 };
622
623 rc = __netlink_dump_start(net->genl_sock, skb, nlh, &c);
624 }
625
626 return rc;
584 } 627 }
585 628
586 if (ops->doit == NULL) 629 if (ops->doit == NULL)
@@ -789,10 +832,6 @@ static int ctrl_dumpfamily(struct sk_buff *skb, struct netlink_callback *cb)
789 struct net *net = sock_net(skb->sk); 832 struct net *net = sock_net(skb->sk);
790 int chains_to_skip = cb->args[0]; 833 int chains_to_skip = cb->args[0];
791 int fams_to_skip = cb->args[1]; 834 int fams_to_skip = cb->args[1];
792 bool need_locking = chains_to_skip || fams_to_skip;
793
794 if (need_locking)
795 genl_lock();
796 835
797 for (i = chains_to_skip; i < GENL_FAM_TAB_SIZE; i++) { 836 for (i = chains_to_skip; i < GENL_FAM_TAB_SIZE; i++) {
798 n = 0; 837 n = 0;
@@ -814,9 +853,6 @@ errout:
814 cb->args[0] = i; 853 cb->args[0] = i;
815 cb->args[1] = n; 854 cb->args[1] = n;
816 855
817 if (need_locking)
818 genl_unlock();
819
820 return skb->len; 856 return skb->len;
821} 857}
822 858
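
In the genetlink change above, families that do not set parallel_ops get their dumpit/done callbacks wrapped (genl_lock_dumpit()/genl_lock_done()) so the dump runs under genl_lock(), and the ad-hoc need_locking dance in ctrl_dumpfamily() goes away. The wrapping pattern itself, sketched with a pthread mutex rather than the kernel's genl mutex (names are illustrative only):

/* Sketch of the wrapping pattern above; not kernel API. */
#include <pthread.h>

static pthread_mutex_t genl_mutex = PTHREAD_MUTEX_INITIALIZER;

struct dump_ctl {
        int (*dump)(void *ctx);                 /* the family's real dumpit */
        void *ctx;                              /* cb->data in the kernel code */
};

static int locked_dump(struct dump_ctl *c)
{
        int rc;

        pthread_mutex_lock(&genl_mutex);        /* genl_lock() */
        rc = c->dump(c->ctx);                   /* ops->dumpit(skb, cb) */
        pthread_mutex_unlock(&genl_mutex);      /* genl_unlock() */
        return rc;
}

static int dummy_dump(void *ctx)
{
        (void)ctx;
        return 0;
}

int main(void)
{
        struct dump_ctl c = { .dump = dummy_dump, .ctx = 0 };

        return locked_dump(&c); /* parallel_ops families call dump directly */
}
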
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 4b66c752eae5..75c8bbf598c8 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -3259,9 +3259,11 @@ static int packet_getsockopt(struct socket *sock, int level, int optname,
3259 3259
3260 if (po->tp_version == TPACKET_V3) { 3260 if (po->tp_version == TPACKET_V3) {
3261 lv = sizeof(struct tpacket_stats_v3); 3261 lv = sizeof(struct tpacket_stats_v3);
3262 st.stats3.tp_packets += st.stats3.tp_drops;
3262 data = &st.stats3; 3263 data = &st.stats3;
3263 } else { 3264 } else {
3264 lv = sizeof(struct tpacket_stats); 3265 lv = sizeof(struct tpacket_stats);
3266 st.stats1.tp_packets += st.stats1.tp_drops;
3265 data = &st.stats1; 3267 data = &st.stats1;
3266 } 3268 }
3267 3269
diff --git a/net/sunrpc/xdr.c b/net/sunrpc/xdr.c
index 75edcfad6e26..1504bb11e4f3 100644
--- a/net/sunrpc/xdr.c
+++ b/net/sunrpc/xdr.c
@@ -207,10 +207,13 @@ _shift_data_right_pages(struct page **pages, size_t pgto_base,
207 pgfrom_base -= copy; 207 pgfrom_base -= copy;
208 208
209 vto = kmap_atomic(*pgto); 209 vto = kmap_atomic(*pgto);
210 vfrom = kmap_atomic(*pgfrom); 210 if (*pgto != *pgfrom) {
211 memmove(vto + pgto_base, vfrom + pgfrom_base, copy); 211 vfrom = kmap_atomic(*pgfrom);
212 memcpy(vto + pgto_base, vfrom + pgfrom_base, copy);
213 kunmap_atomic(vfrom);
214 } else
215 memmove(vto + pgto_base, vto + pgfrom_base, copy);
212 flush_dcache_page(*pgto); 216 flush_dcache_page(*pgto);
213 kunmap_atomic(vfrom);
214 kunmap_atomic(vto); 217 kunmap_atomic(vto);
215 218
216 } while ((len -= copy) != 0); 219 } while ((len -= copy) != 0);
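
The sunrpc xdr.c fix handles the case where source and destination land on the same page: two kmap_atomic() mappings alias that page at different virtual addresses, so memmove()'s overlap handling cannot work across them. The patch maps the page once and memmove()s within that single mapping, keeping memcpy() for the genuinely disjoint two-page case. The underlying C rule, in a tiny standalone form:

/* memcpy() is undefined for overlapping ranges; memmove() is required,
 * which is exactly the same-page situation handled above. */
#include <stdio.h>
#include <string.h>

int main(void)
{
        char page[16] = "abcdefgh";

        /* shift "abcdefgh" right by two bytes within the same buffer */
        memmove(page + 2, page, 8);     /* safe: memmove handles overlap */
        /* memcpy(page + 2, page, 8) would be undefined behaviour here */

        page[10] = '\0';
        printf("%s\n", page);           /* prints "ababcdefgh" */
        return 0;
}
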
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index ce8249c76827..6cc7ddd2fb7c 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -1257,7 +1257,7 @@ static u32 filter_connect(struct tipc_sock *tsock, struct sk_buff **buf)
1257 /* Accept only ACK or NACK message */ 1257 /* Accept only ACK or NACK message */
1258 if (unlikely(msg_errcode(msg))) { 1258 if (unlikely(msg_errcode(msg))) {
1259 sock->state = SS_DISCONNECTING; 1259 sock->state = SS_DISCONNECTING;
1260 sk->sk_err = -ECONNREFUSED; 1260 sk->sk_err = ECONNREFUSED;
1261 retval = TIPC_OK; 1261 retval = TIPC_OK;
1262 break; 1262 break;
1263 } 1263 }
@@ -1268,7 +1268,7 @@ static u32 filter_connect(struct tipc_sock *tsock, struct sk_buff **buf)
1268 res = auto_connect(sock, msg); 1268 res = auto_connect(sock, msg);
1269 if (res) { 1269 if (res) {
1270 sock->state = SS_DISCONNECTING; 1270 sock->state = SS_DISCONNECTING;
1271 sk->sk_err = res; 1271 sk->sk_err = -res;
1272 retval = TIPC_OK; 1272 retval = TIPC_OK;
1273 break; 1273 break;
1274 } 1274 }
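
Both tipc hunks restore the usual sign convention: sk->sk_err stores a positive errno, while internal results such as res are negative errnos, so one is stored as-is and the other negated. If I recall the reader side correctly, sock_error() clears sk_err and returns its negation, roughly:

/* Illustrative only: the sk_err convention the tipc hunks restore. */
#include <errno.h>
#include <stdio.h>

static int sk_err;                              /* stand-in for sk->sk_err */

static int sock_error_sketch(void)              /* cf. kernel sock_error() */
{
        int err = sk_err;

        sk_err = 0;
        return err ? -err : 0;
}

int main(void)
{
        sk_err = ECONNREFUSED;                  /* store positive, as the fix does */
        printf("reported: %d\n", sock_error_sketch());  /* -ECONNREFUSED */
        return 0;
}
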
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index 3fcba69817e5..5f6e982cdcf4 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -2622,8 +2622,8 @@ static int nl80211_get_key(struct sk_buff *skb, struct genl_info *info)
2622 2622
2623 hdr = nl80211hdr_put(msg, info->snd_portid, info->snd_seq, 0, 2623 hdr = nl80211hdr_put(msg, info->snd_portid, info->snd_seq, 0,
2624 NL80211_CMD_NEW_KEY); 2624 NL80211_CMD_NEW_KEY);
2625 if (IS_ERR(hdr)) 2625 if (!hdr)
2626 return PTR_ERR(hdr); 2626 return -ENOBUFS;
2627 2627
2628 cookie.msg = msg; 2628 cookie.msg = msg;
2629 cookie.idx = key_idx; 2629 cookie.idx = key_idx;
@@ -6507,6 +6507,9 @@ static int nl80211_testmode_dump(struct sk_buff *skb,
6507 NL80211_CMD_TESTMODE); 6507 NL80211_CMD_TESTMODE);
6508 struct nlattr *tmdata; 6508 struct nlattr *tmdata;
6509 6509
6510 if (!hdr)
6511 break;
6512
6510 if (nla_put_u32(skb, NL80211_ATTR_WIPHY, phy_idx)) { 6513 if (nla_put_u32(skb, NL80211_ATTR_WIPHY, phy_idx)) {
6511 genlmsg_cancel(skb, hdr); 6514 genlmsg_cancel(skb, hdr);
6512 break; 6515 break;
@@ -6951,9 +6954,8 @@ static int nl80211_remain_on_channel(struct sk_buff *skb,
6951 6954
6952 hdr = nl80211hdr_put(msg, info->snd_portid, info->snd_seq, 0, 6955 hdr = nl80211hdr_put(msg, info->snd_portid, info->snd_seq, 0,
6953 NL80211_CMD_REMAIN_ON_CHANNEL); 6956 NL80211_CMD_REMAIN_ON_CHANNEL);
6954 6957 if (!hdr) {
6955 if (IS_ERR(hdr)) { 6958 err = -ENOBUFS;
6956 err = PTR_ERR(hdr);
6957 goto free_msg; 6959 goto free_msg;
6958 } 6960 }
6959 6961
@@ -7251,9 +7253,8 @@ static int nl80211_tx_mgmt(struct sk_buff *skb, struct genl_info *info)
7251 7253
7252 hdr = nl80211hdr_put(msg, info->snd_portid, info->snd_seq, 0, 7254 hdr = nl80211hdr_put(msg, info->snd_portid, info->snd_seq, 0,
7253 NL80211_CMD_FRAME); 7255 NL80211_CMD_FRAME);
7254 7256 if (!hdr) {
7255 if (IS_ERR(hdr)) { 7257 err = -ENOBUFS;
7256 err = PTR_ERR(hdr);
7257 goto free_msg; 7258 goto free_msg;
7258 } 7259 }
7259 } 7260 }
@@ -8132,9 +8133,8 @@ static int nl80211_probe_client(struct sk_buff *skb,
8132 8133
8133 hdr = nl80211hdr_put(msg, info->snd_portid, info->snd_seq, 0, 8134 hdr = nl80211hdr_put(msg, info->snd_portid, info->snd_seq, 0,
8134 NL80211_CMD_PROBE_CLIENT); 8135 NL80211_CMD_PROBE_CLIENT);
8135 8136 if (!hdr) {
8136 if (IS_ERR(hdr)) { 8137 err = -ENOBUFS;
8137 err = PTR_ERR(hdr);
8138 goto free_msg; 8138 goto free_msg;
8139 } 8139 }
8140 8140
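
The nl80211 hunks swap IS_ERR()/PTR_ERR() checks for plain NULL tests returning -ENOBUFS, which matches an allocator that fails with NULL rather than an encoded error pointer; since IS_ERR(NULL) is false, the old checks let allocation failures fall through. A compact reminder of the ERR_PTR convention (userspace re-implementation, MAX_ERRNO as in the kernel):

/* Shows why IS_ERR(NULL) is false, which the hunks above fix by testing
 * for NULL directly. */
#include <stdio.h>

#define MAX_ERRNO       4095

static inline void *ERR_PTR(long error)
{
        return (void *)error;
}

static inline int IS_ERR(const void *ptr)
{
        return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

int main(void)
{
        void *failed_alloc = 0;                 /* NULL: how the nl80211 helpers fail */
        void *encoded_err = ERR_PTR(-12);       /* -ENOMEM folded into a pointer */

        printf("IS_ERR(NULL)         = %d\n", IS_ERR(failed_alloc));    /* 0: missed */
        printf("IS_ERR(ERR_PTR(-12)) = %d\n", IS_ERR(encoded_err));     /* 1 */
        return 0;
}
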
diff --git a/net/wireless/sme.c b/net/wireless/sme.c
index 81c8a10d743c..20e86a95dc4e 100644
--- a/net/wireless/sme.c
+++ b/net/wireless/sme.c
@@ -976,21 +976,19 @@ int cfg80211_disconnect(struct cfg80211_registered_device *rdev,
976 struct net_device *dev, u16 reason, bool wextev) 976 struct net_device *dev, u16 reason, bool wextev)
977{ 977{
978 struct wireless_dev *wdev = dev->ieee80211_ptr; 978 struct wireless_dev *wdev = dev->ieee80211_ptr;
979 int err; 979 int err = 0;
980 980
981 ASSERT_WDEV_LOCK(wdev); 981 ASSERT_WDEV_LOCK(wdev);
982 982
983 kfree(wdev->connect_keys); 983 kfree(wdev->connect_keys);
984 wdev->connect_keys = NULL; 984 wdev->connect_keys = NULL;
985 985
986 if (wdev->conn) { 986 if (wdev->conn)
987 err = cfg80211_sme_disconnect(wdev, reason); 987 err = cfg80211_sme_disconnect(wdev, reason);
988 } else if (!rdev->ops->disconnect) { 988 else if (!rdev->ops->disconnect)
989 cfg80211_mlme_down(rdev, dev); 989 cfg80211_mlme_down(rdev, dev);
990 err = 0; 990 else if (wdev->current_bss)
991 } else {
992 err = rdev_disconnect(rdev, dev, reason); 991 err = rdev_disconnect(rdev, dev, reason);
993 }
994 992
995 return err; 993 return err;
996} 994}
diff --git a/net/xfrm/xfrm_output.c b/net/xfrm/xfrm_output.c
index eb4a84288648..3bb2cdc13b46 100644
--- a/net/xfrm/xfrm_output.c
+++ b/net/xfrm/xfrm_output.c
@@ -214,5 +214,26 @@ int xfrm_inner_extract_output(struct xfrm_state *x, struct sk_buff *skb)
214 return inner_mode->afinfo->extract_output(x, skb); 214 return inner_mode->afinfo->extract_output(x, skb);
215} 215}
216 216
217void xfrm_local_error(struct sk_buff *skb, int mtu)
218{
219 unsigned int proto;
220 struct xfrm_state_afinfo *afinfo;
221
222 if (skb->protocol == htons(ETH_P_IP))
223 proto = AF_INET;
224 else if (skb->protocol == htons(ETH_P_IPV6))
225 proto = AF_INET6;
226 else
227 return;
228
229 afinfo = xfrm_state_get_afinfo(proto);
230 if (!afinfo)
231 return;
232
233 afinfo->local_error(skb, mtu);
234 xfrm_state_put_afinfo(afinfo);
235}
236
217EXPORT_SYMBOL_GPL(xfrm_output); 237EXPORT_SYMBOL_GPL(xfrm_output);
218EXPORT_SYMBOL_GPL(xfrm_inner_extract_output); 238EXPORT_SYMBOL_GPL(xfrm_inner_extract_output);
239EXPORT_SYMBOL_GPL(xfrm_local_error);
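
xfrm_local_error(), added above, is a small address-family dispatcher: it maps skb->protocol to the matching xfrm_state_afinfo and calls its new local_error hook, which the earlier IPv4/IPv6 hunks point at xfrm4_local_error() and xfrm6_local_error(); the xfrm_state.c hunks below un-static the afinfo get/put helpers it needs. The table-dispatch shape, as a standalone sketch with made-up names:

/* Sketch of the afinfo dispatch used by xfrm_local_error() above.
 * Structures and names are illustrative, not the kernel's. */
#include <stdint.h>
#include <stdio.h>
#include <arpa/inet.h>

#define ETH_P_IP        0x0800
#define ETH_P_IPV6      0x86DD

struct packet {
        uint16_t protocol;      /* network byte order, like skb->protocol */
};

struct af_ops {
        void (*local_error)(struct packet *pkt, uint32_t mtu);
};

static void v4_local_error(struct packet *pkt, uint32_t mtu)
{
        (void)pkt;
        printf("IPv4 EMSGSIZE, mtu %u\n", mtu); /* cf. ip_local_error() */
}

static void v6_local_error(struct packet *pkt, uint32_t mtu)
{
        (void)pkt;
        printf("IPv6 EMSGSIZE, mtu %u\n", mtu); /* cf. ipv6_local_error() */
}

static const struct af_ops v4_ops = { .local_error = v4_local_error };
static const struct af_ops v6_ops = { .local_error = v6_local_error };

static void local_error(struct packet *pkt, uint32_t mtu)
{
        const struct af_ops *ops;

        if (pkt->protocol == htons(ETH_P_IP))
                ops = &v4_ops;
        else if (pkt->protocol == htons(ETH_P_IPV6))
                ops = &v6_ops;
        else
                return;                 /* unknown family: do nothing */

        ops->local_error(pkt, mtu);
}

int main(void)
{
        struct packet p = { .protocol = htons(ETH_P_IPV6) };

        local_error(&p, 1280);          /* prints the IPv6 branch */
        return 0;
}
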
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index e52cab3591dd..f77c371ea72b 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -320,10 +320,8 @@ static void xfrm_queue_purge(struct sk_buff_head *list)
320{ 320{
321 struct sk_buff *skb; 321 struct sk_buff *skb;
322 322
323 while ((skb = skb_dequeue(list)) != NULL) { 323 while ((skb = skb_dequeue(list)) != NULL)
324 dev_put(skb->dev);
325 kfree_skb(skb); 324 kfree_skb(skb);
326 }
327} 325}
328 326
329/* Rule must be locked. Release descentant resources, announce 327/* Rule must be locked. Release descentant resources, announce
@@ -1758,7 +1756,6 @@ static void xfrm_policy_queue_process(unsigned long arg)
1758 struct sk_buff *skb; 1756 struct sk_buff *skb;
1759 struct sock *sk; 1757 struct sock *sk;
1760 struct dst_entry *dst; 1758 struct dst_entry *dst;
1761 struct net_device *dev;
1762 struct xfrm_policy *pol = (struct xfrm_policy *)arg; 1759 struct xfrm_policy *pol = (struct xfrm_policy *)arg;
1763 struct xfrm_policy_queue *pq = &pol->polq; 1760 struct xfrm_policy_queue *pq = &pol->polq;
1764 struct flowi fl; 1761 struct flowi fl;
@@ -1805,7 +1802,6 @@ static void xfrm_policy_queue_process(unsigned long arg)
1805 dst = xfrm_lookup(xp_net(pol), skb_dst(skb)->path, 1802 dst = xfrm_lookup(xp_net(pol), skb_dst(skb)->path,
1806 &fl, skb->sk, 0); 1803 &fl, skb->sk, 0);
1807 if (IS_ERR(dst)) { 1804 if (IS_ERR(dst)) {
1808 dev_put(skb->dev);
1809 kfree_skb(skb); 1805 kfree_skb(skb);
1810 continue; 1806 continue;
1811 } 1807 }
@@ -1814,9 +1810,7 @@ static void xfrm_policy_queue_process(unsigned long arg)
1814 skb_dst_drop(skb); 1810 skb_dst_drop(skb);
1815 skb_dst_set(skb, dst); 1811 skb_dst_set(skb, dst);
1816 1812
1817 dev = skb->dev;
1818 err = dst_output(skb); 1813 err = dst_output(skb);
1819 dev_put(dev);
1820 } 1814 }
1821 1815
1822 return; 1816 return;
@@ -1839,7 +1833,6 @@ static int xdst_queue_output(struct sk_buff *skb)
1839 } 1833 }
1840 1834
1841 skb_dst_force(skb); 1835 skb_dst_force(skb);
1842 dev_hold(skb->dev);
1843 1836
1844 spin_lock_bh(&pq->hold_queue.lock); 1837 spin_lock_bh(&pq->hold_queue.lock);
1845 1838
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
index 78f66fa92449..54c0acd29468 100644
--- a/net/xfrm/xfrm_state.c
+++ b/net/xfrm/xfrm_state.c
@@ -39,9 +39,6 @@ static DEFINE_SPINLOCK(xfrm_state_lock);
39 39
40static unsigned int xfrm_state_hashmax __read_mostly = 1 * 1024 * 1024; 40static unsigned int xfrm_state_hashmax __read_mostly = 1 * 1024 * 1024;
41 41
42static struct xfrm_state_afinfo *xfrm_state_get_afinfo(unsigned int family);
43static void xfrm_state_put_afinfo(struct xfrm_state_afinfo *afinfo);
44
45static inline unsigned int xfrm_dst_hash(struct net *net, 42static inline unsigned int xfrm_dst_hash(struct net *net,
46 const xfrm_address_t *daddr, 43 const xfrm_address_t *daddr,
47 const xfrm_address_t *saddr, 44 const xfrm_address_t *saddr,
@@ -1860,7 +1857,7 @@ int xfrm_state_unregister_afinfo(struct xfrm_state_afinfo *afinfo)
1860} 1857}
1861EXPORT_SYMBOL(xfrm_state_unregister_afinfo); 1858EXPORT_SYMBOL(xfrm_state_unregister_afinfo);
1862 1859
1863static struct xfrm_state_afinfo *xfrm_state_get_afinfo(unsigned int family) 1860struct xfrm_state_afinfo *xfrm_state_get_afinfo(unsigned int family)
1864{ 1861{
1865 struct xfrm_state_afinfo *afinfo; 1862 struct xfrm_state_afinfo *afinfo;
1866 if (unlikely(family >= NPROTO)) 1863 if (unlikely(family >= NPROTO))
@@ -1872,7 +1869,7 @@ static struct xfrm_state_afinfo *xfrm_state_get_afinfo(unsigned int family)
1872 return afinfo; 1869 return afinfo;
1873} 1870}
1874 1871
1875static void xfrm_state_put_afinfo(struct xfrm_state_afinfo *afinfo) 1872void xfrm_state_put_afinfo(struct xfrm_state_afinfo *afinfo)
1876{ 1873{
1877 rcu_read_unlock(); 1874 rcu_read_unlock();
1878} 1875}
diff --git a/sound/isa/opti9xx/opti92x-ad1848.c b/sound/isa/opti9xx/opti92x-ad1848.c
index 103b33373fd4..6effe99bbb9c 100644
--- a/sound/isa/opti9xx/opti92x-ad1848.c
+++ b/sound/isa/opti9xx/opti92x-ad1848.c
@@ -173,11 +173,7 @@ MODULE_DEVICE_TABLE(pnp_card, snd_opti9xx_pnpids);
173 173
174#endif /* CONFIG_PNP */ 174#endif /* CONFIG_PNP */
175 175
176#ifdef OPTi93X 176#define DEV_NAME KBUILD_MODNAME
177#define DEV_NAME "opti93x"
178#else
179#define DEV_NAME "opti92x"
180#endif
181 177
182static char * snd_opti9xx_names[] = { 178static char * snd_opti9xx_names[] = {
183 "unknown", 179 "unknown",
@@ -1167,7 +1163,7 @@ static int snd_opti9xx_pnp_resume(struct pnp_card_link *pcard)
1167 1163
1168static struct pnp_card_driver opti9xx_pnpc_driver = { 1164static struct pnp_card_driver opti9xx_pnpc_driver = {
1169 .flags = PNP_DRIVER_RES_DISABLE, 1165 .flags = PNP_DRIVER_RES_DISABLE,
1170 .name = "opti9xx", 1166 .name = DEV_NAME,
1171 .id_table = snd_opti9xx_pnpids, 1167 .id_table = snd_opti9xx_pnpids,
1172 .probe = snd_opti9xx_pnp_probe, 1168 .probe = snd_opti9xx_pnp_probe,
1173 .remove = snd_opti9xx_pnp_remove, 1169 .remove = snd_opti9xx_pnp_remove,
diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
index 030ca8652a1c..9f3586276871 100644
--- a/sound/pci/hda/patch_hdmi.c
+++ b/sound/pci/hda/patch_hdmi.c
@@ -1781,6 +1781,9 @@ static int generic_hdmi_build_controls(struct hda_codec *codec)
1781 struct snd_pcm_chmap *chmap; 1781 struct snd_pcm_chmap *chmap;
1782 struct snd_kcontrol *kctl; 1782 struct snd_kcontrol *kctl;
1783 int i; 1783 int i;
1784
1785 if (!codec->pcm_info[pin_idx].pcm)
1786 break;
1784 err = snd_pcm_add_chmap_ctls(codec->pcm_info[pin_idx].pcm, 1787 err = snd_pcm_add_chmap_ctls(codec->pcm_info[pin_idx].pcm,
1785 SNDRV_PCM_STREAM_PLAYBACK, 1788 SNDRV_PCM_STREAM_PLAYBACK,
1786 NULL, 0, pin_idx, &chmap); 1789 NULL, 0, pin_idx, &chmap);
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index f303cd898515..389db4c2801b 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -4336,6 +4336,7 @@ static const struct hda_fixup alc662_fixups[] = {
4336 4336
4337static const struct snd_pci_quirk alc662_fixup_tbl[] = { 4337static const struct snd_pci_quirk alc662_fixup_tbl[] = {
4338 SND_PCI_QUIRK(0x1019, 0x9087, "ECS", ALC662_FIXUP_ASUS_MODE2), 4338 SND_PCI_QUIRK(0x1019, 0x9087, "ECS", ALC662_FIXUP_ASUS_MODE2),
4339 SND_PCI_QUIRK(0x1025, 0x022f, "Acer Aspire One", ALC662_FIXUP_INV_DMIC),
4339 SND_PCI_QUIRK(0x1025, 0x0308, "Acer Aspire 8942G", ALC662_FIXUP_ASPIRE), 4340 SND_PCI_QUIRK(0x1025, 0x0308, "Acer Aspire 8942G", ALC662_FIXUP_ASPIRE),
4340 SND_PCI_QUIRK(0x1025, 0x031c, "Gateway NV79", ALC662_FIXUP_SKU_IGNORE), 4341 SND_PCI_QUIRK(0x1025, 0x031c, "Gateway NV79", ALC662_FIXUP_SKU_IGNORE),
4341 SND_PCI_QUIRK(0x1025, 0x0349, "eMachines eM250", ALC662_FIXUP_INV_DMIC), 4342 SND_PCI_QUIRK(0x1025, 0x0349, "eMachines eM250", ALC662_FIXUP_INV_DMIC),