-rw-r--r--  Documentation/acpi/enumeration.txt | 6
-rw-r--r--  Documentation/devicetree/bindings/cpufreq/cpufreq-cpu0.txt | 6
-rw-r--r--  Documentation/kernel-parameters.txt | 8
-rw-r--r--  MAINTAINERS | 10
-rw-r--r--  Makefile | 2
-rw-r--r--  arch/arm/Kconfig | 1
-rw-r--r--  arch/arm/boot/dts/at91sam9n12.dtsi | 2
-rw-r--r--  arch/arm/boot/dts/at91sam9x5.dtsi | 4
-rw-r--r--  arch/arm/mach-exynos/hotplug.c | 10
-rw-r--r--  arch/arm/mach-exynos/platsmp.c | 34
-rw-r--r--  arch/arm/mach-imx/clk-imx6q.c | 4
-rw-r--r--  arch/arm/mach-mvebu/coherency.c | 6
-rw-r--r--  arch/arm/mach-mvebu/headsmp-a9.S | 9
-rw-r--r--  arch/arm/mach-mvebu/pmsu.c | 10
-rw-r--r--  arch/arm64/Kconfig | 1
-rw-r--r--  arch/arm64/kernel/efi-stub.c | 2
-rw-r--r--  arch/powerpc/Kconfig | 1
-rw-r--r--  arch/powerpc/include/asm/kvm_book3s_64.h | 19
-rw-r--r--  arch/powerpc/include/asm/ppc_asm.h | 2
-rw-r--r--  arch/powerpc/kvm/book3s_64_mmu_hv.c | 2
-rw-r--r--  arch/powerpc/kvm/book3s_hv_rm_mmu.c | 7
-rw-r--r--  arch/powerpc/kvm/book3s_hv_rmhandlers.S | 2
-rw-r--r--  arch/powerpc/kvm/book3s_interrupts.S | 4
-rw-r--r--  arch/powerpc/kvm/book3s_rmhandlers.S | 6
-rw-r--r--  arch/powerpc/kvm/book3s_rtas.c | 65
-rw-r--r--  arch/powerpc/kvm/e500_mmu_host.c | 3
-rw-r--r--  arch/s390/include/asm/switch_to.h | 4
-rw-r--r--  arch/s390/kernel/head.S | 6
-rw-r--r--  arch/s390/kernel/ptrace.c | 12
-rw-r--r--  arch/s390/pci/pci.c | 49
-rw-r--r--  arch/sparc/Kconfig | 1
-rw-r--r--  arch/sparc/include/uapi/asm/unistd.h | 3
-rw-r--r--  arch/sparc/kernel/sys32.S | 1
-rw-r--r--  arch/sparc/kernel/systbls_32.S | 1
-rw-r--r--  arch/sparc/kernel/systbls_64.S | 2
-rw-r--r--  arch/um/kernel/tlb.c | 9
-rw-r--r--  arch/um/kernel/trap.c | 2
-rw-r--r--  arch/um/os-Linux/skas/process.c | 9
-rw-r--r--  arch/x86/Kconfig | 1
-rw-r--r--  arch/x86/boot/header.S | 26
-rw-r--r--  arch/x86/boot/tools/build.c | 38
-rw-r--r--  arch/x86/kernel/apm_32.c | 1
-rw-r--r--  arch/x86/kernel/espfix_64.c | 5
-rw-r--r--  arch/x86/kvm/x86.c | 12
-rw-r--r--  block/blk-cgroup.c | 7
-rw-r--r--  block/blk-tag.c | 33
-rw-r--r--  block/compat_ioctl.c | 1
-rw-r--r--  drivers/acpi/video.c | 10
-rw-r--r--  drivers/ata/ahci.c | 1
-rw-r--r--  drivers/ata/libata-core.c | 22
-rw-r--r--  drivers/ata/libata-eh.c | 9
-rw-r--r--  drivers/ata/pata_ep93xx.c | 2
-rw-r--r--  drivers/base/platform.c | 18
-rw-r--r--  drivers/block/drbd/drbd_nl.c | 6
-rw-r--r--  drivers/char/hw_random/core.c | 47
-rw-r--r--  drivers/char/hw_random/virtio-rng.c | 10
-rw-r--r--  drivers/char/random.c | 17
-rw-r--r--  drivers/cpufreq/Kconfig.arm | 3
-rw-r--r--  drivers/cpufreq/cpufreq-cpu0.c | 7
-rw-r--r--  drivers/cpufreq/cpufreq.c | 6
-rw-r--r--  drivers/cpufreq/sa1110-cpufreq.c | 2
-rw-r--r--  drivers/firmware/efi/efi.c | 22
-rw-r--r--  drivers/firmware/efi/fdt.c | 10
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c | 4
-rw-r--r--  drivers/gpu/drm/i915/intel_dp.c | 4
-rw-r--r--  drivers/gpu/drm/i915/intel_lvds.c | 7
-rw-r--r--  drivers/gpu/drm/i915/intel_panel.c | 8
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/therm/temp.c | 6
-rw-r--r--  drivers/gpu/drm/qxl/qxl_irq.c | 3
-rw-r--r--  drivers/gpu/drm/radeon/atombios_crtc.c | 8
-rw-r--r--  drivers/gpu/drm/radeon/atombios_encoders.c | 10
-rw-r--r--  drivers/gpu/drm/radeon/evergreen.c | 5
-rw-r--r--  drivers/gpu/drm/radeon/evergreen_reg.h | 1
-rw-r--r--  drivers/gpu/drm/radeon/radeon.h | 3
-rw-r--r--  drivers/gpu/drm/radeon/radeon_display.c | 198
-rw-r--r--  drivers/gpu/drm/radeon/rv515.c | 5
-rw-r--r--  drivers/hv/hv_fcopy.c | 2
-rw-r--r--  drivers/hwmon/adt7470.c | 6
-rw-r--r--  drivers/hwmon/da9052-hwmon.c | 2
-rw-r--r--  drivers/hwmon/da9055-hwmon.c | 2
-rw-r--r--  drivers/ide/Kconfig | 5
-rw-r--r--  drivers/ide/ide-probe.c | 8
-rw-r--r--  drivers/iio/accel/mma8452.c | 8
-rw-r--r--  drivers/iio/industrialio-event.c | 3
-rw-r--r--  drivers/infiniband/hw/cxgb4/cm.c | 14
-rw-r--r--  drivers/infiniband/hw/cxgb4/device.c | 21
-rw-r--r--  drivers/infiniband/hw/cxgb4/iw_cxgb4.h | 2
-rw-r--r--  drivers/infiniband/hw/mlx5/qp.c | 2
-rw-r--r--  drivers/iommu/fsl_pamu.c | 8
-rw-r--r--  drivers/iommu/fsl_pamu_domain.c | 18
-rw-r--r--  drivers/irqchip/irq-gic.c | 7
-rw-r--r--  drivers/isdn/i4l/isdn_ppp.c | 20
-rw-r--r--  drivers/md/dm-cache-metadata.c | 9
-rw-r--r--  drivers/md/dm-thin-metadata.c | 9
-rw-r--r--  drivers/media/dvb-frontends/si2168.c | 16
-rw-r--r--  drivers/media/dvb-frontends/si2168_priv.h | 2
-rw-r--r--  drivers/media/dvb-frontends/tda10071.c | 12
-rw-r--r--  drivers/media/dvb-frontends/tda10071_priv.h | 1
-rw-r--r--  drivers/media/pci/saa7134/saa7134-empress.c | 2
-rw-r--r--  drivers/media/platform/davinci/vpif_capture.c | 1
-rw-r--r--  drivers/media/platform/davinci/vpif_display.c | 1
-rw-r--r--  drivers/media/tuners/si2157.c | 2
-rw-r--r--  drivers/media/usb/dvb-usb-v2/af9035.c | 40
-rw-r--r--  drivers/media/usb/gspca/pac7302.c | 1
-rw-r--r--  drivers/media/usb/hdpvr/hdpvr-video.c | 6
-rw-r--r--  drivers/media/v4l2-core/v4l2-dv-timings.c | 4
-rw-r--r--  drivers/mtd/ubi/fastmap.c | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_cq.c | 7
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/mr.c | 19
-rw-r--r--  drivers/net/ethernet/realtek/r8169.c | 2
-rw-r--r--  drivers/net/ethernet/sun/sunvnet.c | 20
-rw-r--r--  drivers/net/ppp/ppp_generic.c | 22
-rw-r--r--  drivers/net/usb/huawei_cdc_ncm.c | 3
-rw-r--r--  drivers/net/usb/qmi_wwan.c | 2
-rw-r--r--  drivers/net/wan/x25_asy.c | 6
-rw-r--r--  drivers/net/xen-netback/netback.c | 86
-rw-r--r--  drivers/s390/char/raw3270.c | 1
-rw-r--r--  drivers/s390/crypto/ap_bus.c | 9
-rw-r--r--  drivers/staging/media/omap4iss/Kconfig | 2
-rw-r--r--  drivers/usb/chipidea/udc.c | 4
-rw-r--r--  drivers/usb/core/hub.c | 19
-rw-r--r--  drivers/xen/balloon.c | 12
-rw-r--r--  drivers/xen/manage.c | 5
-rw-r--r--  fs/btrfs/ordered-data.c | 11
-rw-r--r--  fs/btrfs/volumes.c | 8
-rw-r--r--  fs/gfs2/file.c | 4
-rw-r--r--  fs/gfs2/glock.c | 14
-rw-r--r--  fs/gfs2/glops.c | 4
-rw-r--r--  fs/gfs2/lock_dlm.c | 4
-rw-r--r--  fs/gfs2/rgrp.c | 4
-rw-r--r--  fs/nfs/direct.c | 2
-rw-r--r--  fs/nfs/internal.h | 1
-rw-r--r--  fs/nfs/nfs3acl.c | 43
-rw-r--r--  fs/nfs/nfs3proc.c | 4
-rw-r--r--  fs/nfs/pagelist.c | 20
-rw-r--r--  fs/nfs/write.c | 335
-rw-r--r--  fs/xfs/xfs_bmap.c | 7
-rw-r--r--  fs/xfs/xfs_bmap.h | 4
-rw-r--r--  fs/xfs/xfs_bmap_util.c | 53
-rw-r--r--  fs/xfs/xfs_bmap_util.h | 4
-rw-r--r--  fs/xfs/xfs_btree.c | 82
-rw-r--r--  fs/xfs/xfs_iomap.c | 3
-rw-r--r--  fs/xfs/xfs_sb.c | 25
-rw-r--r--  include/linux/cpufreq.h | 4
-rw-r--r--  include/linux/mutex.h | 4
-rw-r--r--  include/linux/osq_lock.h | 27
-rw-r--r--  include/linux/rcupdate.h | 46
-rw-r--r--  include/linux/rwsem-spinlock.h | 8
-rw-r--r--  include/linux/rwsem.h | 34
-rw-r--r--  include/net/netfilter/nf_tables.h | 6
-rw-r--r--  include/net/netns/nftables.h | 2
-rw-r--r--  kernel/Kconfig.locks | 9
-rw-r--r--  kernel/locking/mcs_spinlock.c | 64
-rw-r--r--  kernel/locking/mcs_spinlock.h | 9
-rw-r--r--  kernel/locking/mutex.c | 2
-rw-r--r--  kernel/locking/rwsem-spinlock.c | 28
-rw-r--r--  kernel/locking/rwsem-xadd.c | 16
-rw-r--r--  kernel/locking/rwsem.c | 2
-rw-r--r--  kernel/power/process.c | 1
-rw-r--r--  kernel/power/suspend.c | 4
-rw-r--r--  kernel/rcu/tree.c | 140
-rw-r--r--  kernel/rcu/tree.h | 6
-rw-r--r--  kernel/rcu/tree_plugin.h | 2
-rw-r--r--  kernel/rcu/update.c | 22
-rw-r--r--  kernel/sched/core.c | 7
-rw-r--r--  kernel/sched/debug.c | 2
-rw-r--r--  kernel/time/alarmtimer.c | 20
-rw-r--r--  kernel/trace/ftrace.c | 4
-rw-r--r--  kernel/trace/ring_buffer.c | 4
-rw-r--r--  kernel/trace/trace.c | 20
-rw-r--r--  kernel/trace/trace_clock.c | 9
-rw-r--r--  kernel/trace/trace_events.c | 1
-rw-r--r--  net/batman-adv/bridge_loop_avoidance.c | 44
-rw-r--r--  net/batman-adv/soft-interface.c | 60
-rw-r--r--  net/batman-adv/translation-table.c | 26
-rw-r--r--  net/batman-adv/types.h | 2
-rw-r--r--  net/core/dev.c | 2
-rw-r--r--  net/dns_resolver/dns_query.c | 2
-rw-r--r--  net/ipv4/af_inet.c | 3
-rw-r--r--  net/ipv4/gre_offload.c | 3
-rw-r--r--  net/ipv4/ip_options.c | 4
-rw-r--r--  net/ipv4/tcp_offload.c | 2
-rw-r--r--  net/ipv6/tcpv6_offload.c | 2
-rw-r--r--  net/netfilter/nf_tables_api.c | 140
-rw-r--r--  net/netfilter/nf_tables_core.c | 10
-rw-r--r--  net/sched/cls_u32.c | 19
-rw-r--r--  tools/lib/lockdep/include/liblockdep/mutex.h | 4
-rw-r--r--  tools/lib/lockdep/include/liblockdep/rwlock.h | 8
-rw-r--r--  tools/lib/lockdep/preload.c | 20
189 files changed, 1904 insertions(+), 942 deletions(-)
diff --git a/Documentation/acpi/enumeration.txt b/Documentation/acpi/enumeration.txt
index fd786ea13a1f..e182be5e3c83 100644
--- a/Documentation/acpi/enumeration.txt
+++ b/Documentation/acpi/enumeration.txt
@@ -60,12 +60,6 @@ If the driver needs to perform more complex initialization like getting and
 configuring GPIOs it can get its ACPI handle and extract this information
 from ACPI tables.
 
-Currently the kernel is not able to automatically determine from which ACPI
-device it should make the corresponding platform device so we need to add
-the ACPI device explicitly to acpi_platform_device_ids list defined in
-drivers/acpi/acpi_platform.c. This limitation is only for the platform
-devices, SPI and I2C devices are created automatically as described below.
-
 DMA support
 ~~~~~~~~~~~
 DMA controllers enumerated via ACPI should be registered in the system to
diff --git a/Documentation/devicetree/bindings/cpufreq/cpufreq-cpu0.txt b/Documentation/devicetree/bindings/cpufreq/cpufreq-cpu0.txt
index f055515d2b62..366690cb86a3 100644
--- a/Documentation/devicetree/bindings/cpufreq/cpufreq-cpu0.txt
+++ b/Documentation/devicetree/bindings/cpufreq/cpufreq-cpu0.txt
@@ -8,10 +8,12 @@ Both required and optional properties listed below must be defined
 under node /cpus/cpu@0.
 
 Required properties:
-- operating-points: Refer to Documentation/devicetree/bindings/power/opp.txt
-  for details
+- None
 
 Optional properties:
+- operating-points: Refer to Documentation/devicetree/bindings/power/opp.txt for
+  details. OPPs *must* be supplied either via DT, i.e. this property, or
+  populated at runtime.
 - clock-latency: Specify the possible maximum transition latency for clock,
   in unit of nanoseconds.
 - voltage-tolerance: Specify the CPU voltage tolerance in percentage.
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index c1b9aa8c5a52..b7fa2f599459 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -2790,6 +2790,12 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
 			leaf rcu_node structure. Useful for very large
 			systems.
 
+	rcutree.jiffies_till_sched_qs= [KNL]
+			Set required age in jiffies for a
+			given grace period before RCU starts
+			soliciting quiescent-state help from
+			rcu_note_context_switch().
+
 	rcutree.jiffies_till_first_fqs= [KNL]
 			Set delay from grace-period initialization to
 			first attempt to force quiescent states.
@@ -3526,7 +3532,7 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
 			the allocated input device; If set to 0, video driver
 			will only send out the event without touching backlight
 			brightness level.
-			default: 0
+			default: 1
 
 	virtio_mmio.device=
 			[VMMIO] Memory mapped virtio (platform) device.
diff --git a/MAINTAINERS b/MAINTAINERS
index ae8cd00215b2..78215a5dea28 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -8010,6 +8010,16 @@ F:	drivers/ata/
 F:	include/linux/ata.h
 F:	include/linux/libata.h
 
+SERIAL ATA AHCI PLATFORM devices support
+M:	Hans de Goede <hdegoede@redhat.com>
+M:	Tejun Heo <tj@kernel.org>
+L:	linux-ide@vger.kernel.org
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/tj/libata.git
+S:	Supported
+F:	drivers/ata/ahci_platform.c
+F:	drivers/ata/libahci_platform.c
+F:	include/linux/ahci_platform.h
+
 SERVER ENGINES 10Gbps iSCSI - BladeEngine 2 DRIVER
 M:	Jayamohan Kallickal <jayamohan.kallickal@emulex.com>
 L:	linux-scsi@vger.kernel.org
diff --git a/Makefile b/Makefile
index f3c543df4697..6b2774145d66 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 3
 PATCHLEVEL = 16
 SUBLEVEL = 0
-EXTRAVERSION = -rc5
+EXTRAVERSION = -rc6
 NAME = Shuffling Zombie Juror
 
 # *DOCUMENTATION*
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 245058b3b0ef..88acf8bc1490 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -6,6 +6,7 @@ config ARM
 	select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
 	select ARCH_HAVE_CUSTOM_GPIO_H
 	select ARCH_MIGHT_HAVE_PC_PARPORT
+	select ARCH_SUPPORTS_ATOMIC_RMW
 	select ARCH_USE_BUILTIN_BSWAP
 	select ARCH_USE_CMPXCHG_LOCKREF
 	select ARCH_WANT_IPC_PARSE_VERSION
diff --git a/arch/arm/boot/dts/at91sam9n12.dtsi b/arch/arm/boot/dts/at91sam9n12.dtsi
index 287795985e32..b84bac5bada4 100644
--- a/arch/arm/boot/dts/at91sam9n12.dtsi
+++ b/arch/arm/boot/dts/at91sam9n12.dtsi
@@ -925,7 +925,7 @@
 				compatible = "atmel,at91rm9200-ohci", "usb-ohci";
 				reg = <0x00500000 0x00100000>;
 				interrupts = <22 IRQ_TYPE_LEVEL_HIGH 2>;
-				clocks = <&usb>, <&uhphs_clk>, <&udphs_clk>,
+				clocks = <&usb>, <&uhphs_clk>, <&uhphs_clk>,
 					 <&uhpck>;
 				clock-names = "usb_clk", "ohci_clk", "hclk", "uhpck";
 				status = "disabled";
diff --git a/arch/arm/boot/dts/at91sam9x5.dtsi b/arch/arm/boot/dts/at91sam9x5.dtsi
index 2ebc42140ea6..2c0d6ea3ab41 100644
--- a/arch/arm/boot/dts/at91sam9x5.dtsi
+++ b/arch/arm/boot/dts/at91sam9x5.dtsi
@@ -1124,6 +1124,7 @@
 				compatible = "atmel,at91sam9rl-pwm";
 				reg = <0xf8034000 0x300>;
 				interrupts = <18 IRQ_TYPE_LEVEL_HIGH 4>;
+				clocks = <&pwm_clk>;
 				#pwm-cells = <3>;
 				status = "disabled";
 			};
@@ -1155,8 +1156,7 @@
 				compatible = "atmel,at91rm9200-ohci", "usb-ohci";
 				reg = <0x00600000 0x100000>;
 				interrupts = <22 IRQ_TYPE_LEVEL_HIGH 2>;
-				clocks = <&usb>, <&uhphs_clk>, <&udphs_clk>,
-					 <&uhpck>;
+				clocks = <&usb>, <&uhphs_clk>, <&uhphs_clk>, <&uhpck>;
 				clock-names = "usb_clk", "ohci_clk", "hclk", "uhpck";
 				status = "disabled";
 			};
diff --git a/arch/arm/mach-exynos/hotplug.c b/arch/arm/mach-exynos/hotplug.c
index 8a134d019cb3..920a4baa53cd 100644
--- a/arch/arm/mach-exynos/hotplug.c
+++ b/arch/arm/mach-exynos/hotplug.c
@@ -40,15 +40,17 @@ static inline void cpu_leave_lowpower(void)
 
 static inline void platform_do_lowpower(unsigned int cpu, int *spurious)
 {
+	u32 mpidr = cpu_logical_map(cpu);
+	u32 core_id = MPIDR_AFFINITY_LEVEL(mpidr, 0);
+
 	for (;;) {
 
-		/* make cpu1 to be turned off at next WFI command */
-		if (cpu == 1)
-			exynos_cpu_power_down(cpu);
+		/* Turn the CPU off on next WFI instruction. */
+		exynos_cpu_power_down(core_id);
 
 		wfi();
 
-		if (pen_release == cpu_logical_map(cpu)) {
+		if (pen_release == core_id) {
 			/*
 			 * OK, proper wakeup, we're done
 			 */
diff --git a/arch/arm/mach-exynos/platsmp.c b/arch/arm/mach-exynos/platsmp.c
index 1c8d31e39520..50b9aad5e27b 100644
--- a/arch/arm/mach-exynos/platsmp.c
+++ b/arch/arm/mach-exynos/platsmp.c
@@ -90,7 +90,8 @@ static void exynos_secondary_init(unsigned int cpu)
 static int exynos_boot_secondary(unsigned int cpu, struct task_struct *idle)
 {
 	unsigned long timeout;
-	unsigned long phys_cpu = cpu_logical_map(cpu);
+	u32 mpidr = cpu_logical_map(cpu);
+	u32 core_id = MPIDR_AFFINITY_LEVEL(mpidr, 0);
 	int ret = -ENOSYS;
 
 	/*
@@ -104,17 +105,18 @@ static int exynos_boot_secondary(unsigned int cpu, struct task_struct *idle)
 	 * the holding pen - release it, then wait for it to flag
 	 * that it has been released by resetting pen_release.
 	 *
-	 * Note that "pen_release" is the hardware CPU ID, whereas
+	 * Note that "pen_release" is the hardware CPU core ID, whereas
 	 * "cpu" is Linux's internal ID.
 	 */
-	write_pen_release(phys_cpu);
+	write_pen_release(core_id);
 
-	if (!exynos_cpu_power_state(cpu)) {
-		exynos_cpu_power_up(cpu);
+	if (!exynos_cpu_power_state(core_id)) {
+		exynos_cpu_power_up(core_id);
 		timeout = 10;
 
 		/* wait max 10 ms until cpu1 is on */
-		while (exynos_cpu_power_state(cpu) != S5P_CORE_LOCAL_PWR_EN) {
+		while (exynos_cpu_power_state(core_id)
+		       != S5P_CORE_LOCAL_PWR_EN) {
 			if (timeout-- == 0)
 				break;
 
@@ -145,20 +147,20 @@ static int exynos_boot_secondary(unsigned int cpu, struct task_struct *idle)
 	 * Try to set boot address using firmware first
 	 * and fall back to boot register if it fails.
 	 */
-	ret = call_firmware_op(set_cpu_boot_addr, phys_cpu, boot_addr);
+	ret = call_firmware_op(set_cpu_boot_addr, core_id, boot_addr);
 	if (ret && ret != -ENOSYS)
 		goto fail;
 	if (ret == -ENOSYS) {
-		void __iomem *boot_reg = cpu_boot_reg(phys_cpu);
+		void __iomem *boot_reg = cpu_boot_reg(core_id);
 
 		if (IS_ERR(boot_reg)) {
 			ret = PTR_ERR(boot_reg);
 			goto fail;
 		}
-		__raw_writel(boot_addr, cpu_boot_reg(phys_cpu));
+		__raw_writel(boot_addr, cpu_boot_reg(core_id));
 	}
 
-	call_firmware_op(cpu_boot, phys_cpu);
+	call_firmware_op(cpu_boot, core_id);
 
 	arch_send_wakeup_ipi_mask(cpumask_of(cpu));
 
@@ -227,22 +229,24 @@ static void __init exynos_smp_prepare_cpus(unsigned int max_cpus)
 	 * boot register if it fails.
 	 */
 	for (i = 1; i < max_cpus; ++i) {
-		unsigned long phys_cpu;
 		unsigned long boot_addr;
+		u32 mpidr;
+		u32 core_id;
 		int ret;
 
-		phys_cpu = cpu_logical_map(i);
+		mpidr = cpu_logical_map(i);
+		core_id = MPIDR_AFFINITY_LEVEL(mpidr, 0);
 		boot_addr = virt_to_phys(exynos4_secondary_startup);
 
-		ret = call_firmware_op(set_cpu_boot_addr, phys_cpu, boot_addr);
+		ret = call_firmware_op(set_cpu_boot_addr, core_id, boot_addr);
 		if (ret && ret != -ENOSYS)
 			break;
 		if (ret == -ENOSYS) {
-			void __iomem *boot_reg = cpu_boot_reg(phys_cpu);
+			void __iomem *boot_reg = cpu_boot_reg(core_id);
 
 			if (IS_ERR(boot_reg))
 				break;
-			__raw_writel(boot_addr, cpu_boot_reg(phys_cpu));
+			__raw_writel(boot_addr, cpu_boot_reg(core_id));
 		}
 	}
 }
diff --git a/arch/arm/mach-imx/clk-imx6q.c b/arch/arm/mach-imx/clk-imx6q.c
index 8e795dea02ec..8556c787e59c 100644
--- a/arch/arm/mach-imx/clk-imx6q.c
+++ b/arch/arm/mach-imx/clk-imx6q.c
@@ -70,7 +70,7 @@ static const char *cko_sels[] = { "cko1", "cko2", };
 static const char *lvds_sels[] = {
 	"dummy", "dummy", "dummy", "dummy", "dummy", "dummy",
 	"pll4_audio", "pll5_video", "pll8_mlb", "enet_ref",
-	"pcie_ref", "sata_ref",
+	"pcie_ref_125m", "sata_ref_100m",
 };
 
 enum mx6q_clks {
@@ -491,7 +491,7 @@ static void __init imx6q_clocks_init(struct device_node *ccm_node)
 
 	/* All existing boards with PCIe use LVDS1 */
 	if (IS_ENABLED(CONFIG_PCI_IMX6))
-		clk_set_parent(clk[lvds1_sel], clk[sata_ref]);
+		clk_set_parent(clk[lvds1_sel], clk[sata_ref_100m]);
 
 	/* Set initial power mode */
 	imx6q_set_lpm(WAIT_CLOCKED);
diff --git a/arch/arm/mach-mvebu/coherency.c b/arch/arm/mach-mvebu/coherency.c
index 477202fd39cc..2bdc3233abe2 100644
--- a/arch/arm/mach-mvebu/coherency.c
+++ b/arch/arm/mach-mvebu/coherency.c
@@ -292,6 +292,10 @@ static struct notifier_block mvebu_hwcc_nb = {
 	.notifier_call = mvebu_hwcc_notifier,
 };
 
+static struct notifier_block mvebu_hwcc_pci_nb = {
+	.notifier_call = mvebu_hwcc_notifier,
+};
+
 static void __init armada_370_coherency_init(struct device_node *np)
 {
 	struct resource res;
@@ -427,7 +431,7 @@ static int __init coherency_pci_init(void)
 {
 	if (coherency_available())
 		bus_register_notifier(&pci_bus_type,
-				      &mvebu_hwcc_nb);
+				      &mvebu_hwcc_pci_nb);
 	return 0;
 }
 
diff --git a/arch/arm/mach-mvebu/headsmp-a9.S b/arch/arm/mach-mvebu/headsmp-a9.S
index 5925366bc03c..da5bb292b91c 100644
--- a/arch/arm/mach-mvebu/headsmp-a9.S
+++ b/arch/arm/mach-mvebu/headsmp-a9.S
@@ -15,6 +15,8 @@
 #include <linux/linkage.h>
 #include <linux/init.h>
 
+#include <asm/assembler.h>
+
 	__CPUINIT
 #define CPU_RESUME_ADDR_REG 0xf10182d4
 
@@ -22,13 +24,18 @@
 .global armada_375_smp_cpu1_enable_code_end
 
 armada_375_smp_cpu1_enable_code_start:
-	ldr	r0, [pc, #4]
+ARM_BE8(setend	be)
+	adr	r0, 1f
+	ldr	r0, [r0]
 	ldr	r1, [r0]
+ARM_BE8(rev	r1, r1)
 	mov	pc, r1
+1:
 	.word	CPU_RESUME_ADDR_REG
 armada_375_smp_cpu1_enable_code_end:
 
 ENTRY(mvebu_cortex_a9_secondary_startup)
+ARM_BE8(setend	be)
 	bl	v7_invalidate_l1
 	b	secondary_startup
 ENDPROC(mvebu_cortex_a9_secondary_startup)
diff --git a/arch/arm/mach-mvebu/pmsu.c b/arch/arm/mach-mvebu/pmsu.c
index a1d407c0febe..25aa8237d668 100644
--- a/arch/arm/mach-mvebu/pmsu.c
+++ b/arch/arm/mach-mvebu/pmsu.c
@@ -201,12 +201,12 @@ static noinline int do_armada_370_xp_cpu_suspend(unsigned long deepidle)
 
 	/* Test the CR_C bit and set it if it was cleared */
 	asm volatile(
-	"mrc	p15, 0, %0, c1, c0, 0 \n\t"
-	"tst	%0, #(1 << 2) \n\t"
-	"orreq	%0, %0, #(1 << 2) \n\t"
-	"mcreq	p15, 0, %0, c1, c0, 0 \n\t"
+	"mrc	p15, 0, r0, c1, c0, 0 \n\t"
+	"tst	r0, #(1 << 2) \n\t"
+	"orreq	r0, r0, #(1 << 2) \n\t"
+	"mcreq	p15, 0, r0, c1, c0, 0 \n\t"
 	"isb	"
-	: : "r" (0));
+	: : : "r0");
 
 	pr_warn("Failed to suspend the system\n");
 
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index a474de346be6..839f48c26ef0 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -4,6 +4,7 @@ config ARM64
 	select ARCH_HAS_OPP
 	select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
 	select ARCH_USE_CMPXCHG_LOCKREF
+	select ARCH_SUPPORTS_ATOMIC_RMW
 	select ARCH_WANT_OPTIONAL_GPIOLIB
 	select ARCH_WANT_COMPAT_IPC_PARSE_VERSION
 	select ARCH_WANT_FRAME_POINTERS
diff --git a/arch/arm64/kernel/efi-stub.c b/arch/arm64/kernel/efi-stub.c
index 60e98a639ac5..e786e6cdc400 100644
--- a/arch/arm64/kernel/efi-stub.c
+++ b/arch/arm64/kernel/efi-stub.c
@@ -12,8 +12,6 @@
 #include <linux/efi.h>
 #include <linux/libfdt.h>
 #include <asm/sections.h>
-#include <generated/compile.h>
-#include <generated/utsrelease.h>
 
 /*
  * AArch64 requires the DTB to be 8-byte aligned in the first 512MiB from
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index fefe7c8bf05f..80b94b0add1f 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -145,6 +145,7 @@ config PPC
 	select HAVE_IRQ_EXIT_ON_IRQ_STACK
 	select ARCH_USE_CMPXCHG_LOCKREF if PPC64
 	select HAVE_ARCH_AUDITSYSCALL
+	select ARCH_SUPPORTS_ATOMIC_RMW
 
 config GENERIC_CSUM
 	def_bool CPU_LITTLE_ENDIAN
diff --git a/arch/powerpc/include/asm/kvm_book3s_64.h b/arch/powerpc/include/asm/kvm_book3s_64.h
index fddb72b48ce9..d645428a65a4 100644
--- a/arch/powerpc/include/asm/kvm_book3s_64.h
+++ b/arch/powerpc/include/asm/kvm_book3s_64.h
@@ -198,8 +198,10 @@ static inline unsigned long compute_tlbie_rb(unsigned long v, unsigned long r,
 	return rb;
 }
 
-static inline unsigned long hpte_page_size(unsigned long h, unsigned long l)
+static inline unsigned long __hpte_page_size(unsigned long h, unsigned long l,
+					     bool is_base_size)
 {
+
 	int size, a_psize;
 	/* Look at the 8 bit LP value */
 	unsigned int lp = (l >> LP_SHIFT) & ((1 << LP_BITS) - 1);
@@ -214,14 +216,27 @@ static inline unsigned long hpte_page_size(unsigned long h, unsigned long l)
 				continue;
 
 			a_psize = __hpte_actual_psize(lp, size);
-			if (a_psize != -1)
+			if (a_psize != -1) {
+				if (is_base_size)
+					return 1ul << mmu_psize_defs[size].shift;
 				return 1ul << mmu_psize_defs[a_psize].shift;
+			}
 		}
 
 	}
 	return 0;
 }
 
+static inline unsigned long hpte_page_size(unsigned long h, unsigned long l)
+{
+	return __hpte_page_size(h, l, 0);
+}
+
+static inline unsigned long hpte_base_page_size(unsigned long h, unsigned long l)
+{
+	return __hpte_page_size(h, l, 1);
+}
+
 static inline unsigned long hpte_rpn(unsigned long ptel, unsigned long psize)
 {
 	return ((ptel & HPTE_R_RPN) & ~(psize - 1)) >> PAGE_SHIFT;
diff --git a/arch/powerpc/include/asm/ppc_asm.h b/arch/powerpc/include/asm/ppc_asm.h
index 9ea266eae33e..7e4612528546 100644
--- a/arch/powerpc/include/asm/ppc_asm.h
+++ b/arch/powerpc/include/asm/ppc_asm.h
@@ -277,6 +277,8 @@ n:
 	.globl n;	\
 n:
 
+#define _GLOBAL_TOC(name) _GLOBAL(name)
+
 #define _KPROBE(n)	\
 	.section ".kprobes.text","a";	\
 	.globl	n;	\
diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c
index 80561074078d..68468d695f12 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_hv.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c
@@ -1562,7 +1562,7 @@ static ssize_t kvm_htab_write(struct file *file, const char __user *buf,
 				goto out;
 			}
 			if (!rma_setup && is_vrma_hpte(v)) {
-				unsigned long psize = hpte_page_size(v, r);
+				unsigned long psize = hpte_base_page_size(v, r);
 				unsigned long senc = slb_pgsize_encoding(psize);
 				unsigned long lpcr;
 
diff --git a/arch/powerpc/kvm/book3s_hv_rm_mmu.c b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
index 6e6224318c36..5a24d3c2b6b8 100644
--- a/arch/powerpc/kvm/book3s_hv_rm_mmu.c
+++ b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
@@ -814,13 +814,10 @@ long kvmppc_hv_find_lock_hpte(struct kvm *kvm, gva_t eaddr, unsigned long slb_v,
 		r = hpte[i+1];
 
 		/*
-		 * Check the HPTE again, including large page size
-		 * Since we don't currently allow any MPSS (mixed
-		 * page-size segment) page sizes, it is sufficient
-		 * to check against the actual page size.
+		 * Check the HPTE again, including base page size
 		 */
 		if ((v & valid) && (v & mask) == val &&
-		    hpte_page_size(v, r) == (1ul << pshift))
+		    hpte_base_page_size(v, r) == (1ul << pshift))
 			/* Return with the HPTE still locked */
 			return (hash << 3) + (i >> 1);
 
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
index 868347ef09fd..558a67df8126 100644
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -48,7 +48,7 @@
 *
 * LR = return address to continue at after eventually re-enabling MMU
 */
-_GLOBAL(kvmppc_hv_entry_trampoline)
+_GLOBAL_TOC(kvmppc_hv_entry_trampoline)
 	mflr	r0
 	std	r0, PPC_LR_STKOFF(r1)
 	stdu	r1, -112(r1)
diff --git a/arch/powerpc/kvm/book3s_interrupts.S b/arch/powerpc/kvm/book3s_interrupts.S
index e2c29e381dc7..d044b8b7c69d 100644
--- a/arch/powerpc/kvm/book3s_interrupts.S
+++ b/arch/powerpc/kvm/book3s_interrupts.S
@@ -25,7 +25,11 @@
 #include <asm/exception-64s.h>
 
 #if defined(CONFIG_PPC_BOOK3S_64)
+#if defined(_CALL_ELF) && _CALL_ELF == 2
+#define FUNC(name)		name
+#else
 #define FUNC(name)		GLUE(.,name)
+#endif
 #define GET_SHADOW_VCPU(reg)	addi	reg, r13, PACA_SVCPU
 
 #elif defined(CONFIG_PPC_BOOK3S_32)
diff --git a/arch/powerpc/kvm/book3s_rmhandlers.S b/arch/powerpc/kvm/book3s_rmhandlers.S
index 9eec675220e6..16c4d88ba27d 100644
--- a/arch/powerpc/kvm/book3s_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_rmhandlers.S
@@ -36,7 +36,11 @@
 
 #if defined(CONFIG_PPC_BOOK3S_64)
 
+#if defined(_CALL_ELF) && _CALL_ELF == 2
+#define FUNC(name)		name
+#else
 #define FUNC(name)		GLUE(.,name)
+#endif
 
 #elif defined(CONFIG_PPC_BOOK3S_32)
 
@@ -146,7 +150,7 @@ kvmppc_handler_skip_ins:
 * On entry, r4 contains the guest shadow MSR
 * MSR.EE has to be 0 when calling this function
 */
-_GLOBAL(kvmppc_entry_trampoline)
+_GLOBAL_TOC(kvmppc_entry_trampoline)
 	mfmsr	r5
 	LOAD_REG_ADDR(r7, kvmppc_handler_trampoline_enter)
 	toreal(r7)
diff --git a/arch/powerpc/kvm/book3s_rtas.c b/arch/powerpc/kvm/book3s_rtas.c
index edb14ba992b3..ef27fbd5d9c5 100644
--- a/arch/powerpc/kvm/book3s_rtas.c
+++ b/arch/powerpc/kvm/book3s_rtas.c
@@ -23,20 +23,20 @@ static void kvm_rtas_set_xive(struct kvm_vcpu *vcpu, struct rtas_args *args)
 	u32 irq, server, priority;
 	int rc;
 
-	if (args->nargs != 3 || args->nret != 1) {
+	if (be32_to_cpu(args->nargs) != 3 || be32_to_cpu(args->nret) != 1) {
 		rc = -3;
 		goto out;
 	}
 
-	irq = args->args[0];
-	server = args->args[1];
-	priority = args->args[2];
+	irq = be32_to_cpu(args->args[0]);
+	server = be32_to_cpu(args->args[1]);
+	priority = be32_to_cpu(args->args[2]);
 
 	rc = kvmppc_xics_set_xive(vcpu->kvm, irq, server, priority);
 	if (rc)
 		rc = -3;
 out:
-	args->rets[0] = rc;
+	args->rets[0] = cpu_to_be32(rc);
 }
 
 static void kvm_rtas_get_xive(struct kvm_vcpu *vcpu, struct rtas_args *args)
@@ -44,12 +44,12 @@ static void kvm_rtas_get_xive(struct kvm_vcpu *vcpu, struct rtas_args *args)
 	u32 irq, server, priority;
 	int rc;
 
-	if (args->nargs != 1 || args->nret != 3) {
+	if (be32_to_cpu(args->nargs) != 1 || be32_to_cpu(args->nret) != 3) {
 		rc = -3;
 		goto out;
 	}
 
-	irq = args->args[0];
+	irq = be32_to_cpu(args->args[0]);
 
 	server = priority = 0;
 	rc = kvmppc_xics_get_xive(vcpu->kvm, irq, &server, &priority);
@@ -58,10 +58,10 @@ static void kvm_rtas_get_xive(struct kvm_vcpu *vcpu, struct rtas_args *args)
 		goto out;
 	}
 
-	args->rets[1] = server;
-	args->rets[2] = priority;
+	args->rets[1] = cpu_to_be32(server);
+	args->rets[2] = cpu_to_be32(priority);
 out:
-	args->rets[0] = rc;
+	args->rets[0] = cpu_to_be32(rc);
 }
 
 static void kvm_rtas_int_off(struct kvm_vcpu *vcpu, struct rtas_args *args)
@@ -69,18 +69,18 @@ static void kvm_rtas_int_off(struct kvm_vcpu *vcpu, struct rtas_args *args)
 	u32 irq;
 	int rc;
 
-	if (args->nargs != 1 || args->nret != 1) {
+	if (be32_to_cpu(args->nargs) != 1 || be32_to_cpu(args->nret) != 1) {
 		rc = -3;
 		goto out;
 	}
 
-	irq = args->args[0];
+	irq = be32_to_cpu(args->args[0]);
 
 	rc = kvmppc_xics_int_off(vcpu->kvm, irq);
 	if (rc)
 		rc = -3;
 out:
-	args->rets[0] = rc;
+	args->rets[0] = cpu_to_be32(rc);
 }
 
 static void kvm_rtas_int_on(struct kvm_vcpu *vcpu, struct rtas_args *args)
@@ -88,18 +88,18 @@ static void kvm_rtas_int_on(struct kvm_vcpu *vcpu, struct rtas_args *args)
 	u32 irq;
 	int rc;
 
-	if (args->nargs != 1 || args->nret != 1) {
+	if (be32_to_cpu(args->nargs) != 1 || be32_to_cpu(args->nret) != 1) {
 		rc = -3;
 		goto out;
 	}
 
-	irq = args->args[0];
+	irq = be32_to_cpu(args->args[0]);
 
 	rc = kvmppc_xics_int_on(vcpu->kvm, irq);
 	if (rc)
 		rc = -3;
 out:
-	args->rets[0] = rc;
+	args->rets[0] = cpu_to_be32(rc);
 }
 #endif /* CONFIG_KVM_XICS */
 
@@ -205,32 +205,6 @@ int kvm_vm_ioctl_rtas_define_token(struct kvm *kvm, void __user *argp)
 	return rc;
 }
 
-static void kvmppc_rtas_swap_endian_in(struct rtas_args *args)
-{
-#ifdef __LITTLE_ENDIAN__
-	int i;
-
-	args->token = be32_to_cpu(args->token);
-	args->nargs = be32_to_cpu(args->nargs);
-	args->nret = be32_to_cpu(args->nret);
-	for (i = 0; i < args->nargs; i++)
-		args->args[i] = be32_to_cpu(args->args[i]);
-#endif
-}
-
-static void kvmppc_rtas_swap_endian_out(struct rtas_args *args)
-{
-#ifdef __LITTLE_ENDIAN__
-	int i;
-
-	for (i = 0; i < args->nret; i++)
-		args->args[i] = cpu_to_be32(args->args[i]);
-	args->token = cpu_to_be32(args->token);
-	args->nargs = cpu_to_be32(args->nargs);
-	args->nret = cpu_to_be32(args->nret);
-#endif
-}
-
 int kvmppc_rtas_hcall(struct kvm_vcpu *vcpu)
 {
 	struct rtas_token_definition *d;
@@ -249,8 +223,6 @@ int kvmppc_rtas_hcall(struct kvm_vcpu *vcpu)
 	if (rc)
 		goto fail;
 
-	kvmppc_rtas_swap_endian_in(&args);
-
 	/*
 	 * args->rets is a pointer into args->args. Now that we've
 	 * copied args we need to fix it up to point into our copy,
@@ -258,13 +230,13 @@ int kvmppc_rtas_hcall(struct kvm_vcpu *vcpu)
 	 * value so we can restore it on the way out.
 	 */
 	orig_rets = args.rets;
-	args.rets = &args.args[args.nargs];
+	args.rets = &args.args[be32_to_cpu(args.nargs)];
 
 	mutex_lock(&vcpu->kvm->lock);
 
 	rc = -ENOENT;
 	list_for_each_entry(d, &vcpu->kvm->arch.rtas_tokens, list) {
-		if (d->token == args.token) {
+		if (d->token == be32_to_cpu(args.token)) {
 			d->handler->handler(vcpu, &args);
 			rc = 0;
 			break;
@@ -275,7 +247,6 @@ int kvmppc_rtas_hcall(struct kvm_vcpu *vcpu)
 
 	if (rc == 0) {
 		args.rets = orig_rets;
-		kvmppc_rtas_swap_endian_out(&args);
 		rc = kvm_write_guest(vcpu->kvm, args_phys, &args, sizeof(args));
 		if (rc)
 			goto fail;
diff --git a/arch/powerpc/kvm/e500_mmu_host.c b/arch/powerpc/kvm/e500_mmu_host.c
index dd2cc03f406f..86903d3f5a03 100644
--- a/arch/powerpc/kvm/e500_mmu_host.c
+++ b/arch/powerpc/kvm/e500_mmu_host.c
@@ -473,7 +473,8 @@ static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
 		if (printk_ratelimit())
 			pr_err("%s: pte not present: gfn %lx, pfn %lx\n",
 				__func__, (long)gfn, pfn);
-		return -EINVAL;
+		ret = -EINVAL;
+		goto out;
 	}
 	kvmppc_e500_ref_setup(ref, gtlbe, pfn, wimg);
 
diff --git a/arch/s390/include/asm/switch_to.h b/arch/s390/include/asm/switch_to.h
index df38c70cd59e..18ea9e3f8142 100644
--- a/arch/s390/include/asm/switch_to.h
+++ b/arch/s390/include/asm/switch_to.h
@@ -51,8 +51,8 @@ static inline int restore_fp_ctl(u32 *fpc)
 		return 0;
 
 	asm volatile(
-		"0:	lfpc	%1\n"
-		"	la	%0,0\n"
+		"	lfpc	%1\n"
+		"0:	la	%0,0\n"
 		"1:\n"
 		EX_TABLE(0b,1b)
 		: "=d" (rc) : "Q" (*fpc), "0" (-EINVAL));
diff --git a/arch/s390/kernel/head.S b/arch/s390/kernel/head.S
index 7ba7d6784510..e88d35d74950 100644
--- a/arch/s390/kernel/head.S
+++ b/arch/s390/kernel/head.S
@@ -437,11 +437,11 @@ ENTRY(startup_kdump)
 
 #if defined(CONFIG_64BIT)
 #if defined(CONFIG_MARCH_ZEC12)
-	.long 3, 0xc100efea, 0xf46ce800, 0x00400000
+	.long 3, 0xc100eff2, 0xf46ce800, 0x00400000
 #elif defined(CONFIG_MARCH_Z196)
-	.long 2, 0xc100efea, 0xf46c0000
+	.long 2, 0xc100eff2, 0xf46c0000
 #elif defined(CONFIG_MARCH_Z10)
-	.long 2, 0xc100efea, 0xf0680000
+	.long 2, 0xc100eff2, 0xf0680000
 #elif defined(CONFIG_MARCH_Z9_109)
 	.long 1, 0xc100efc2
 #elif defined(CONFIG_MARCH_Z990)
diff --git a/arch/s390/kernel/ptrace.c b/arch/s390/kernel/ptrace.c
index 2d716734b5b1..5dc7ad9e2fbf 100644
--- a/arch/s390/kernel/ptrace.c
+++ b/arch/s390/kernel/ptrace.c
@@ -334,9 +334,14 @@ static int __poke_user(struct task_struct *child, addr_t addr, addr_t data)
 		unsigned long mask = PSW_MASK_USER;
 
 		mask |= is_ri_task(child) ? PSW_MASK_RI : 0;
-		if ((data & ~mask) != PSW_USER_BITS)
+		if ((data ^ PSW_USER_BITS) & ~mask)
+			/* Invalid psw mask. */
+			return -EINVAL;
+		if ((data & PSW_MASK_ASC) == PSW_ASC_HOME)
+			/* Invalid address-space-control bits */
 			return -EINVAL;
 		if ((data & PSW_MASK_EA) && !(data & PSW_MASK_BA))
+			/* Invalid addressing mode bits */
 			return -EINVAL;
 	}
 	*(addr_t *)((addr_t) &task_pt_regs(child)->psw + addr) = data;
@@ -672,9 +677,12 @@ static int __poke_user_compat(struct task_struct *child,
 
 		mask |= is_ri_task(child) ? PSW32_MASK_RI : 0;
 		/* Build a 64 bit psw mask from 31 bit mask. */
-		if ((tmp & ~mask) != PSW32_USER_BITS)
+		if ((tmp ^ PSW32_USER_BITS) & ~mask)
 			/* Invalid psw mask. */
 			return -EINVAL;
+		if ((data & PSW32_MASK_ASC) == PSW32_ASC_HOME)
+			/* Invalid address-space-control bits */
+			return -EINVAL;
 		regs->psw.mask = (regs->psw.mask & ~PSW_MASK_USER) |
 			(regs->psw.mask & PSW_MASK_BA) |
 			(__u64)(tmp & mask) << 32;
diff --git a/arch/s390/pci/pci.c b/arch/s390/pci/pci.c
index 9ddc51eeb8d6..30de42730b2f 100644
--- a/arch/s390/pci/pci.c
+++ b/arch/s390/pci/pci.c
@@ -48,13 +48,10 @@
 static LIST_HEAD(zpci_list);
 static DEFINE_SPINLOCK(zpci_list_lock);
 
-static void zpci_enable_irq(struct irq_data *data);
-static void zpci_disable_irq(struct irq_data *data);
-
 static struct irq_chip zpci_irq_chip = {
 	.name = "zPCI",
-	.irq_unmask = zpci_enable_irq,
-	.irq_mask = zpci_disable_irq,
+	.irq_unmask = unmask_msi_irq,
+	.irq_mask = mask_msi_irq,
 };
 
 static DECLARE_BITMAP(zpci_domain, ZPCI_NR_DEVICES);
@@ -244,43 +241,6 @@ static int zpci_cfg_store(struct zpci_dev *zdev, int offset, u32 val, u8 len)
 	return rc;
 }
 
-static int zpci_msi_set_mask_bits(struct msi_desc *msi, u32 mask, u32 flag)
-{
-	int offset, pos;
-	u32 mask_bits;
-
-	if (msi->msi_attrib.is_msix) {
-		offset = msi->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE +
-			PCI_MSIX_ENTRY_VECTOR_CTRL;
-		msi->masked = readl(msi->mask_base + offset);
-		writel(flag, msi->mask_base + offset);
-	} else if (msi->msi_attrib.maskbit) {
-		pos = (long) msi->mask_base;
-		pci_read_config_dword(msi->dev, pos, &mask_bits);
-		mask_bits &= ~(mask);
-		mask_bits |= flag & mask;
-		pci_write_config_dword(msi->dev, pos, mask_bits);
-	} else
-		return 0;
-
-	msi->msi_attrib.maskbit = !!flag;
-	return 1;
-}
-
-static void zpci_enable_irq(struct irq_data *data)
-{
-	struct msi_desc *msi = irq_get_msi_desc(data->irq);
-
-	zpci_msi_set_mask_bits(msi, 1, 0);
-}
-
-static void zpci_disable_irq(struct irq_data *data)
-{
-	struct msi_desc *msi = irq_get_msi_desc(data->irq);
-
-	zpci_msi_set_mask_bits(msi, 1, 1);
-}
-
 void pcibios_fixup_bus(struct pci_bus *bus)
 {
 }
@@ -487,7 +447,10 @@ void arch_teardown_msi_irqs(struct pci_dev *pdev)
 
 	/* Release MSI interrupts */
 	list_for_each_entry(msi, &pdev->msi_list, list) {
-		zpci_msi_set_mask_bits(msi, 1, 1);
+		if (msi->msi_attrib.is_msix)
+			default_msix_mask_irq(msi, 1);
+		else
+			default_msi_mask_irq(msi, 1, 1);
 		irq_set_msi_desc(msi->irq, NULL);
 		irq_free_desc(msi->irq);
 		msi->msg.address_lo = 0;
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
index 29f2e988c56a..407c87d9879a 100644
--- a/arch/sparc/Kconfig
+++ b/arch/sparc/Kconfig
@@ -78,6 +78,7 @@ config SPARC64
 	select HAVE_C_RECORDMCOUNT
 	select NO_BOOTMEM
 	select HAVE_ARCH_AUDITSYSCALL
+	select ARCH_SUPPORTS_ATOMIC_RMW
 
 config ARCH_DEFCONFIG
 	string
diff --git a/arch/sparc/include/uapi/asm/unistd.h b/arch/sparc/include/uapi/asm/unistd.h
index b73274fb961a..42f2bca1d338 100644
--- a/arch/sparc/include/uapi/asm/unistd.h
+++ b/arch/sparc/include/uapi/asm/unistd.h
@@ -410,8 +410,9 @@
 #define __NR_finit_module	342
 #define __NR_sched_setattr	343
 #define __NR_sched_getattr	344
+#define __NR_renameat2		345
 
-#define NR_syscalls		345
+#define NR_syscalls		346
 
 /* Bitmask values returned from kern_features system call. */
 #define KERN_FEATURE_MIXED_MODE_STACK	0x00000001
diff --git a/arch/sparc/kernel/sys32.S b/arch/sparc/kernel/sys32.S
index d066eb18650c..f834224208ed 100644
--- a/arch/sparc/kernel/sys32.S
+++ b/arch/sparc/kernel/sys32.S
@@ -48,6 +48,7 @@ SIGN1(sys32_futex, compat_sys_futex, %o1)
 SIGN1(sys32_recvfrom, compat_sys_recvfrom, %o0)
 SIGN1(sys32_recvmsg, compat_sys_recvmsg, %o0)
 SIGN1(sys32_sendmsg, compat_sys_sendmsg, %o0)
+SIGN2(sys32_renameat2, sys_renameat2, %o0, %o2)
 
 	.globl		sys32_mmap2
 sys32_mmap2:
diff --git a/arch/sparc/kernel/systbls_32.S b/arch/sparc/kernel/systbls_32.S
index 151ace8766cc..85fe9b1087cd 100644
--- a/arch/sparc/kernel/systbls_32.S
+++ b/arch/sparc/kernel/systbls_32.S
@@ -86,3 +86,4 @@ sys_call_table:
 /*330*/	.long sys_fanotify_mark, sys_prlimit64, sys_name_to_handle_at, sys_open_by_handle_at, sys_clock_adjtime
 /*335*/	.long sys_syncfs, sys_sendmmsg, sys_setns, sys_process_vm_readv, sys_process_vm_writev
 /*340*/	.long sys_ni_syscall, sys_kcmp, sys_finit_module, sys_sched_setattr, sys_sched_getattr
+/*345*/	.long sys_renameat2
diff --git a/arch/sparc/kernel/systbls_64.S b/arch/sparc/kernel/systbls_64.S
index 4bd4e2bb26cf..33ecba2826ea 100644
--- a/arch/sparc/kernel/systbls_64.S
+++ b/arch/sparc/kernel/systbls_64.S
@@ -87,6 +87,7 @@ sys_call_table32:
 /*330*/	.word compat_sys_fanotify_mark, sys_prlimit64, sys_name_to_handle_at, compat_sys_open_by_handle_at, compat_sys_clock_adjtime
 	.word sys_syncfs, compat_sys_sendmmsg, sys_setns, compat_sys_process_vm_readv, compat_sys_process_vm_writev
 /*340*/	.word sys_kern_features, sys_kcmp, sys_finit_module, sys_sched_setattr, sys_sched_getattr
+	.word sys32_renameat2
 
 #endif /* CONFIG_COMPAT */
 
@@ -165,3 +166,4 @@ sys_call_table:
 /*330*/	.word sys_fanotify_mark, sys_prlimit64, sys_name_to_handle_at, sys_open_by_handle_at, sys_clock_adjtime
 	.word sys_syncfs, sys_sendmmsg, sys_setns, sys_process_vm_readv, sys_process_vm_writev
 /*340*/	.word sys_kern_features, sys_kcmp, sys_finit_module, sys_sched_setattr, sys_sched_getattr
+	.word sys_renameat2
diff --git a/arch/um/kernel/tlb.c b/arch/um/kernel/tlb.c
index 9472079471bb..f1b3eb14b855 100644
--- a/arch/um/kernel/tlb.c
+++ b/arch/um/kernel/tlb.c
@@ -12,6 +12,7 @@
 #include <mem_user.h>
 #include <os.h>
 #include <skas.h>
+#include <kern_util.h>
 
 struct host_vm_change {
 	struct host_vm_op {
@@ -124,6 +125,9 @@ static int add_munmap(unsigned long addr, unsigned long len,
 	struct host_vm_op *last;
 	int ret = 0;
 
+	if ((addr >= STUB_START) && (addr < STUB_END))
+		return -EINVAL;
+
 	if (hvc->index != 0) {
 		last = &hvc->ops[hvc->index - 1];
 		if ((last->type == MUNMAP) &&
@@ -283,8 +287,11 @@ void fix_range_common(struct mm_struct *mm, unsigned long start_addr,
 	/* This is not an else because ret is modified above */
 	if (ret) {
 		printk(KERN_ERR "fix_range_common: failed, killing current "
-		       "process\n");
+		       "process: %d\n", task_tgid_vnr(current));
+		/* We are under mmap_sem, release it such that current can terminate */
+		up_write(&current->mm->mmap_sem);
 		force_sig(SIGKILL, current);
+		do_signal();
 	}
 }
 
diff --git a/arch/um/kernel/trap.c b/arch/um/kernel/trap.c
index 974b87474a99..5678c3571e7c 100644
--- a/arch/um/kernel/trap.c
+++ b/arch/um/kernel/trap.c
@@ -206,7 +206,7 @@ unsigned long segv(struct faultinfo fi, unsigned long ip, int is_user,
 	int is_write = FAULT_WRITE(fi);
 	unsigned long address = FAULT_ADDRESS(fi);
 
-	if (regs)
+	if (!is_user && regs)
 		current->thread.segv_regs = container_of(regs, struct pt_regs, regs);
 
 	if (!is_user && (address >= start_vm) && (address < end_vm)) {
diff --git a/arch/um/os-Linux/skas/process.c b/arch/um/os-Linux/skas/process.c
index d531879a4617..908579f2b0ab 100644
--- a/arch/um/os-Linux/skas/process.c
+++ b/arch/um/os-Linux/skas/process.c
@@ -54,7 +54,7 @@ static int ptrace_dump_regs(int pid)
 
 void wait_stub_done(int pid)
 {
-	int n, status, err, bad_stop = 0;
+	int n, status, err;
 
 	while (1) {
 		CATCH_EINTR(n = waitpid(pid, &status, WUNTRACED | __WALL));
@@ -74,8 +74,6 @@ void wait_stub_done(int pid)
 
 	if (((1 << WSTOPSIG(status)) & STUB_DONE_MASK) != 0)
 		return;
-	else
-		bad_stop = 1;
 
 bad_wait:
 	err = ptrace_dump_regs(pid);
@@ -85,10 +83,7 @@ bad_wait:
 	printk(UM_KERN_ERR "wait_stub_done : failed to wait for SIGTRAP, "
 	       "pid = %d, n = %d, errno = %d, status = 0x%x\n", pid, n, errno,
 	       status);
-	if (bad_stop)
-		kill(pid, SIGKILL);
-	else
-		fatal_sigsegv();
+	fatal_sigsegv();
 }
 
 extern unsigned long current_stub_stack(void);
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index a8f749ef0fdc..d24887b645dc 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -131,6 +131,7 @@ config X86
 	select HAVE_CC_STACKPROTECTOR
 	select GENERIC_CPU_AUTOPROBE
 	select HAVE_ARCH_AUDITSYSCALL
+	select ARCH_SUPPORTS_ATOMIC_RMW
 
 config INSTRUCTION_DECODER
 	def_bool y
diff --git a/arch/x86/boot/header.S b/arch/x86/boot/header.S
index 84c223479e3c..7a6d43a554d7 100644
--- a/arch/x86/boot/header.S
+++ b/arch/x86/boot/header.S
@@ -91,10 +91,9 @@ bs_die:
 
 	.section ".bsdata", "a"
 bugger_off_msg:
-	.ascii	"Direct floppy boot is not supported. "
-	.ascii	"Use a boot loader program instead.\r\n"
+	.ascii	"Use a boot loader.\r\n"
 	.ascii	"\n"
-	.ascii	"Remove disk and press any key to reboot ...\r\n"
+	.ascii	"Remove disk and press any key to reboot...\r\n"
 	.byte	0
 
 #ifdef CONFIG_EFI_STUB
@@ -108,7 +107,7 @@ coff_header:
 #else
 	.word	0x8664				# x86-64
 #endif
-	.word	3				# nr_sections
+	.word	4				# nr_sections
 	.long	0				# TimeDateStamp
 	.long	0				# PointerToSymbolTable
 	.long	1				# NumberOfSymbols
@@ -250,6 +249,25 @@ section_table:
 	.word	0				# NumberOfLineNumbers
 	.long	0x60500020			# Characteristics (section flags)
 
+	#
+	# The offset & size fields are filled in by build.c.
+	#
+	.ascii	".bss"
+	.byte	0
+	.byte	0
+	.byte	0
+	.byte	0
+	.long	0
+	.long	0x0
+	.long	0				# Size of initialized data
+						# on disk
+	.long	0x0
+	.long	0				# PointerToRelocations
+	.long	0				# PointerToLineNumbers
+	.word	0				# NumberOfRelocations
+	.word	0				# NumberOfLineNumbers
+	.long	0xc8000080			# Characteristics (section flags)
+
 #endif /* CONFIG_EFI_STUB */
 
 	# Kernel attributes; used by setup. This is part 1 of the
diff --git a/arch/x86/boot/tools/build.c b/arch/x86/boot/tools/build.c
index 1a2f2121cada..a7661c430cd9 100644
--- a/arch/x86/boot/tools/build.c
+++ b/arch/x86/boot/tools/build.c
@@ -143,7 +143,7 @@ static void usage(void)
 
 #ifdef CONFIG_EFI_STUB
 
-static void update_pecoff_section_header(char *section_name, u32 offset, u32 size)
+static void update_pecoff_section_header_fields(char *section_name, u32 vma, u32 size, u32 datasz, u32 offset)
 {
 	unsigned int pe_header;
 	unsigned short num_sections;
@@ -164,10 +164,10 @@ static void update_pecoff_section_header(char *section_name, u32 offset, u32 siz
 			put_unaligned_le32(size, section + 0x8);
 
 			/* section header vma field */
-			put_unaligned_le32(offset, section + 0xc);
+			put_unaligned_le32(vma, section + 0xc);
 
 			/* section header 'size of initialised data' field */
-			put_unaligned_le32(size, section + 0x10);
+			put_unaligned_le32(datasz, section + 0x10);
 
 			/* section header 'file offset' field */
 			put_unaligned_le32(offset, section + 0x14);
@@ -179,6 +179,11 @@ static void update_pecoff_section_header(char *section_name, u32 offset, u32 siz
 	}
 }
 
+static void update_pecoff_section_header(char *section_name, u32 offset, u32 size)
+{
+	update_pecoff_section_header_fields(section_name, offset, size, size, offset);
+}
+
 static void update_pecoff_setup_and_reloc(unsigned int size)
 {
 	u32 setup_offset = 0x200;
@@ -203,9 +208,6 @@ static void update_pecoff_text(unsigned int text_start, unsigned int file_sz)
 
 	pe_header = get_unaligned_le32(&buf[0x3c]);
 
-	/* Size of image */
-	put_unaligned_le32(file_sz, &buf[pe_header + 0x50]);
-
 	/*
 	 * Size of code: Subtract the size of the first sector (512 bytes)
 	 * which includes the header.
@@ -220,6 +222,22 @@ static void update_pecoff_text(unsigned int text_start, unsigned int file_sz)
 	update_pecoff_section_header(".text", text_start, text_sz);
 }
 
+static void update_pecoff_bss(unsigned int file_sz, unsigned int init_sz)
+{
+	unsigned int pe_header;
+	unsigned int bss_sz = init_sz - file_sz;
+
+	pe_header = get_unaligned_le32(&buf[0x3c]);
+
+	/* Size of uninitialized data */
+	put_unaligned_le32(bss_sz, &buf[pe_header + 0x24]);
+
+	/* Size of image */
+	put_unaligned_le32(init_sz, &buf[pe_header + 0x50]);
+
+	update_pecoff_section_header_fields(".bss", file_sz, bss_sz, 0, 0);
+}
+
 static int reserve_pecoff_reloc_section(int c)
 {
 	/* Reserve 0x20 bytes for .reloc section */
@@ -259,6 +277,8 @@ static void efi_stub_entry_update(void)
 static inline void update_pecoff_setup_and_reloc(unsigned int size) {}
 static inline void update_pecoff_text(unsigned int text_start,
 				      unsigned int file_sz) {}
+static inline void update_pecoff_bss(unsigned int file_sz,
+				     unsigned int init_sz) {}
 static inline void efi_stub_defaults(void) {}
 static inline void efi_stub_entry_update(void) {}
 
@@ -310,7 +330,7 @@ static void parse_zoffset(char *fname)
 
 int main(int argc, char ** argv)
 {
-	unsigned int i, sz, setup_sectors;
+	unsigned int i, sz, setup_sectors, init_sz;
 	int c;
 	u32 sys_size;
 	struct stat sb;
@@ -376,7 +396,9 @@ int main(int argc, char ** argv)
 	buf[0x1f1] = setup_sectors-1;
 	put_unaligned_le32(sys_size, &buf[0x1f4]);
 
-	update_pecoff_text(setup_sectors * 512, sz + i + ((sys_size * 16) - sz));
+	update_pecoff_text(setup_sectors * 512, i + (sys_size * 16));
+	init_sz = get_unaligned_le32(&buf[0x260]);
+	update_pecoff_bss(i + (sys_size * 16), init_sz);
 
 	efi_stub_entry_update();
 
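A stand-alone sketch of the arithmetic update_pecoff_bss() performs, using local little-endian helpers in place of the kernel's get/put_unaligned_le32(); the 0x24 and 0x50 offsets are the ones used in the hunk above:

    #include <stdint.h>

    static uint32_t load_le32(const uint8_t *p)
    {
    	return p[0] | p[1] << 8 | (uint32_t)p[2] << 16 | (uint32_t)p[3] << 24;
    }

    static void store_le32(uint8_t *p, uint32_t v)
    {
    	p[0] = v;
    	p[1] = v >> 8;
    	p[2] = v >> 16;
    	p[3] = v >> 24;
    }

    /*
     * Grow SizeOfUninitializedData (COFF header offset 0x24) and
     * SizeOfImage (offset 0x50) so an EFI loader reserves the kernel's
     * runtime footprint, not just its file-backed bytes.
     */
    static void patch_bss(uint8_t *buf, uint32_t file_sz, uint32_t init_sz)
    {
    	uint32_t pe_header = load_le32(&buf[0x3c]); /* PE signature offset */
    	uint32_t bss_sz = init_sz - file_sz;

    	store_le32(&buf[pe_header + 0x24], bss_sz);
    	store_le32(&buf[pe_header + 0x50], init_sz);
    }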
diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c
index f3a1f04ed4cb..584874451414 100644
--- a/arch/x86/kernel/apm_32.c
+++ b/arch/x86/kernel/apm_32.c
@@ -841,7 +841,6 @@ static int apm_do_idle(void)
 	u32 eax;
 	u8 ret = 0;
 	int idled = 0;
-	int polling;
 	int err = 0;
 
 	if (!need_resched()) {
diff --git a/arch/x86/kernel/espfix_64.c b/arch/x86/kernel/espfix_64.c
index 6afbb16e9b79..94d857fb1033 100644
--- a/arch/x86/kernel/espfix_64.c
+++ b/arch/x86/kernel/espfix_64.c
@@ -175,7 +175,7 @@ void init_espfix_ap(void)
 	if (!pud_present(pud)) {
 		pmd_p = (pmd_t *)__get_free_page(PGALLOC_GFP);
 		pud = __pud(__pa(pmd_p) | (PGTABLE_PROT & ptemask));
-		paravirt_alloc_pud(&init_mm, __pa(pmd_p) >> PAGE_SHIFT);
+		paravirt_alloc_pmd(&init_mm, __pa(pmd_p) >> PAGE_SHIFT);
 		for (n = 0; n < ESPFIX_PUD_CLONES; n++)
 			set_pud(&pud_p[n], pud);
 	}
@@ -185,7 +185,7 @@ void init_espfix_ap(void)
 	if (!pmd_present(pmd)) {
 		pte_p = (pte_t *)__get_free_page(PGALLOC_GFP);
 		pmd = __pmd(__pa(pte_p) | (PGTABLE_PROT & ptemask));
-		paravirt_alloc_pmd(&init_mm, __pa(pte_p) >> PAGE_SHIFT);
+		paravirt_alloc_pte(&init_mm, __pa(pte_p) >> PAGE_SHIFT);
 		for (n = 0; n < ESPFIX_PMD_CLONES; n++)
 			set_pmd(&pmd_p[n], pmd);
 	}
@@ -193,7 +193,6 @@ void init_espfix_ap(void)
 	pte_p = pte_offset_kernel(&pmd, addr);
 	stack_page = (void *)__get_free_page(GFP_KERNEL);
 	pte = __pte(__pa(stack_page) | (__PAGE_KERNEL_RO & ptemask));
-	paravirt_alloc_pte(&init_mm, __pa(stack_page) >> PAGE_SHIFT);
 	for (n = 0; n < ESPFIX_PTE_CLONES; n++)
 		set_pte(&pte_p[n*PTE_STRIDE], pte);
 
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index f6449334ec45..ef432f891d30 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -5887,6 +5887,18 @@ static int inject_pending_event(struct kvm_vcpu *vcpu, bool req_int_win)
 			kvm_x86_ops->set_nmi(vcpu);
 		}
 	} else if (kvm_cpu_has_injectable_intr(vcpu)) {
+		/*
+		 * Because interrupts can be injected asynchronously, we are
+		 * calling check_nested_events again here to avoid a race condition.
+		 * See https://lkml.org/lkml/2014/7/2/60 for discussion about this
+		 * proposal and current concerns.  Perhaps we should be setting
+		 * KVM_REQ_EVENT only on certain events and not unconditionally?
+		 */
+		if (is_guest_mode(vcpu) && kvm_x86_ops->check_nested_events) {
+			r = kvm_x86_ops->check_nested_events(vcpu, req_int_win);
+			if (r != 0)
+				return r;
+		}
 		if (kvm_x86_ops->interrupt_allowed(vcpu)) {
 			kvm_queue_interrupt(vcpu, kvm_cpu_get_interrupt(vcpu),
 					    false);
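The shape of the fix, reduced to its pattern: re-poll the asynchronous event source immediately before committing the injection, because an event may have been posted after the caller's earlier check. The callback names below are placeholders for illustration, not the KVM vendor-ops API:

    /* Sketch of "re-check before commit" for asynchronous event sources. */
    struct vcpu;

    struct event_ops {
    	/* returns nonzero if a nested event must be handled first */
    	int  (*check_nested_events)(struct vcpu *v, int want_int_window);
    	int  (*interrupt_allowed)(struct vcpu *v);
    	void (*queue_interrupt)(struct vcpu *v, int vector);
    };

    static int inject_interrupt(struct vcpu *v, const struct event_ops *ops,
    			    int vector, int want_int_window)
    {
    	/*
    	 * A device interrupt may have arrived since the caller's first
    	 * check, so ask again right before committing.
    	 */
    	if (ops->check_nested_events) {
    		int r = ops->check_nested_events(v, want_int_window);
    		if (r)
    			return r;	/* let the nested hypervisor run first */
    	}
    	if (ops->interrupt_allowed(v))
    		ops->queue_interrupt(v, vector);
    	return 0;
    }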
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index b9f4cc494ece..28d227c5ca77 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -872,6 +872,13 @@ void blkcg_drain_queue(struct request_queue *q)
 {
 	lockdep_assert_held(q->queue_lock);
 
+	/*
+	 * @q could be exiting and already have destroyed all blkgs as
+	 * indicated by NULL root_blkg.  If so, don't confuse policies.
+	 */
+	if (!q->root_blkg)
+		return;
+
 	blk_throtl_drain(q);
 }
 
diff --git a/block/blk-tag.c b/block/blk-tag.c
index 3f33d8672268..a185b86741e5 100644
--- a/block/blk-tag.c
+++ b/block/blk-tag.c
@@ -27,18 +27,15 @@ struct request *blk_queue_find_tag(struct request_queue *q, int tag)
 EXPORT_SYMBOL(blk_queue_find_tag);
 
 /**
- * __blk_free_tags - release a given set of tag maintenance info
+ * blk_free_tags - release a given set of tag maintenance info
  * @bqt:	the tag map to free
  *
- * Tries to free the specified @bqt.  Returns true if it was
- * actually freed and false if there are still references using it
+ * Drop the reference count on @bqt and frees it when the last reference
+ * is dropped.
  */
-static int __blk_free_tags(struct blk_queue_tag *bqt)
+void blk_free_tags(struct blk_queue_tag *bqt)
 {
-	int retval;
-
-	retval = atomic_dec_and_test(&bqt->refcnt);
-	if (retval) {
+	if (atomic_dec_and_test(&bqt->refcnt)) {
 		BUG_ON(find_first_bit(bqt->tag_map, bqt->max_depth) <
 							bqt->max_depth);
 
@@ -50,9 +47,8 @@ static int __blk_free_tags(struct blk_queue_tag *bqt)
 
 		kfree(bqt);
 	}
-
-	return retval;
 }
+EXPORT_SYMBOL(blk_free_tags);
 
 /**
  * __blk_queue_free_tags - release tag maintenance info
@@ -69,28 +65,13 @@ void __blk_queue_free_tags(struct request_queue *q)
 	if (!bqt)
 		return;
 
-	__blk_free_tags(bqt);
+	blk_free_tags(bqt);
 
 	q->queue_tags = NULL;
 	queue_flag_clear_unlocked(QUEUE_FLAG_QUEUED, q);
 }
 
 /**
- * blk_free_tags - release a given set of tag maintenance info
- * @bqt:	the tag map to free
- *
- * For externally managed @bqt frees the map.  Callers of this
- * function must guarantee to have released all the queues that
- * might have been using this tag map.
- */
-void blk_free_tags(struct blk_queue_tag *bqt)
-{
-	if (unlikely(!__blk_free_tags(bqt)))
-		BUG();
-}
-EXPORT_SYMBOL(blk_free_tags);
-
-/**
  * blk_queue_free_tags - release tag maintenance info
  * @q:  the request queue for the device
  *
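The resulting blk_free_tags() is the classic drop-a-reference-and-free-on-last pattern. The same idea in stand-alone C11, with names invented for the sketch:

    #include <stdatomic.h>
    #include <stdlib.h>

    struct tag_map {
    	atomic_int refcnt;
    	unsigned long *bits;
    };

    /* Drop one reference; the thread that drops the last one frees it. */
    static void tag_map_put(struct tag_map *map)
    {
    	if (atomic_fetch_sub(&map->refcnt, 1) == 1) {
    		free(map->bits);
    		free(map);
    	}
    }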
diff --git a/block/compat_ioctl.c b/block/compat_ioctl.c
index fbd5a67cb773..a0926a6094b2 100644
--- a/block/compat_ioctl.c
+++ b/block/compat_ioctl.c
@@ -690,6 +690,7 @@ long compat_blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg)
 	case BLKROSET:
 	case BLKDISCARD:
 	case BLKSECDISCARD:
+	case BLKZEROOUT:
 	/*
 	 * the ones below are implemented in blkdev_locked_ioctl,
 	 * but we call blkdev_ioctl, which gets the lock for us
diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c
index 071c1dfb93f3..350d52a8f781 100644
--- a/drivers/acpi/video.c
+++ b/drivers/acpi/video.c
@@ -68,7 +68,7 @@ MODULE_AUTHOR("Bruno Ducrot");
 MODULE_DESCRIPTION("ACPI Video Driver");
 MODULE_LICENSE("GPL");
 
-static bool brightness_switch_enabled;
+static bool brightness_switch_enabled = 1;
 module_param(brightness_switch_enabled, bool, 0644);
 
 /*
@@ -581,6 +581,14 @@ static struct dmi_system_id video_dmi_table[] __initdata = {
 	},
 	{
 	 .callback = video_set_use_native_backlight,
+	 .ident = "HP ProBook 4540s",
+	 .matches = {
+		DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
+		DMI_MATCH(DMI_PRODUCT_VERSION, "HP ProBook 4540s"),
+		},
+	},
+	{
+	 .callback = video_set_use_native_backlight,
 	 .ident = "HP ProBook 2013 models",
 	 .matches = {
 		DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index dae5607e1115..4cd52a4541a9 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -456,6 +456,7 @@ static const struct pci_device_id ahci_pci_tbl[] = {
 
 	/* Promise */
 	{ PCI_VDEVICE(PROMISE, 0x3f20), board_ahci },	/* PDC42819 */
+	{ PCI_VDEVICE(PROMISE, 0x3781), board_ahci },   /* FastTrak TX8660 ahci-mode */
 
 	/* Asmedia */
 	{ PCI_VDEVICE(ASMEDIA, 0x0601), board_ahci },	/* ASM1060 */
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 18d97d5c7d90..d19c37a7abc9 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -4787,6 +4787,10 @@ void swap_buf_le16(u16 *buf, unsigned int buf_words)
  *	ata_qc_new - Request an available ATA command, for queueing
  *	@ap: target port
  *
+ *	Some ATA host controllers may implement a queue depth which is less
+ *	than ATA_MAX_QUEUE. So we shouldn't allocate a tag which is beyond
+ *	the hardware limitation.
+ *
  *	LOCKING:
  *	None.
  */
@@ -4794,14 +4798,16 @@ void swap_buf_le16(u16 *buf, unsigned int buf_words)
 static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap)
 {
 	struct ata_queued_cmd *qc = NULL;
-	unsigned int i, tag;
+	unsigned int i, tag, max_queue;
+
+	max_queue = ap->scsi_host->can_queue;
 
 	/* no command while frozen */
 	if (unlikely(ap->pflags & ATA_PFLAG_FROZEN))
 		return NULL;
 
-	for (i = 0; i < ATA_MAX_QUEUE; i++) {
-		tag = (i + ap->last_tag + 1) % ATA_MAX_QUEUE;
+	for (i = 0, tag = ap->last_tag + 1; i < max_queue; i++, tag++) {
+		tag = tag < max_queue ? tag : 0;
 
 		/* the last tag is reserved for internal command. */
 		if (tag == ATA_TAG_INTERNAL)
@@ -6169,6 +6175,16 @@ int ata_host_register(struct ata_host *host, struct scsi_host_template *sht)
 {
 	int i, rc;
 
+	/*
+	 * The max queue supported by hardware must not be greater than
+	 * ATA_MAX_QUEUE.
+	 */
+	if (sht->can_queue > ATA_MAX_QUEUE) {
+		dev_err(host->dev, "BUG: the hardware max queue is too large\n");
+		WARN_ON(1);
+		return -EINVAL;
+	}
+
 	/* host must have been started */
 	if (!(host->flags & ATA_HOST_STARTED)) {
 		dev_err(host->dev, "BUG: trying to register unstarted host\n");
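The new loop is a round-robin scan bounded by the host's advertised depth rather than the compile-time maximum. A self-contained sketch of that allocation (the reserved-internal-tag check from the real code is omitted for brevity):

    #include <stdbool.h>

    #define MAX_QUEUE	32	/* compile-time ceiling, in the ATA_MAX_QUEUE role */

    struct port {
    	bool		in_use[MAX_QUEUE];
    	unsigned int	last_tag;
    	unsigned int	max_queue;	/* hardware depth, <= MAX_QUEUE */
    };

    /* Return a free tag, resuming after the last one handed out, or -1. */
    static int alloc_tag(struct port *ap)
    {
    	unsigned int i, tag;

    	for (i = 0, tag = ap->last_tag + 1; i < ap->max_queue; i++, tag++) {
    		tag = tag < ap->max_queue ? tag : 0;	/* wrap at hw depth */
    		if (!ap->in_use[tag]) {
    			ap->in_use[tag] = true;
    			ap->last_tag = tag;
    			return tag;
    		}
    	}
    	return -1;	/* queue full */
    }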
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
index 6760fc4e85b8..dad83df555c4 100644
--- a/drivers/ata/libata-eh.c
+++ b/drivers/ata/libata-eh.c
@@ -1811,7 +1811,7 @@ static unsigned int ata_eh_analyze_tf(struct ata_queued_cmd *qc,
 	case ATA_DEV_ATA:
 		if (err & ATA_ICRC)
 			qc->err_mask |= AC_ERR_ATA_BUS;
-		if (err & ATA_UNC)
+		if (err & (ATA_UNC | ATA_AMNF))
 			qc->err_mask |= AC_ERR_MEDIA;
 		if (err & ATA_IDNF)
 			qc->err_mask |= AC_ERR_INVALID;
@@ -2556,11 +2556,12 @@ static void ata_eh_link_report(struct ata_link *link)
 		}
 
 		if (cmd->command != ATA_CMD_PACKET &&
-		    (res->feature & (ATA_ICRC | ATA_UNC | ATA_IDNF |
-				     ATA_ABORTED)))
-			ata_dev_err(qc->dev, "error: { %s%s%s%s}\n",
+		    (res->feature & (ATA_ICRC | ATA_UNC | ATA_AMNF |
+				     ATA_IDNF | ATA_ABORTED)))
+			ata_dev_err(qc->dev, "error: { %s%s%s%s%s}\n",
 				res->feature & ATA_ICRC ? "ICRC " : "",
 				res->feature & ATA_UNC ? "UNC " : "",
+				res->feature & ATA_AMNF ? "AMNF " : "",
 				res->feature & ATA_IDNF ? "IDNF " : "",
 				res->feature & ATA_ABORTED ? "ABRT " : "");
 #endif
diff --git a/drivers/ata/pata_ep93xx.c b/drivers/ata/pata_ep93xx.c
index 6ad5c072ce34..4d37c5415fc7 100644
--- a/drivers/ata/pata_ep93xx.c
+++ b/drivers/ata/pata_ep93xx.c
@@ -915,7 +915,7 @@ static int ep93xx_pata_probe(struct platform_device *pdev)
 	struct ep93xx_pata_data *drv_data;
 	struct ata_host *host;
 	struct ata_port *ap;
-	unsigned int irq;
+	int irq;
 	struct resource *mem_res;
 	void __iomem *ide_base;
 	int err;
diff --git a/drivers/base/platform.c b/drivers/base/platform.c
index 9e9227e1762d..eee48c49f5de 100644
--- a/drivers/base/platform.c
+++ b/drivers/base/platform.c
@@ -89,8 +89,13 @@ int platform_get_irq(struct platform_device *dev, unsigned int num)
 		return dev->archdata.irqs[num];
 #else
 	struct resource *r;
-	if (IS_ENABLED(CONFIG_OF_IRQ) && dev->dev.of_node)
-		return of_irq_get(dev->dev.of_node, num);
+	if (IS_ENABLED(CONFIG_OF_IRQ) && dev->dev.of_node) {
+		int ret;
+
+		ret = of_irq_get(dev->dev.of_node, num);
+		if (ret >= 0 || ret == -EPROBE_DEFER)
+			return ret;
+	}
 
 	r = platform_get_resource(dev, IORESOURCE_IRQ, num);
 
@@ -133,8 +138,13 @@ int platform_get_irq_byname(struct platform_device *dev, const char *name)
 {
 	struct resource *r;
 
-	if (IS_ENABLED(CONFIG_OF_IRQ) && dev->dev.of_node)
-		return of_irq_get_byname(dev->dev.of_node, name);
+	if (IS_ENABLED(CONFIG_OF_IRQ) && dev->dev.of_node) {
+		int ret;
+
+		ret = of_irq_get_byname(dev->dev.of_node, name);
+		if (ret >= 0 || ret == -EPROBE_DEFER)
+			return ret;
+	}
 
 	r = platform_get_resource_byname(dev, IORESOURCE_IRQ, name);
 	return r ? r->start : -ENXIO;
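The pattern both hunks implement: treat a DT hit and -EPROBE_DEFER as final answers, and fall back to the static resource table for every other DT failure. A stand-alone sketch with placeholder lookup helpers (EPROBE_DEFER's value is the kernel-private errno, reproduced here only so the sketch compiles):

    #define EPROBE_DEFER	517	/* kernel-private errno: "retry the probe later" */

    extern int dt_irq_lookup(int index);		/* stand-in for of_irq_get(); <0 on error */
    extern int resource_irq_lookup(int index);	/* legacy resource table; <0 on error */

    static int get_irq(int index, int have_dt_node)
    {
    	if (have_dt_node) {
    		int ret = dt_irq_lookup(index);

    		/*
    		 * A valid IRQ and "defer the probe" are both final;
    		 * any other failure falls back to the table.
    		 */
    		if (ret >= 0 || ret == -EPROBE_DEFER)
    			return ret;
    	}
    	return resource_irq_lookup(index);
    }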
diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
index 1b35c45c92b7..3f2e16738080 100644
--- a/drivers/block/drbd/drbd_nl.c
+++ b/drivers/block/drbd/drbd_nl.c
@@ -544,6 +544,12 @@ void conn_try_outdate_peer_async(struct drbd_connection *connection)
 	struct task_struct *opa;
 
 	kref_get(&connection->kref);
+	/* We may just have force_sig()'ed this thread
+	 * to get it out of some blocking network function.
+	 * Clear signals; otherwise kthread_run(), which internally uses
+	 * wait_on_completion_killable(), will mistake our pending signal
+	 * for a new fatal signal and fail. */
+	flush_signals(current);
 	opa = kthread_run(_try_outdate_peer_async, connection, "drbd_async_h");
 	if (IS_ERR(opa)) {
 		drbd_err(connection, "out of mem, failed to invoke fence-peer helper\n");
diff --git a/drivers/char/hw_random/core.c b/drivers/char/hw_random/core.c
index 334601cc81cf..c4419ea1ab07 100644
--- a/drivers/char/hw_random/core.c
+++ b/drivers/char/hw_random/core.c
@@ -55,16 +55,41 @@ static DEFINE_MUTEX(rng_mutex);
55static int data_avail; 55static int data_avail;
56static u8 *rng_buffer; 56static u8 *rng_buffer;
57 57
58static inline int rng_get_data(struct hwrng *rng, u8 *buffer, size_t size,
59 int wait);
60
58static size_t rng_buffer_size(void) 61static size_t rng_buffer_size(void)
59{ 62{
60 return SMP_CACHE_BYTES < 32 ? 32 : SMP_CACHE_BYTES; 63 return SMP_CACHE_BYTES < 32 ? 32 : SMP_CACHE_BYTES;
61} 64}
62 65
66static void add_early_randomness(struct hwrng *rng)
67{
68 unsigned char bytes[16];
69 int bytes_read;
70
71 /*
72 * Currently only virtio-rng cannot return data during device
73 * probe, and that's handled in virtio-rng.c itself. If there
74 * are more such devices, this call to rng_get_data can be
75 * made conditional here instead of doing it per-device.
76 */
77 bytes_read = rng_get_data(rng, bytes, sizeof(bytes), 1);
78 if (bytes_read > 0)
79 add_device_randomness(bytes, bytes_read);
80}
81
63static inline int hwrng_init(struct hwrng *rng) 82static inline int hwrng_init(struct hwrng *rng)
64{ 83{
65 if (!rng->init) 84 if (rng->init) {
66 return 0; 85 int ret;
67 return rng->init(rng); 86
87 ret = rng->init(rng);
88 if (ret)
89 return ret;
90 }
91 add_early_randomness(rng);
92 return 0;
68} 93}
69 94
70static inline void hwrng_cleanup(struct hwrng *rng) 95static inline void hwrng_cleanup(struct hwrng *rng)
@@ -304,8 +329,6 @@ int hwrng_register(struct hwrng *rng)
304{ 329{
305 int err = -EINVAL; 330 int err = -EINVAL;
306 struct hwrng *old_rng, *tmp; 331 struct hwrng *old_rng, *tmp;
307 unsigned char bytes[16];
308 int bytes_read;
309 332
310 if (rng->name == NULL || 333 if (rng->name == NULL ||
311 (rng->data_read == NULL && rng->read == NULL)) 334 (rng->data_read == NULL && rng->read == NULL))
@@ -347,9 +370,17 @@ int hwrng_register(struct hwrng *rng)
347 INIT_LIST_HEAD(&rng->list); 370 INIT_LIST_HEAD(&rng->list);
348 list_add_tail(&rng->list, &rng_list); 371 list_add_tail(&rng->list, &rng_list);
349 372
350 bytes_read = rng_get_data(rng, bytes, sizeof(bytes), 1); 373 if (old_rng && !rng->init) {
351 if (bytes_read > 0) 374 /*
352 add_device_randomness(bytes, bytes_read); 375 * Use a new device's input to add some randomness to
376 * the system. If this rng device isn't going to be
377 * used right away, its init function hasn't been
378 * called yet; so only use the randomness from devices
379 * that don't need an init callback.
380 */
381 add_early_randomness(rng);
382 }
383
353out_unlock: 384out_unlock:
354 mutex_unlock(&rng_mutex); 385 mutex_unlock(&rng_mutex);
355out: 386out:
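The ordering this enforces, in miniature: run the device's init hook first, and only sample early randomness from a device that is actually ready to produce data. The names below are illustrative stand-ins, not the hwrng API:

    #include <stddef.h>

    struct rng {
    	int (*init)(struct rng *r);			/* optional */
    	int (*read)(struct rng *r, void *buf, size_t n);
    };

    extern void mix_into_pool(const void *buf, size_t n);	/* stand-in for
    							   add_device_randomness() */

    static void seed_from(struct rng *r)
    {
    	unsigned char bytes[16];
    	int n = r->read(r, bytes, sizeof(bytes));

    	if (n > 0)
    		mix_into_pool(bytes, n);
    }

    /* Only seed once the device is actually ready to produce data. */
    static int rng_bringup(struct rng *r)
    {
    	if (r->init) {
    		int ret = r->init(r);
    		if (ret)
    			return ret;
    	}
    	seed_from(r);
    	return 0;
    }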
diff --git a/drivers/char/hw_random/virtio-rng.c b/drivers/char/hw_random/virtio-rng.c
index f3e71501de54..e9b15bc18b4d 100644
--- a/drivers/char/hw_random/virtio-rng.c
+++ b/drivers/char/hw_random/virtio-rng.c
@@ -38,6 +38,8 @@ struct virtrng_info {
38 int index; 38 int index;
39}; 39};
40 40
41static bool probe_done;
42
41static void random_recv_done(struct virtqueue *vq) 43static void random_recv_done(struct virtqueue *vq)
42{ 44{
43 struct virtrng_info *vi = vq->vdev->priv; 45 struct virtrng_info *vi = vq->vdev->priv;
@@ -67,6 +69,13 @@ static int virtio_read(struct hwrng *rng, void *buf, size_t size, bool wait)
67 int ret; 69 int ret;
68 struct virtrng_info *vi = (struct virtrng_info *)rng->priv; 70 struct virtrng_info *vi = (struct virtrng_info *)rng->priv;
69 71
72 /*
73 * Don't ask host for data till we're setup. This call can
74 * happen during hwrng_register(), after commit d9e7972619.
75 */
76 if (unlikely(!probe_done))
77 return 0;
78
70 if (!vi->busy) { 79 if (!vi->busy) {
71 vi->busy = true; 80 vi->busy = true;
72 init_completion(&vi->have_data); 81 init_completion(&vi->have_data);
@@ -137,6 +146,7 @@ static int probe_common(struct virtio_device *vdev)
137 return err; 146 return err;
138 } 147 }
139 148
149 probe_done = true;
140 return 0; 150 return 0;
141} 151}
142 152
diff --git a/drivers/char/random.c b/drivers/char/random.c
index 0a7ac0a7b252..71529e196b84 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -641,7 +641,7 @@ retry:
641 } while (unlikely(entropy_count < pool_size-2 && pnfrac)); 641 } while (unlikely(entropy_count < pool_size-2 && pnfrac));
642 } 642 }
643 643
644 if (entropy_count < 0) { 644 if (unlikely(entropy_count < 0)) {
645 pr_warn("random: negative entropy/overflow: pool %s count %d\n", 645 pr_warn("random: negative entropy/overflow: pool %s count %d\n",
646 r->name, entropy_count); 646 r->name, entropy_count);
647 WARN_ON(1); 647 WARN_ON(1);
@@ -981,7 +981,7 @@ static size_t account(struct entropy_store *r, size_t nbytes, int min,
981 int reserved) 981 int reserved)
982{ 982{
983 int entropy_count, orig; 983 int entropy_count, orig;
984 size_t ibytes; 984 size_t ibytes, nfrac;
985 985
986 BUG_ON(r->entropy_count > r->poolinfo->poolfracbits); 986 BUG_ON(r->entropy_count > r->poolinfo->poolfracbits);
987 987
@@ -999,7 +999,17 @@ retry:
999 } 999 }
1000 if (ibytes < min) 1000 if (ibytes < min)
1001 ibytes = 0; 1001 ibytes = 0;
1002 if ((entropy_count -= ibytes << (ENTROPY_SHIFT + 3)) < 0) 1002
1003 if (unlikely(entropy_count < 0)) {
1004 pr_warn("random: negative entropy count: pool %s count %d\n",
1005 r->name, entropy_count);
1006 WARN_ON(1);
1007 entropy_count = 0;
1008 }
1009 nfrac = ibytes << (ENTROPY_SHIFT + 3);
1010 if ((size_t) entropy_count > nfrac)
1011 entropy_count -= nfrac;
1012 else
1003 entropy_count = 0; 1013 entropy_count = 0;
1004 1014
1005 if (cmpxchg(&r->entropy_count, orig, entropy_count) != orig) 1015 if (cmpxchg(&r->entropy_count, orig, entropy_count) != orig)
@@ -1376,6 +1386,7 @@ urandom_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos)
1376 "with %d bits of entropy available\n", 1386 "with %d bits of entropy available\n",
1377 current->comm, nonblocking_pool.entropy_total); 1387 current->comm, nonblocking_pool.entropy_total);
1378 1388
1389 nbytes = min_t(size_t, nbytes, INT_MAX >> (ENTROPY_SHIFT + 3));
1379 ret = extract_entropy_user(&nonblocking_pool, buf, nbytes); 1390 ret = extract_entropy_user(&nonblocking_pool, buf, nbytes);
1380 1391
1381 trace_urandom_read(8 * nbytes, ENTROPY_BITS(&nonblocking_pool), 1392 trace_urandom_read(8 * nbytes, ENTROPY_BITS(&nonblocking_pool),
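The core of the account() fix is a saturating debit on a counter kept in 1/8-bit units, replacing a raw subtraction that could wrap negative. The same guard in isolation (ENTROPY_SHIFT matches the kernel source; the rest is a stand-alone sketch):

    #include <stddef.h>
    #include <stdio.h>

    #define ENTROPY_SHIFT 3	/* counter keeps 1/8-bit fractional units */

    /* Debit 'ibytes' of output from the entropy counter, saturating at
     * zero instead of wrapping negative. */
    static int debit_entropy(int entropy_count, size_t ibytes)
    {
    	size_t nfrac;

    	if (entropy_count < 0) {		/* should never happen */
    		fprintf(stderr, "negative entropy count %d\n", entropy_count);
    		entropy_count = 0;
    	}
    	nfrac = ibytes << (ENTROPY_SHIFT + 3);	/* bytes -> fractional bits */
    	if ((size_t)entropy_count > nfrac)
    		entropy_count -= nfrac;
    	else
    		entropy_count = 0;
    	return entropy_count;
    }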
diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm
index ebac67115009..7364a538e056 100644
--- a/drivers/cpufreq/Kconfig.arm
+++ b/drivers/cpufreq/Kconfig.arm
@@ -104,6 +104,7 @@ config ARM_IMX6Q_CPUFREQ
104 tristate "Freescale i.MX6 cpufreq support" 104 tristate "Freescale i.MX6 cpufreq support"
105 depends on ARCH_MXC 105 depends on ARCH_MXC
106 depends on REGULATOR_ANATOP 106 depends on REGULATOR_ANATOP
107 select PM_OPP
107 help 108 help
108 This adds cpufreq driver support for Freescale i.MX6 series SoCs. 109 This adds cpufreq driver support for Freescale i.MX6 series SoCs.
109 110
@@ -118,7 +119,7 @@ config ARM_INTEGRATOR
118 If in doubt, say Y. 119 If in doubt, say Y.
119 120
120config ARM_KIRKWOOD_CPUFREQ 121config ARM_KIRKWOOD_CPUFREQ
121 def_bool MACH_KIRKWOOD 122 def_bool ARCH_KIRKWOOD || MACH_KIRKWOOD
122 help 123 help
123 This adds the CPUFreq driver for Marvell Kirkwood 124 This adds the CPUFreq driver for Marvell Kirkwood
124 SoCs. 125 SoCs.
diff --git a/drivers/cpufreq/cpufreq-cpu0.c b/drivers/cpufreq/cpufreq-cpu0.c
index ee1ae303a07c..86beda9f950b 100644
--- a/drivers/cpufreq/cpufreq-cpu0.c
+++ b/drivers/cpufreq/cpufreq-cpu0.c
@@ -152,11 +152,8 @@ static int cpu0_cpufreq_probe(struct platform_device *pdev)
 		goto out_put_reg;
 	}
 
-	ret = of_init_opp_table(cpu_dev);
-	if (ret) {
-		pr_err("failed to init OPP table: %d\n", ret);
-		goto out_put_clk;
-	}
+	/* OPPs might be populated at runtime, don't check for error here */
+	of_init_opp_table(cpu_dev);
 
 	ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &freq_table);
 	if (ret) {
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 62259d27f03e..6f024852c6fb 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -1153,10 +1153,12 @@ static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
1153 * the creation of a brand new one. So we need to perform this update 1153 * the creation of a brand new one. So we need to perform this update
1154 * by invoking update_policy_cpu(). 1154 * by invoking update_policy_cpu().
1155 */ 1155 */
1156 if (recover_policy && cpu != policy->cpu) 1156 if (recover_policy && cpu != policy->cpu) {
1157 update_policy_cpu(policy, cpu); 1157 update_policy_cpu(policy, cpu);
1158 else 1158 WARN_ON(kobject_move(&policy->kobj, &dev->kobj));
1159 } else {
1159 policy->cpu = cpu; 1160 policy->cpu = cpu;
1161 }
1160 1162
1161 cpumask_copy(policy->cpus, cpumask_of(cpu)); 1163 cpumask_copy(policy->cpus, cpumask_of(cpu));
1162 1164
diff --git a/drivers/cpufreq/sa1110-cpufreq.c b/drivers/cpufreq/sa1110-cpufreq.c
index 546376719d8f..b5befc211172 100644
--- a/drivers/cpufreq/sa1110-cpufreq.c
+++ b/drivers/cpufreq/sa1110-cpufreq.c
@@ -349,7 +349,7 @@ static int __init sa1110_clk_init(void)
349 name = "K4S641632D"; 349 name = "K4S641632D";
350 if (machine_is_h3100()) 350 if (machine_is_h3100())
351 name = "KM416S4030CT"; 351 name = "KM416S4030CT";
352 if (machine_is_jornada720()) 352 if (machine_is_jornada720() || machine_is_h3600())
353 name = "K4S281632B-1H"; 353 name = "K4S281632B-1H";
354 if (machine_is_nanoengine()) 354 if (machine_is_nanoengine())
355 name = "MT48LC8M16A2TG-75"; 355 name = "MT48LC8M16A2TG-75";
diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
index eff1a2f22f09..dc79346689e6 100644
--- a/drivers/firmware/efi/efi.c
+++ b/drivers/firmware/efi/efi.c
@@ -346,6 +346,7 @@ static __initdata struct {
346 346
347struct param_info { 347struct param_info {
348 int verbose; 348 int verbose;
349 int found;
349 void *params; 350 void *params;
350}; 351};
351 352
@@ -362,16 +363,12 @@ static int __init fdt_find_uefi_params(unsigned long node, const char *uname,
362 (strcmp(uname, "chosen") != 0 && strcmp(uname, "chosen@0") != 0)) 363 (strcmp(uname, "chosen") != 0 && strcmp(uname, "chosen@0") != 0))
363 return 0; 364 return 0;
364 365
365 pr_info("Getting parameters from FDT:\n");
366
367 for (i = 0; i < ARRAY_SIZE(dt_params); i++) { 366 for (i = 0; i < ARRAY_SIZE(dt_params); i++) {
368 prop = of_get_flat_dt_prop(node, dt_params[i].propname, &len); 367 prop = of_get_flat_dt_prop(node, dt_params[i].propname, &len);
369 if (!prop) { 368 if (!prop)
370 pr_err("Can't find %s in device tree!\n",
371 dt_params[i].name);
372 return 0; 369 return 0;
373 }
374 dest = info->params + dt_params[i].offset; 370 dest = info->params + dt_params[i].offset;
371 info->found++;
375 372
376 val = of_read_number(prop, len / sizeof(u32)); 373 val = of_read_number(prop, len / sizeof(u32));
377 374
@@ -390,10 +387,21 @@ static int __init fdt_find_uefi_params(unsigned long node, const char *uname,
390int __init efi_get_fdt_params(struct efi_fdt_params *params, int verbose) 387int __init efi_get_fdt_params(struct efi_fdt_params *params, int verbose)
391{ 388{
392 struct param_info info; 389 struct param_info info;
390 int ret;
391
392 pr_info("Getting EFI parameters from FDT:\n");
393 393
394 info.verbose = verbose; 394 info.verbose = verbose;
395 info.found = 0;
395 info.params = params; 396 info.params = params;
396 397
397 return of_scan_flat_dt(fdt_find_uefi_params, &info); 398 ret = of_scan_flat_dt(fdt_find_uefi_params, &info);
399 if (!info.found)
400 pr_info("UEFI not found.\n");
401 else if (!ret)
402 pr_err("Can't find '%s' in device tree!\n",
403 dt_params[info.found].name);
404
405 return ret;
398} 406}
399#endif /* CONFIG_EFI_PARAMS_FROM_FDT */ 407#endif /* CONFIG_EFI_PARAMS_FROM_FDT */
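With the found counter, the caller can distinguish "no UEFI chosen node at all" from "node present but a property missing", and dt_params[info.found].name works because found indexes the first property that was not matched. A reduced model of that reporting logic (lookup() is a placeholder for the FDT property lookup):

    #include <stdio.h>

    struct param { const char *name; };

    static const struct param params[] = {
    	{ "system table" }, { "mmap" }, { "mmap size" },
    };
    #define NPARAMS (sizeof(params) / sizeof(params[0]))

    extern int lookup(const char *name);	/* nonzero when the property exists */

    /* Returns how many properties matched, in order; the caller can tell
     * "nothing found" apart from "partially found" and name the gap. */
    static unsigned int scan_params(void)
    {
    	unsigned int found = 0;

    	while (found < NPARAMS && lookup(params[found].name))
    		found++;

    	if (found == 0)
    		printf("UEFI not found.\n");
    	else if (found < NPARAMS)
    		printf("Can't find '%s' in device tree!\n", params[found].name);
    	return found;
    }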
diff --git a/drivers/firmware/efi/fdt.c b/drivers/firmware/efi/fdt.c
index 82d774161cc9..507a3df46a5d 100644
--- a/drivers/firmware/efi/fdt.c
+++ b/drivers/firmware/efi/fdt.c
@@ -23,16 +23,6 @@ static efi_status_t update_fdt(efi_system_table_t *sys_table, void *orig_fdt,
 	u32 fdt_val32;
 	u64 fdt_val64;
 
-	/*
-	 * Copy definition of linux_banner here. Since this code is
-	 * built as part of the decompressor for ARM v7, pulling
-	 * in version.c where linux_banner is defined for the
-	 * kernel brings other kernel dependencies with it.
-	 */
-	const char linux_banner[] =
-	    "Linux version " UTS_RELEASE " (" LINUX_COMPILE_BY "@"
-	    LINUX_COMPILE_HOST ") (" LINUX_COMPILER ") " UTS_VERSION "\n";
-
 	/* Do some checks on provided FDT, if it exists*/
 	if (orig_fdt) {
 		if (fdt_check_header(orig_fdt)) {
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index e27e7804c0b9..f0be855ddf45 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -11673,6 +11673,9 @@ static struct intel_quirk intel_quirks[] = {
 
 	/* Toshiba CB35 Chromebook (Celeron 2955U) */
 	{ 0x0a06, 0x1179, 0x0a88, quirk_backlight_present },
+
+	/* HP Chromebook 14 (Celeron 2955U) */
+	{ 0x0a06, 0x103c, 0x21ed, quirk_backlight_present },
 };
 
 static void intel_init_quirks(struct drm_device *dev)
@@ -11911,6 +11914,7 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc)
 	 * ... */
 	plane = crtc->plane;
 	crtc->plane = !plane;
+	crtc->primary_enabled = true;
 	dev_priv->display.crtc_disable(&crtc->base);
 	crtc->plane = plane;
 
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 075170d1844f..8a1a4fbc06ac 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -906,8 +906,8 @@ intel_dp_compute_config(struct intel_encoder *encoder,
 		mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
 						   bpp);
 
-		for (lane_count = min_lane_count; lane_count <= max_lane_count; lane_count <<= 1) {
-			for (clock = min_clock; clock <= max_clock; clock++) {
+		for (clock = min_clock; clock <= max_clock; clock++) {
+			for (lane_count = min_lane_count; lane_count <= max_lane_count; lane_count <<= 1) {
 				link_clock = drm_dp_bw_code_to_link_rate(bws[clock]);
 				link_avail = intel_dp_max_data_rate(link_clock,
 								    lane_count);
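Swapping the nesting makes link rate the primary sort key: the search now settles on the lowest clock that can carry the mode and only then widens the lane count. A reduced model of that search order (names invented for the sketch):

    struct link_cfg { int clock; int lanes; };

    /* Pick the cheapest (rate first, then width) config that can carry
     * mode_rate.  Returns nonzero on success. */
    static int pick_link(const int *rates, int nrates, int max_lanes,
    		     long mode_rate, struct link_cfg *out)
    {
    	for (int c = 0; c < nrates; c++) {		/* prefer low rate */
    		for (int l = 1; l <= max_lanes; l <<= 1) {	/* then few lanes */
    			long avail = (long)rates[c] * l;

    			if (avail >= mode_rate) {
    				out->clock = rates[c];
    				out->lanes = l;
    				return 1;
    			}
    		}
    	}
    	return 0;	/* mode does not fit any configuration */
    }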
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index 23126023aeba..5e5a72fca5fb 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -111,6 +111,13 @@ static void intel_lvds_get_config(struct intel_encoder *encoder,
 
 	pipe_config->adjusted_mode.flags |= flags;
 
+	/* gen2/3 store dither state in pfit control, needs to match */
+	if (INTEL_INFO(dev)->gen < 4) {
+		tmp = I915_READ(PFIT_CONTROL);
+
+		pipe_config->gmch_pfit.control |= tmp & PANEL_8TO6_DITHER_ENABLE;
+	}
+
 	dotclock = pipe_config->port_clock;
 
 	if (HAS_PCH_SPLIT(dev_priv->dev))
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index 628cd8938274..12b02fe1d0ae 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -361,16 +361,16 @@ void intel_gmch_panel_fitting(struct intel_crtc *intel_crtc,
 		pfit_control |= ((intel_crtc->pipe << PFIT_PIPE_SHIFT) |
 				 PFIT_FILTER_FUZZY);
 
-	/* Make sure pre-965 set dither correctly for 18bpp panels. */
-	if (INTEL_INFO(dev)->gen < 4 && pipe_config->pipe_bpp == 18)
-		pfit_control |= PANEL_8TO6_DITHER_ENABLE;
-
 out:
 	if ((pfit_control & PFIT_ENABLE) == 0) {
 		pfit_control = 0;
 		pfit_pgm_ratios = 0;
 	}
 
+	/* Make sure pre-965 set dither correctly for 18bpp panels. */
+	if (INTEL_INFO(dev)->gen < 4 && pipe_config->pipe_bpp == 18)
+		pfit_control |= PANEL_8TO6_DITHER_ENABLE;
+
 	pipe_config->gmch_pfit.control = pfit_control;
 	pipe_config->gmch_pfit.pgm_ratios = pfit_pgm_ratios;
 	pipe_config->gmch_pfit.lvds_border_bits = border;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/temp.c b/drivers/gpu/drm/nouveau/core/subdev/therm/temp.c
index cfde9eb44ad0..6212537b90c5 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/therm/temp.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/therm/temp.c
@@ -192,11 +192,11 @@ alarm_timer_callback(struct nouveau_alarm *alarm)
192 nouveau_therm_threshold_hyst_polling(therm, &sensor->thrs_shutdown, 192 nouveau_therm_threshold_hyst_polling(therm, &sensor->thrs_shutdown,
193 NOUVEAU_THERM_THRS_SHUTDOWN); 193 NOUVEAU_THERM_THRS_SHUTDOWN);
194 194
195 spin_unlock_irqrestore(&priv->sensor.alarm_program_lock, flags);
196
195 /* schedule the next poll in one second */ 197 /* schedule the next poll in one second */
196 if (therm->temp_get(therm) >= 0 && list_empty(&alarm->head)) 198 if (therm->temp_get(therm) >= 0 && list_empty(&alarm->head))
197 ptimer->alarm(ptimer, 1000 * 1000 * 1000, alarm); 199 ptimer->alarm(ptimer, 1000000000ULL, alarm);
198
199 spin_unlock_irqrestore(&priv->sensor.alarm_program_lock, flags);
200} 200}
201 201
202void 202void
diff --git a/drivers/gpu/drm/qxl/qxl_irq.c b/drivers/gpu/drm/qxl/qxl_irq.c
index 34d6a85e9023..0bf1e20c6e44 100644
--- a/drivers/gpu/drm/qxl/qxl_irq.c
+++ b/drivers/gpu/drm/qxl/qxl_irq.c
@@ -33,6 +33,9 @@ irqreturn_t qxl_irq_handler(int irq, void *arg)
33 33
34 pending = xchg(&qdev->ram_header->int_pending, 0); 34 pending = xchg(&qdev->ram_header->int_pending, 0);
35 35
36 if (!pending)
37 return IRQ_NONE;
38
36 atomic_inc(&qdev->irq_received); 39 atomic_inc(&qdev->irq_received);
37 40
38 if (pending & QXL_INTERRUPT_DISPLAY) { 41 if (pending & QXL_INTERRUPT_DISPLAY) {
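On a shared interrupt line a handler should report IRQ_NONE when its device raised nothing, so the core can account the interrupt to the right device and detect stuck lines. The guard in isolation (the enum mirrors the values' roles in <linux/irqreturn.h>; the rest is a stand-alone sketch):

    enum irqreturn { IRQ_NONE, IRQ_HANDLED };

    extern unsigned int claim_pending_bits(void *dev);	/* atomic read-and-clear */

    static enum irqreturn irq_handler(void *dev)
    {
    	unsigned int pending = claim_pending_bits(dev);

    	if (!pending)
    		return IRQ_NONE;	/* someone else's interrupt */

    	/* ... dispatch on the bits in 'pending' ... */
    	return IRQ_HANDLED;
    }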
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index a03c73411a56..30d242b25078 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -1414,8 +1414,8 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc,
1414 tmp &= ~EVERGREEN_GRPH_SURFACE_UPDATE_H_RETRACE_EN; 1414 tmp &= ~EVERGREEN_GRPH_SURFACE_UPDATE_H_RETRACE_EN;
1415 WREG32(EVERGREEN_GRPH_FLIP_CONTROL + radeon_crtc->crtc_offset, tmp); 1415 WREG32(EVERGREEN_GRPH_FLIP_CONTROL + radeon_crtc->crtc_offset, tmp);
1416 1416
1417 /* set pageflip to happen anywhere in vblank interval */ 1417 /* set pageflip to happen only at start of vblank interval (front porch) */
1418 WREG32(EVERGREEN_MASTER_UPDATE_MODE + radeon_crtc->crtc_offset, 0); 1418 WREG32(EVERGREEN_MASTER_UPDATE_MODE + radeon_crtc->crtc_offset, 3);
1419 1419
1420 if (!atomic && fb && fb != crtc->primary->fb) { 1420 if (!atomic && fb && fb != crtc->primary->fb) {
1421 radeon_fb = to_radeon_framebuffer(fb); 1421 radeon_fb = to_radeon_framebuffer(fb);
@@ -1614,8 +1614,8 @@ static int avivo_crtc_do_set_base(struct drm_crtc *crtc,
1614 tmp &= ~AVIVO_D1GRPH_SURFACE_UPDATE_H_RETRACE_EN; 1614 tmp &= ~AVIVO_D1GRPH_SURFACE_UPDATE_H_RETRACE_EN;
1615 WREG32(AVIVO_D1GRPH_FLIP_CONTROL + radeon_crtc->crtc_offset, tmp); 1615 WREG32(AVIVO_D1GRPH_FLIP_CONTROL + radeon_crtc->crtc_offset, tmp);
1616 1616
1617 /* set pageflip to happen anywhere in vblank interval */ 1617 /* set pageflip to happen only at start of vblank interval (front porch) */
1618 WREG32(AVIVO_D1MODE_MASTER_UPDATE_MODE + radeon_crtc->crtc_offset, 0); 1618 WREG32(AVIVO_D1MODE_MASTER_UPDATE_MODE + radeon_crtc->crtc_offset, 3);
1619 1619
1620 if (!atomic && fb && fb != crtc->primary->fb) { 1620 if (!atomic && fb && fb != crtc->primary->fb) {
1621 radeon_fb = to_radeon_framebuffer(fb); 1621 radeon_fb = to_radeon_framebuffer(fb);
diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
index 2b2908440644..7d68203a3737 100644
--- a/drivers/gpu/drm/radeon/atombios_encoders.c
+++ b/drivers/gpu/drm/radeon/atombios_encoders.c
@@ -183,7 +183,6 @@ void radeon_atom_backlight_init(struct radeon_encoder *radeon_encoder,
183 struct backlight_properties props; 183 struct backlight_properties props;
184 struct radeon_backlight_privdata *pdata; 184 struct radeon_backlight_privdata *pdata;
185 struct radeon_encoder_atom_dig *dig; 185 struct radeon_encoder_atom_dig *dig;
186 u8 backlight_level;
187 char bl_name[16]; 186 char bl_name[16];
188 187
189 /* Mac laptops with multiple GPUs use the gmux driver for backlight 188 /* Mac laptops with multiple GPUs use the gmux driver for backlight
@@ -222,12 +221,17 @@ void radeon_atom_backlight_init(struct radeon_encoder *radeon_encoder,
222 221
223 pdata->encoder = radeon_encoder; 222 pdata->encoder = radeon_encoder;
224 223
225 backlight_level = radeon_atom_get_backlight_level_from_reg(rdev);
226
227 dig = radeon_encoder->enc_priv; 224 dig = radeon_encoder->enc_priv;
228 dig->bl_dev = bd; 225 dig->bl_dev = bd;
229 226
230 bd->props.brightness = radeon_atom_backlight_get_brightness(bd); 227 bd->props.brightness = radeon_atom_backlight_get_brightness(bd);
228 /* Set a reasonable default here if the level is 0 otherwise
229 * fbdev will attempt to turn the backlight on after console
230 * unblanking and it will try and restore 0 which turns the backlight
231 * off again.
232 */
233 if (bd->props.brightness == 0)
234 bd->props.brightness = RADEON_MAX_BL_LEVEL;
231 bd->props.power = FB_BLANK_UNBLANK; 235 bd->props.power = FB_BLANK_UNBLANK;
232 backlight_update_status(bd); 236 backlight_update_status(bd);
233 237
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index f7ece0ff431b..250bac3935a4 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -2642,8 +2642,9 @@ void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *s
2642 for (i = 0; i < rdev->num_crtc; i++) { 2642 for (i = 0; i < rdev->num_crtc; i++) {
2643 if (save->crtc_enabled[i]) { 2643 if (save->crtc_enabled[i]) {
2644 tmp = RREG32(EVERGREEN_MASTER_UPDATE_MODE + crtc_offsets[i]); 2644 tmp = RREG32(EVERGREEN_MASTER_UPDATE_MODE + crtc_offsets[i]);
2645 if ((tmp & 0x3) != 0) { 2645 if ((tmp & 0x7) != 3) {
2646 tmp &= ~0x3; 2646 tmp &= ~0x7;
2647 tmp |= 0x3;
2647 WREG32(EVERGREEN_MASTER_UPDATE_MODE + crtc_offsets[i], tmp); 2648 WREG32(EVERGREEN_MASTER_UPDATE_MODE + crtc_offsets[i], tmp);
2648 } 2649 }
2649 tmp = RREG32(EVERGREEN_GRPH_UPDATE + crtc_offsets[i]); 2650 tmp = RREG32(EVERGREEN_GRPH_UPDATE + crtc_offsets[i]);
diff --git a/drivers/gpu/drm/radeon/evergreen_reg.h b/drivers/gpu/drm/radeon/evergreen_reg.h
index 333d143fca2c..23bff590fb6e 100644
--- a/drivers/gpu/drm/radeon/evergreen_reg.h
+++ b/drivers/gpu/drm/radeon/evergreen_reg.h
@@ -239,7 +239,6 @@
239# define EVERGREEN_CRTC_V_BLANK (1 << 0) 239# define EVERGREEN_CRTC_V_BLANK (1 << 0)
240#define EVERGREEN_CRTC_STATUS_POSITION 0x6e90 240#define EVERGREEN_CRTC_STATUS_POSITION 0x6e90
241#define EVERGREEN_CRTC_STATUS_HV_COUNT 0x6ea0 241#define EVERGREEN_CRTC_STATUS_HV_COUNT 0x6ea0
242#define EVERGREEN_MASTER_UPDATE_MODE 0x6ef8
243#define EVERGREEN_CRTC_UPDATE_LOCK 0x6ed4 242#define EVERGREEN_CRTC_UPDATE_LOCK 0x6ed4
244#define EVERGREEN_MASTER_UPDATE_LOCK 0x6ef4 243#define EVERGREEN_MASTER_UPDATE_LOCK 0x6ef4
245#define EVERGREEN_MASTER_UPDATE_MODE 0x6ef8 244#define EVERGREEN_MASTER_UPDATE_MODE 0x6ef8
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 29d9cc04c04e..b7204500a9a6 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -684,10 +684,9 @@ struct radeon_flip_work {
684 struct work_struct unpin_work; 684 struct work_struct unpin_work;
685 struct radeon_device *rdev; 685 struct radeon_device *rdev;
686 int crtc_id; 686 int crtc_id;
687 struct drm_framebuffer *fb; 687 uint64_t base;
688 struct drm_pending_vblank_event *event; 688 struct drm_pending_vblank_event *event;
689 struct radeon_bo *old_rbo; 689 struct radeon_bo *old_rbo;
690 struct radeon_bo *new_rbo;
691 struct radeon_fence *fence; 690 struct radeon_fence *fence;
692}; 691};
693 692
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index 13896edcf0b6..bf25061c8ac4 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -366,7 +366,6 @@ void radeon_crtc_handle_flip(struct radeon_device *rdev, int crtc_id)
366 spin_unlock_irqrestore(&rdev->ddev->event_lock, flags); 366 spin_unlock_irqrestore(&rdev->ddev->event_lock, flags);
367 367
368 drm_vblank_put(rdev->ddev, radeon_crtc->crtc_id); 368 drm_vblank_put(rdev->ddev, radeon_crtc->crtc_id);
369 radeon_fence_unref(&work->fence);
370 radeon_irq_kms_pflip_irq_put(rdev, work->crtc_id); 369 radeon_irq_kms_pflip_irq_put(rdev, work->crtc_id);
371 queue_work(radeon_crtc->flip_queue, &work->unpin_work); 370 queue_work(radeon_crtc->flip_queue, &work->unpin_work);
372} 371}
@@ -386,51 +385,108 @@ static void radeon_flip_work_func(struct work_struct *__work)
386 struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[work->crtc_id]; 385 struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[work->crtc_id];
387 386
388 struct drm_crtc *crtc = &radeon_crtc->base; 387 struct drm_crtc *crtc = &radeon_crtc->base;
389 struct drm_framebuffer *fb = work->fb;
390
391 uint32_t tiling_flags, pitch_pixels;
392 uint64_t base;
393
394 unsigned long flags; 388 unsigned long flags;
395 int r; 389 int r;
396 390
397 down_read(&rdev->exclusive_lock); 391 down_read(&rdev->exclusive_lock);
398 while (work->fence) { 392 if (work->fence) {
399 r = radeon_fence_wait(work->fence, false); 393 r = radeon_fence_wait(work->fence, false);
400 if (r == -EDEADLK) { 394 if (r == -EDEADLK) {
401 up_read(&rdev->exclusive_lock); 395 up_read(&rdev->exclusive_lock);
402 r = radeon_gpu_reset(rdev); 396 r = radeon_gpu_reset(rdev);
403 down_read(&rdev->exclusive_lock); 397 down_read(&rdev->exclusive_lock);
404 } 398 }
399 if (r)
400 DRM_ERROR("failed to wait on page flip fence (%d)!\n", r);
405 401
406 if (r) { 402 /* We continue with the page flip even if we failed to wait on
407 DRM_ERROR("failed to wait on page flip fence (%d)!\n", 403 * the fence, otherwise the DRM core and userspace will be
408 r); 404 * confused about which BO the CRTC is scanning out
409 goto cleanup; 405 */
410 } else 406
411 radeon_fence_unref(&work->fence); 407 radeon_fence_unref(&work->fence);
412 } 408 }
413 409
410 /* We borrow the event spin lock for protecting flip_status */
411 spin_lock_irqsave(&crtc->dev->event_lock, flags);
412
413 /* set the proper interrupt */
414 radeon_irq_kms_pflip_irq_get(rdev, radeon_crtc->crtc_id);
415
416 /* do the flip (mmio) */
417 radeon_page_flip(rdev, radeon_crtc->crtc_id, work->base);
418
419 radeon_crtc->flip_status = RADEON_FLIP_SUBMITTED;
420 spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
421 up_read(&rdev->exclusive_lock);
422}
423
424static int radeon_crtc_page_flip(struct drm_crtc *crtc,
425 struct drm_framebuffer *fb,
426 struct drm_pending_vblank_event *event,
427 uint32_t page_flip_flags)
428{
429 struct drm_device *dev = crtc->dev;
430 struct radeon_device *rdev = dev->dev_private;
431 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
432 struct radeon_framebuffer *old_radeon_fb;
433 struct radeon_framebuffer *new_radeon_fb;
434 struct drm_gem_object *obj;
435 struct radeon_flip_work *work;
436 struct radeon_bo *new_rbo;
437 uint32_t tiling_flags, pitch_pixels;
438 uint64_t base;
439 unsigned long flags;
440 int r;
441
442 work = kzalloc(sizeof *work, GFP_KERNEL);
443 if (work == NULL)
444 return -ENOMEM;
445
446 INIT_WORK(&work->flip_work, radeon_flip_work_func);
447 INIT_WORK(&work->unpin_work, radeon_unpin_work_func);
448
449 work->rdev = rdev;
450 work->crtc_id = radeon_crtc->crtc_id;
451 work->event = event;
452
453 /* schedule unpin of the old buffer */
454 old_radeon_fb = to_radeon_framebuffer(crtc->primary->fb);
455 obj = old_radeon_fb->obj;
456
457 /* take a reference to the old object */
458 drm_gem_object_reference(obj);
459 work->old_rbo = gem_to_radeon_bo(obj);
460
461 new_radeon_fb = to_radeon_framebuffer(fb);
462 obj = new_radeon_fb->obj;
463 new_rbo = gem_to_radeon_bo(obj);
464
465 spin_lock(&new_rbo->tbo.bdev->fence_lock);
466 if (new_rbo->tbo.sync_obj)
467 work->fence = radeon_fence_ref(new_rbo->tbo.sync_obj);
468 spin_unlock(&new_rbo->tbo.bdev->fence_lock);
469
414 /* pin the new buffer */ 470 /* pin the new buffer */
415 DRM_DEBUG_DRIVER("flip-ioctl() cur_fbo = %p, cur_bbo = %p\n", 471 DRM_DEBUG_DRIVER("flip-ioctl() cur_rbo = %p, new_rbo = %p\n",
416 work->old_rbo, work->new_rbo); 472 work->old_rbo, new_rbo);
417 473
418 r = radeon_bo_reserve(work->new_rbo, false); 474 r = radeon_bo_reserve(new_rbo, false);
419 if (unlikely(r != 0)) { 475 if (unlikely(r != 0)) {
420 DRM_ERROR("failed to reserve new rbo buffer before flip\n"); 476 DRM_ERROR("failed to reserve new rbo buffer before flip\n");
421 goto cleanup; 477 goto cleanup;
422 } 478 }
423 /* Only 27 bit offset for legacy CRTC */ 479 /* Only 27 bit offset for legacy CRTC */
424 r = radeon_bo_pin_restricted(work->new_rbo, RADEON_GEM_DOMAIN_VRAM, 480 r = radeon_bo_pin_restricted(new_rbo, RADEON_GEM_DOMAIN_VRAM,
425 ASIC_IS_AVIVO(rdev) ? 0 : 1 << 27, &base); 481 ASIC_IS_AVIVO(rdev) ? 0 : 1 << 27, &base);
426 if (unlikely(r != 0)) { 482 if (unlikely(r != 0)) {
427 radeon_bo_unreserve(work->new_rbo); 483 radeon_bo_unreserve(new_rbo);
428 r = -EINVAL; 484 r = -EINVAL;
429 DRM_ERROR("failed to pin new rbo buffer before flip\n"); 485 DRM_ERROR("failed to pin new rbo buffer before flip\n");
430 goto cleanup; 486 goto cleanup;
431 } 487 }
432 radeon_bo_get_tiling_flags(work->new_rbo, &tiling_flags, NULL); 488 radeon_bo_get_tiling_flags(new_rbo, &tiling_flags, NULL);
433 radeon_bo_unreserve(work->new_rbo); 489 radeon_bo_unreserve(new_rbo);
434 490
435 if (!ASIC_IS_AVIVO(rdev)) { 491 if (!ASIC_IS_AVIVO(rdev)) {
436 /* crtc offset is from display base addr not FB location */ 492 /* crtc offset is from display base addr not FB location */
@@ -467,6 +523,7 @@ static void radeon_flip_work_func(struct work_struct *__work)
467 } 523 }
468 base &= ~7; 524 base &= ~7;
469 } 525 }
526 work->base = base;
470 527
471 r = drm_vblank_get(crtc->dev, radeon_crtc->crtc_id); 528 r = drm_vblank_get(crtc->dev, radeon_crtc->crtc_id);
472 if (r) { 529 if (r) {
@@ -477,100 +534,42 @@ static void radeon_flip_work_func(struct work_struct *__work)
477 /* We borrow the event spin lock for protecting flip_work */ 534 /* We borrow the event spin lock for protecting flip_work */
478 spin_lock_irqsave(&crtc->dev->event_lock, flags); 535 spin_lock_irqsave(&crtc->dev->event_lock, flags);
479 536
480 /* set the proper interrupt */ 537 if (radeon_crtc->flip_status != RADEON_FLIP_NONE) {
481 radeon_irq_kms_pflip_irq_get(rdev, radeon_crtc->crtc_id); 538 DRM_DEBUG_DRIVER("flip queue: crtc already busy\n");
539 spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
540 r = -EBUSY;
541 goto vblank_cleanup;
542 }
543 radeon_crtc->flip_status = RADEON_FLIP_PENDING;
544 radeon_crtc->flip_work = work;
482 545
483 /* do the flip (mmio) */ 546 /* update crtc fb */
484 radeon_page_flip(rdev, radeon_crtc->crtc_id, base); 547 crtc->primary->fb = fb;
485 548
486 radeon_crtc->flip_status = RADEON_FLIP_SUBMITTED;
487 spin_unlock_irqrestore(&crtc->dev->event_lock, flags); 549 spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
488 up_read(&rdev->exclusive_lock);
489 550
490 return; 551 queue_work(radeon_crtc->flip_queue, &work->flip_work);
552 return 0;
553
554vblank_cleanup:
555 drm_vblank_put(crtc->dev, radeon_crtc->crtc_id);
491 556
492pflip_cleanup: 557pflip_cleanup:
493 if (unlikely(radeon_bo_reserve(work->new_rbo, false) != 0)) { 558 if (unlikely(radeon_bo_reserve(new_rbo, false) != 0)) {
494 DRM_ERROR("failed to reserve new rbo in error path\n"); 559 DRM_ERROR("failed to reserve new rbo in error path\n");
495 goto cleanup; 560 goto cleanup;
496 } 561 }
497 if (unlikely(radeon_bo_unpin(work->new_rbo) != 0)) { 562 if (unlikely(radeon_bo_unpin(new_rbo) != 0)) {
498 DRM_ERROR("failed to unpin new rbo in error path\n"); 563 DRM_ERROR("failed to unpin new rbo in error path\n");
499 } 564 }
500 radeon_bo_unreserve(work->new_rbo); 565 radeon_bo_unreserve(new_rbo);
501 566
502cleanup: 567cleanup:
503 drm_gem_object_unreference_unlocked(&work->old_rbo->gem_base); 568 drm_gem_object_unreference_unlocked(&work->old_rbo->gem_base);
504 radeon_fence_unref(&work->fence); 569 radeon_fence_unref(&work->fence);
505 kfree(work); 570 kfree(work);
506 up_read(&rdev->exclusive_lock);
507}
508
509static int radeon_crtc_page_flip(struct drm_crtc *crtc,
510 struct drm_framebuffer *fb,
511 struct drm_pending_vblank_event *event,
512 uint32_t page_flip_flags)
513{
514 struct drm_device *dev = crtc->dev;
515 struct radeon_device *rdev = dev->dev_private;
516 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
517 struct radeon_framebuffer *old_radeon_fb;
518 struct radeon_framebuffer *new_radeon_fb;
519 struct drm_gem_object *obj;
520 struct radeon_flip_work *work;
521 unsigned long flags;
522
523 work = kzalloc(sizeof *work, GFP_KERNEL);
524 if (work == NULL)
525 return -ENOMEM;
526
527 INIT_WORK(&work->flip_work, radeon_flip_work_func);
528 INIT_WORK(&work->unpin_work, radeon_unpin_work_func);
529
530 work->rdev = rdev;
531 work->crtc_id = radeon_crtc->crtc_id;
532 work->fb = fb;
533 work->event = event;
534
535 /* schedule unpin of the old buffer */
536 old_radeon_fb = to_radeon_framebuffer(crtc->primary->fb);
537 obj = old_radeon_fb->obj;
538
539 /* take a reference to the old object */
540 drm_gem_object_reference(obj);
541 work->old_rbo = gem_to_radeon_bo(obj);
542
543 new_radeon_fb = to_radeon_framebuffer(fb);
544 obj = new_radeon_fb->obj;
545 work->new_rbo = gem_to_radeon_bo(obj);
546
547 spin_lock(&work->new_rbo->tbo.bdev->fence_lock);
548 if (work->new_rbo->tbo.sync_obj)
549 work->fence = radeon_fence_ref(work->new_rbo->tbo.sync_obj);
550 spin_unlock(&work->new_rbo->tbo.bdev->fence_lock);
551
552 /* We borrow the event spin lock for protecting flip_work */
553 spin_lock_irqsave(&crtc->dev->event_lock, flags);
554 571
555 if (radeon_crtc->flip_status != RADEON_FLIP_NONE) { 572 return r;
556 DRM_DEBUG_DRIVER("flip queue: crtc already busy\n");
557 spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
558 drm_gem_object_unreference_unlocked(&work->old_rbo->gem_base);
559 radeon_fence_unref(&work->fence);
560 kfree(work);
561 return -EBUSY;
562 }
563 radeon_crtc->flip_status = RADEON_FLIP_PENDING;
564 radeon_crtc->flip_work = work;
565
566 /* update crtc fb */
567 crtc->primary->fb = fb;
568
569 spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
570
571 queue_work(radeon_crtc->flip_queue, &work->flip_work);
572
573 return 0;
574} 573}
575 574
576static int 575static int
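The flip path above serializes page flips per CRTC with a small status word (NONE, then PENDING, then SUBMITTED) kept under the event spin lock: a new flip is refused with -EBUSY while one is outstanding, and the actual programming is deferred to a workqueue. A minimal sketch of just that state check; the names and the single-threaded setting are illustrative stand-ins, not the radeon code itself.

#include <stdio.h>
#include <errno.h>

enum flip_status { FLIP_NONE, FLIP_PENDING, FLIP_SUBMITTED };

struct crtc { enum flip_status flip_status; };

/* caller is assumed to hold the lock protecting flip_status */
static int queue_flip(struct crtc *c)
{
	if (c->flip_status != FLIP_NONE)
		return -EBUSY;          /* previous flip still in flight */
	c->flip_status = FLIP_PENDING;  /* claimed; a worker submits later */
	return 0;
}

int main(void)
{
	struct crtc c = { FLIP_NONE };

	printf("first flip:  %d\n", queue_flip(&c));  /* 0 */
	printf("second flip: %d\n", queue_flip(&c));  /* -16 (EBUSY) */
	return 0;
}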
@@ -830,6 +829,10 @@ int radeon_ddc_get_modes(struct radeon_connector *radeon_connector)
830 struct radeon_device *rdev = dev->dev_private; 829 struct radeon_device *rdev = dev->dev_private;
831 int ret = 0; 830 int ret = 0;
832 831
832 /* don't leak the edid if we already fetched it in detect() */
833 if (radeon_connector->edid)
834 goto got_edid;
835
833 /* on hw with routers, select right port */ 836 /* on hw with routers, select right port */
834 if (radeon_connector->router.ddc_valid) 837 if (radeon_connector->router.ddc_valid)
835 radeon_router_select_ddc_port(radeon_connector); 838 radeon_router_select_ddc_port(radeon_connector);
@@ -868,6 +871,7 @@ int radeon_ddc_get_modes(struct radeon_connector *radeon_connector)
868 radeon_connector->edid = radeon_bios_get_hardcoded_edid(rdev); 871 radeon_connector->edid = radeon_bios_get_hardcoded_edid(rdev);
869 } 872 }
870 if (radeon_connector->edid) { 873 if (radeon_connector->edid) {
874got_edid:
871 drm_mode_connector_update_edid_property(&radeon_connector->base, radeon_connector->edid); 875 drm_mode_connector_update_edid_property(&radeon_connector->base, radeon_connector->edid);
872 ret = drm_add_edid_modes(&radeon_connector->base, radeon_connector->edid); 876 ret = drm_add_edid_modes(&radeon_connector->base, radeon_connector->edid);
873 drm_edid_to_eld(&radeon_connector->base, radeon_connector->edid); 877 drm_edid_to_eld(&radeon_connector->base, radeon_connector->edid);
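The radeon_ddc_get_modes() change above jumps to a got_edid label when detect() already cached an EDID, so the blob is neither leaked nor fetched a second time over DDC. A minimal fetch-once sketch of the same idea; connector, fetch_edid() and the 128-byte blob are hypothetical stand-ins, not the driver's API.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct connector {
	unsigned char *edid;            /* cached blob, NULL until fetched */
};

static unsigned char *fetch_edid(void) /* stand-in for the DDC transfer */
{
	unsigned char *blob = malloc(128);
	if (blob)
		memset(blob, 0xaa, 128);
	return blob;
}

static int get_modes(struct connector *conn)
{
	if (conn->edid)
		goto got_edid;          /* reuse; don't leak or re-fetch */

	conn->edid = fetch_edid();
	if (!conn->edid)
		return 0;

got_edid:
	printf("parsing EDID at %p\n", (void *)conn->edid);
	return 1;
}

int main(void)
{
	struct connector c = { NULL };

	get_modes(&c);                  /* fetches */
	get_modes(&c);                  /* hits the cache */
	free(c.edid);
	return 0;
}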
diff --git a/drivers/gpu/drm/radeon/rv515.c b/drivers/gpu/drm/radeon/rv515.c
index 237dd29d9f1c..3e21e869015f 100644
--- a/drivers/gpu/drm/radeon/rv515.c
+++ b/drivers/gpu/drm/radeon/rv515.c
@@ -406,8 +406,9 @@ void rv515_mc_resume(struct radeon_device *rdev, struct rv515_mc_save *save)
406 for (i = 0; i < rdev->num_crtc; i++) { 406 for (i = 0; i < rdev->num_crtc; i++) {
407 if (save->crtc_enabled[i]) { 407 if (save->crtc_enabled[i]) {
408 tmp = RREG32(AVIVO_D1MODE_MASTER_UPDATE_MODE + crtc_offsets[i]); 408 tmp = RREG32(AVIVO_D1MODE_MASTER_UPDATE_MODE + crtc_offsets[i]);
409 if ((tmp & 0x3) != 0) { 409 if ((tmp & 0x7) != 3) {
410 tmp &= ~0x3; 410 tmp &= ~0x7;
411 tmp |= 0x3;
411 WREG32(AVIVO_D1MODE_MASTER_UPDATE_MODE + crtc_offsets[i], tmp); 412 WREG32(AVIVO_D1MODE_MASTER_UPDATE_MODE + crtc_offsets[i], tmp);
412 } 413 }
413 tmp = RREG32(AVIVO_D1GRPH_UPDATE + crtc_offsets[i]); 414 tmp = RREG32(AVIVO_D1GRPH_UPDATE + crtc_offsets[i]);
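The rv515_mc_resume() hunk widens the test from the low two bits to the full three-bit field and now forces the value 3 instead of merely clearing the bits. The read-modify-write idiom it relies on, sketched with a plain variable in place of the RREG32/WREG32 MMIO accessors:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t reg = 0x15;            /* pretend readback, field = 0x5 */

	if ((reg & 0x7) != 3) {         /* test the whole 3-bit field */
		reg &= ~(uint32_t)0x7;  /* clear it ... */
		reg |= 0x3;             /* ... then set the wanted encoding */
	}
	printf("reg = 0x%x\n", reg);    /* -> 0x13 */
	return 0;
}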
diff --git a/drivers/hv/hv_fcopy.c b/drivers/hv/hv_fcopy.c
index eaaa3d843b80..23b2ce294c4c 100644
--- a/drivers/hv/hv_fcopy.c
+++ b/drivers/hv/hv_fcopy.c
@@ -246,8 +246,8 @@ void hv_fcopy_onchannelcallback(void *context)
246 /* 246 /*
247 * Send the information to the user-level daemon. 247 * Send the information to the user-level daemon.
248 */ 248 */
249 fcopy_send_data();
250 schedule_delayed_work(&fcopy_work, 5*HZ); 249 schedule_delayed_work(&fcopy_work, 5*HZ);
250 fcopy_send_data();
251 return; 251 return;
252 } 252 }
253 icmsghdr->icflags = ICMSGHDRFLAG_TRANSACTION | ICMSGHDRFLAG_RESPONSE; 253 icmsghdr->icflags = ICMSGHDRFLAG_TRANSACTION | ICMSGHDRFLAG_RESPONSE;
diff --git a/drivers/hwmon/adt7470.c b/drivers/hwmon/adt7470.c
index 0f4dea5ccf17..9ee3913850d6 100644
--- a/drivers/hwmon/adt7470.c
+++ b/drivers/hwmon/adt7470.c
@@ -515,7 +515,7 @@ static ssize_t set_temp_min(struct device *dev,
515 return -EINVAL; 515 return -EINVAL;
516 516
517 temp = DIV_ROUND_CLOSEST(temp, 1000); 517 temp = DIV_ROUND_CLOSEST(temp, 1000);
518 temp = clamp_val(temp, 0, 255); 518 temp = clamp_val(temp, -128, 127);
519 519
520 mutex_lock(&data->lock); 520 mutex_lock(&data->lock);
521 data->temp_min[attr->index] = temp; 521 data->temp_min[attr->index] = temp;
@@ -549,7 +549,7 @@ static ssize_t set_temp_max(struct device *dev,
549 return -EINVAL; 549 return -EINVAL;
550 550
551 temp = DIV_ROUND_CLOSEST(temp, 1000); 551 temp = DIV_ROUND_CLOSEST(temp, 1000);
552 temp = clamp_val(temp, 0, 255); 552 temp = clamp_val(temp, -128, 127);
553 553
554 mutex_lock(&data->lock); 554 mutex_lock(&data->lock);
555 data->temp_max[attr->index] = temp; 555 data->temp_max[attr->index] = temp;
@@ -826,7 +826,7 @@ static ssize_t set_pwm_tmin(struct device *dev,
826 return -EINVAL; 826 return -EINVAL;
827 827
828 temp = DIV_ROUND_CLOSEST(temp, 1000); 828 temp = DIV_ROUND_CLOSEST(temp, 1000);
829 temp = clamp_val(temp, 0, 255); 829 temp = clamp_val(temp, -128, 127);
830 830
831 mutex_lock(&data->lock); 831 mutex_lock(&data->lock);
832 data->pwm_tmin[attr->index] = temp; 832 data->pwm_tmin[attr->index] = temp;
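All three adt7470 hunks make the same point: the limit registers are signed 8-bit, so clamping to 0..255 rejected negative limits outright and let 128..255 wrap to negative values once stored. A worked example of the corrected range, with the kernel's clamp_val() and the closest-integer rounding open-coded for a standalone build:

#include <stdio.h>

static long clamp_val(long v, long lo, long hi)
{
	return v < lo ? lo : (v > hi ? hi : v);
}

int main(void)
{
	long inputs[] = { -40000, 25000, 200000 };   /* millidegrees C */

	for (int i = 0; i < 3; i++) {
		/* round to whole degrees, as the driver does */
		long t = (inputs[i] + (inputs[i] < 0 ? -500 : 500)) / 1000;
		signed char reg = (signed char)clamp_val(t, -128, 127);

		printf("%7ld mC -> register %4d\n", inputs[i], reg);
	}
	return 0;                        /* -40, 25, 127 */
}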
diff --git a/drivers/hwmon/da9052-hwmon.c b/drivers/hwmon/da9052-hwmon.c
index afd31042b452..d14ab3c45daa 100644
--- a/drivers/hwmon/da9052-hwmon.c
+++ b/drivers/hwmon/da9052-hwmon.c
@@ -194,7 +194,7 @@ static ssize_t da9052_hwmon_show_name(struct device *dev,
194 struct device_attribute *devattr, 194 struct device_attribute *devattr,
195 char *buf) 195 char *buf)
196{ 196{
197 return sprintf(buf, "da9052-hwmon\n"); 197 return sprintf(buf, "da9052\n");
198} 198}
199 199
200static ssize_t show_label(struct device *dev, 200static ssize_t show_label(struct device *dev,
diff --git a/drivers/hwmon/da9055-hwmon.c b/drivers/hwmon/da9055-hwmon.c
index 73b3865f1207..35eb7738d711 100644
--- a/drivers/hwmon/da9055-hwmon.c
+++ b/drivers/hwmon/da9055-hwmon.c
@@ -204,7 +204,7 @@ static ssize_t da9055_hwmon_show_name(struct device *dev,
204 struct device_attribute *devattr, 204 struct device_attribute *devattr,
205 char *buf) 205 char *buf)
206{ 206{
207 return sprintf(buf, "da9055-hwmon\n"); 207 return sprintf(buf, "da9055\n");
208} 208}
209 209
210static ssize_t show_label(struct device *dev, 210static ssize_t show_label(struct device *dev,
diff --git a/drivers/ide/Kconfig b/drivers/ide/Kconfig
index 8fb46aab2d87..a04c49f2a011 100644
--- a/drivers/ide/Kconfig
+++ b/drivers/ide/Kconfig
@@ -416,6 +416,7 @@ config BLK_DEV_CY82C693
416 416
417config BLK_DEV_CS5520 417config BLK_DEV_CS5520
418 tristate "Cyrix CS5510/20 MediaGX chipset support (VERY EXPERIMENTAL)" 418 tristate "Cyrix CS5510/20 MediaGX chipset support (VERY EXPERIMENTAL)"
419 depends on X86_32 || COMPILE_TEST
419 select BLK_DEV_IDEDMA_PCI 420 select BLK_DEV_IDEDMA_PCI
420 help 421 help
421 Include support for PIO tuning and virtual DMA on the Cyrix MediaGX 422 Include support for PIO tuning and virtual DMA on the Cyrix MediaGX
@@ -426,6 +427,7 @@ config BLK_DEV_CS5520
426 427
427config BLK_DEV_CS5530 428config BLK_DEV_CS5530
428 tristate "Cyrix/National Semiconductor CS5530 MediaGX chipset support" 429 tristate "Cyrix/National Semiconductor CS5530 MediaGX chipset support"
430 depends on X86_32 || COMPILE_TEST
429 select BLK_DEV_IDEDMA_PCI 431 select BLK_DEV_IDEDMA_PCI
430 help 432 help
431 Include support for UDMA on the Cyrix MediaGX 5530 chipset. This 433 Include support for UDMA on the Cyrix MediaGX 5530 chipset. This
@@ -435,7 +437,7 @@ config BLK_DEV_CS5530
435 437
436config BLK_DEV_CS5535 438config BLK_DEV_CS5535
437 tristate "AMD CS5535 chipset support" 439 tristate "AMD CS5535 chipset support"
438 depends on X86 && !X86_64 440 depends on X86_32
439 select BLK_DEV_IDEDMA_PCI 441 select BLK_DEV_IDEDMA_PCI
440 help 442 help
441 Include support for UDMA on the NSC/AMD CS5535 companion chipset. 443 Include support for UDMA on the NSC/AMD CS5535 companion chipset.
@@ -486,6 +488,7 @@ config BLK_DEV_JMICRON
486 488
487config BLK_DEV_SC1200 489config BLK_DEV_SC1200
488 tristate "National SCx200 chipset support" 490 tristate "National SCx200 chipset support"
491 depends on X86_32 || COMPILE_TEST
489 select BLK_DEV_IDEDMA_PCI 492 select BLK_DEV_IDEDMA_PCI
490 help 493 help
491 This driver adds support for the on-board IDE controller on the 494 This driver adds support for the on-board IDE controller on the
diff --git a/drivers/ide/ide-probe.c b/drivers/ide/ide-probe.c
index 2a744a91370e..a3d3b1733c49 100644
--- a/drivers/ide/ide-probe.c
+++ b/drivers/ide/ide-probe.c
@@ -853,8 +853,9 @@ static int init_irq (ide_hwif_t *hwif)
853 if (irq_handler == NULL) 853 if (irq_handler == NULL)
854 irq_handler = ide_intr; 854 irq_handler = ide_intr;
855 855
856 if (request_irq(hwif->irq, irq_handler, sa, hwif->name, hwif)) 856 if (!host->get_lock)
857 goto out_up; 857 if (request_irq(hwif->irq, irq_handler, sa, hwif->name, hwif))
858 goto out_up;
858 859
859#if !defined(__mc68000__) 860#if !defined(__mc68000__)
860 printk(KERN_INFO "%s at 0x%03lx-0x%03lx,0x%03lx on irq %d", hwif->name, 861 printk(KERN_INFO "%s at 0x%03lx-0x%03lx,0x%03lx on irq %d", hwif->name,
@@ -1533,7 +1534,8 @@ static void ide_unregister(ide_hwif_t *hwif)
1533 1534
1534 ide_proc_unregister_port(hwif); 1535 ide_proc_unregister_port(hwif);
1535 1536
1536 free_irq(hwif->irq, hwif); 1537 if (!hwif->host->get_lock)
1538 free_irq(hwif->irq, hwif);
1537 1539
1538 device_unregister(hwif->portdev); 1540 device_unregister(hwif->portdev);
1539 device_unregister(&hwif->gendev); 1541 device_unregister(&hwif->gendev);
diff --git a/drivers/iio/accel/mma8452.c b/drivers/iio/accel/mma8452.c
index 17aeea170566..2a5fa9a436e5 100644
--- a/drivers/iio/accel/mma8452.c
+++ b/drivers/iio/accel/mma8452.c
@@ -111,8 +111,14 @@ static const int mma8452_samp_freq[8][2] = {
111 {6, 250000}, {1, 560000} 111 {6, 250000}, {1, 560000}
112}; 112};
113 113
114/*
115 * Hardware has fullscale of -2G, -4G, -8G corresponding to raw value -2048
116 * The userspace interface uses m/s^2 and we declare micro units
117 * So scale factor is given by:
118 * g * N * 1000000 / 2048 for N = 2, 4, 8 and g=9.80665
119 */
114static const int mma8452_scales[3][2] = { 120static const int mma8452_scales[3][2] = {
115 {0, 977}, {0, 1953}, {0, 3906} 121 {0, 9577}, {0, 19154}, {0, 38307}
116}; 122};
117 123
118static ssize_t mma8452_show_samp_freq_avail(struct device *dev, 124static ssize_t mma8452_show_samp_freq_avail(struct device *dev,
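The new comment makes the fixed table checkable by hand: 9.80665 * 2 * 1000000 / 2048 is about 9577 micro-(m/s^2) per LSB for the 2g range, and doubling the full scale doubles it to 19154 and then 38307, exactly the corrected entries (which the old table did not satisfy). A one-loop verification:

#include <stdio.h>

int main(void)
{
	const double g = 9.80665;

	for (int n = 2; n <= 8; n *= 2)  /* full scale 2g, 4g, 8g */
		printf("%dg: %.0f micro m/s^2 per LSB\n",
		       n, g * n * 1000000.0 / 2048.0);
	return 0;                        /* 9577, 19154, 38307 */
}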
diff --git a/drivers/iio/industrialio-event.c b/drivers/iio/industrialio-event.c
index 258a973a1fb8..bfbf4d419f41 100644
--- a/drivers/iio/industrialio-event.c
+++ b/drivers/iio/industrialio-event.c
@@ -345,6 +345,9 @@ static int iio_device_add_event(struct iio_dev *indio_dev,
345 &indio_dev->event_interface->dev_attr_list); 345 &indio_dev->event_interface->dev_attr_list);
346 kfree(postfix); 346 kfree(postfix);
347 347
348 if ((ret == -EBUSY) && (shared_by != IIO_SEPARATE))
349 continue;
350
348 if (ret) 351 if (ret)
349 return ret; 352 return ret;
350 353
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
index 6d61a16d1f5c..c2fb71c182a8 100644
--- a/drivers/infiniband/hw/cxgb4/cm.c
+++ b/drivers/infiniband/hw/cxgb4/cm.c
@@ -433,8 +433,17 @@ static void arp_failure_discard(void *handle, struct sk_buff *skb)
433 */ 433 */
434static void act_open_req_arp_failure(void *handle, struct sk_buff *skb) 434static void act_open_req_arp_failure(void *handle, struct sk_buff *skb)
435{ 435{
436 struct c4iw_ep *ep = handle;
437
436 printk(KERN_ERR MOD "ARP failure during connect\n"); 438 printk(KERN_ERR MOD "ARP failure during connect\n");
437 kfree_skb(skb); 439 kfree_skb(skb);
440 connect_reply_upcall(ep, -EHOSTUNREACH);
441 state_set(&ep->com, DEAD);
442 remove_handle(ep->com.dev, &ep->com.dev->atid_idr, ep->atid);
443 cxgb4_free_atid(ep->com.dev->rdev.lldi.tids, ep->atid);
444 dst_release(ep->dst);
445 cxgb4_l2t_release(ep->l2t);
446 c4iw_put_ep(&ep->com);
438} 447}
439 448
440/* 449/*
@@ -660,7 +669,7 @@ static int send_connect(struct c4iw_ep *ep)
660 opt2 |= T5_OPT_2_VALID; 669 opt2 |= T5_OPT_2_VALID;
661 opt2 |= V_CONG_CNTRL(CONG_ALG_TAHOE); 670 opt2 |= V_CONG_CNTRL(CONG_ALG_TAHOE);
662 } 671 }
663 t4_set_arp_err_handler(skb, NULL, act_open_req_arp_failure); 672 t4_set_arp_err_handler(skb, ep, act_open_req_arp_failure);
664 673
665 if (is_t4(ep->com.dev->rdev.lldi.adapter_type)) { 674 if (is_t4(ep->com.dev->rdev.lldi.adapter_type)) {
666 if (ep->com.remote_addr.ss_family == AF_INET) { 675 if (ep->com.remote_addr.ss_family == AF_INET) {
@@ -2219,7 +2228,6 @@ static void reject_cr(struct c4iw_dev *dev, u32 hwtid, struct sk_buff *skb)
2219 PDBG("%s c4iw_dev %p tid %u\n", __func__, dev, hwtid); 2228 PDBG("%s c4iw_dev %p tid %u\n", __func__, dev, hwtid);
2220 BUG_ON(skb_cloned(skb)); 2229 BUG_ON(skb_cloned(skb));
2221 skb_trim(skb, sizeof(struct cpl_tid_release)); 2230 skb_trim(skb, sizeof(struct cpl_tid_release));
2222 skb_get(skb);
2223 release_tid(&dev->rdev, hwtid, skb); 2231 release_tid(&dev->rdev, hwtid, skb);
2224 return; 2232 return;
2225} 2233}
@@ -3969,7 +3977,7 @@ int __init c4iw_cm_init(void)
3969 return 0; 3977 return 0;
3970} 3978}
3971 3979
3972void __exit c4iw_cm_term(void) 3980void c4iw_cm_term(void)
3973{ 3981{
3974 WARN_ON(!list_empty(&timeout_list)); 3982 WARN_ON(!list_empty(&timeout_list));
3975 flush_workqueue(workq); 3983 flush_workqueue(workq);
diff --git a/drivers/infiniband/hw/cxgb4/device.c b/drivers/infiniband/hw/cxgb4/device.c
index bda949223637..f25df5276c22 100644
--- a/drivers/infiniband/hw/cxgb4/device.c
+++ b/drivers/infiniband/hw/cxgb4/device.c
@@ -844,6 +844,7 @@ static int c4iw_rdev_open(struct c4iw_rdev *rdev)
844 pr_err(MOD "error allocating status page\n"); 844 pr_err(MOD "error allocating status page\n");
845 goto err4; 845 goto err4;
846 } 846 }
847
847 if (c4iw_wr_log) { 848 if (c4iw_wr_log) {
848 rdev->wr_log = kzalloc((1 << c4iw_wr_log_size_order) * 849 rdev->wr_log = kzalloc((1 << c4iw_wr_log_size_order) *
849 sizeof(*rdev->wr_log), GFP_KERNEL); 850 sizeof(*rdev->wr_log), GFP_KERNEL);
@@ -854,6 +855,9 @@ static int c4iw_rdev_open(struct c4iw_rdev *rdev)
854 pr_err(MOD "error allocating wr_log. Logging disabled\n"); 855 pr_err(MOD "error allocating wr_log. Logging disabled\n");
855 } 856 }
856 } 857 }
858
859 rdev->status_page->db_off = 0;
860
857 return 0; 861 return 0;
858err4: 862err4:
859 c4iw_rqtpool_destroy(rdev); 863 c4iw_rqtpool_destroy(rdev);
@@ -888,7 +892,6 @@ static void c4iw_dealloc(struct uld_ctx *ctx)
888 if (ctx->dev->rdev.oc_mw_kva) 892 if (ctx->dev->rdev.oc_mw_kva)
889 iounmap(ctx->dev->rdev.oc_mw_kva); 893 iounmap(ctx->dev->rdev.oc_mw_kva);
890 ib_dealloc_device(&ctx->dev->ibdev); 894 ib_dealloc_device(&ctx->dev->ibdev);
891 iwpm_exit(RDMA_NL_C4IW);
892 ctx->dev = NULL; 895 ctx->dev = NULL;
893} 896}
894 897
@@ -1007,12 +1010,6 @@ static struct c4iw_dev *c4iw_alloc(const struct cxgb4_lld_info *infop)
1007 setup_debugfs(devp); 1010 setup_debugfs(devp);
1008 } 1011 }
1009 1012
1010 ret = iwpm_init(RDMA_NL_C4IW);
1011 if (ret) {
1012 pr_err("port mapper initialization failed with %d\n", ret);
1013 ib_dealloc_device(&devp->ibdev);
1014 return ERR_PTR(ret);
1015 }
1016 1013
1017 return devp; 1014 return devp;
1018} 1015}
@@ -1513,6 +1510,15 @@ static int __init c4iw_init_module(void)
1513 pr_err("%s[%u]: Failed to add netlink callback\n" 1510 pr_err("%s[%u]: Failed to add netlink callback\n"
1514 , __func__, __LINE__); 1511 , __func__, __LINE__);
1515 1512
1513 err = iwpm_init(RDMA_NL_C4IW);
1514 if (err) {
1515 pr_err("port mapper initialization failed with %d\n", err);
1516 ibnl_remove_client(RDMA_NL_C4IW);
1517 c4iw_cm_term();
1518 debugfs_remove_recursive(c4iw_debugfs_root);
1519 return err;
1520 }
1521
1516 cxgb4_register_uld(CXGB4_ULD_RDMA, &c4iw_uld_info); 1522 cxgb4_register_uld(CXGB4_ULD_RDMA, &c4iw_uld_info);
1517 1523
1518 return 0; 1524 return 0;
@@ -1530,6 +1536,7 @@ static void __exit c4iw_exit_module(void)
1530 } 1536 }
1531 mutex_unlock(&dev_mutex); 1537 mutex_unlock(&dev_mutex);
1532 cxgb4_unregister_uld(CXGB4_ULD_RDMA); 1538 cxgb4_unregister_uld(CXGB4_ULD_RDMA);
1539 iwpm_exit(RDMA_NL_C4IW);
1533 ibnl_remove_client(RDMA_NL_C4IW); 1540 ibnl_remove_client(RDMA_NL_C4IW);
1534 c4iw_cm_term(); 1541 c4iw_cm_term();
1535 debugfs_remove_recursive(c4iw_debugfs_root); 1542 debugfs_remove_recursive(c4iw_debugfs_root);
diff --git a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
index c378fd25ee0c..b5678ac97393 100644
--- a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
+++ b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
@@ -943,7 +943,7 @@ int c4iw_destroy_ctrl_qp(struct c4iw_rdev *rdev);
943int c4iw_register_device(struct c4iw_dev *dev); 943int c4iw_register_device(struct c4iw_dev *dev);
944void c4iw_unregister_device(struct c4iw_dev *dev); 944void c4iw_unregister_device(struct c4iw_dev *dev);
945int __init c4iw_cm_init(void); 945int __init c4iw_cm_init(void);
946void __exit c4iw_cm_term(void); 946void c4iw_cm_term(void);
947void c4iw_release_dev_ucontext(struct c4iw_rdev *rdev, 947void c4iw_release_dev_ucontext(struct c4iw_rdev *rdev,
948 struct c4iw_dev_ucontext *uctx); 948 struct c4iw_dev_ucontext *uctx);
949void c4iw_init_dev_ucontext(struct c4iw_rdev *rdev, 949void c4iw_init_dev_ucontext(struct c4iw_rdev *rdev,
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index d13ddf1c0033..bbbcf389272c 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -675,7 +675,7 @@ static int create_kernel_qp(struct mlx5_ib_dev *dev,
675 int err; 675 int err;
676 676
677 uuari = &dev->mdev.priv.uuari; 677 uuari = &dev->mdev.priv.uuari;
678 if (init_attr->create_flags & ~IB_QP_CREATE_SIGNATURE_EN) 678 if (init_attr->create_flags & ~(IB_QP_CREATE_SIGNATURE_EN | IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK))
679 return -EINVAL; 679 return -EINVAL;
680 680
681 if (init_attr->qp_type == MLX5_IB_QPT_REG_UMR) 681 if (init_attr->qp_type == MLX5_IB_QPT_REG_UMR)
diff --git a/drivers/iommu/fsl_pamu.c b/drivers/iommu/fsl_pamu.c
index b99dd88e31b9..bb446d742a2d 100644
--- a/drivers/iommu/fsl_pamu.c
+++ b/drivers/iommu/fsl_pamu.c
@@ -170,10 +170,10 @@ int pamu_disable_liodn(int liodn)
170static unsigned int map_addrspace_size_to_wse(phys_addr_t addrspace_size) 170static unsigned int map_addrspace_size_to_wse(phys_addr_t addrspace_size)
171{ 171{
172 /* Bug if not a power of 2 */ 172 /* Bug if not a power of 2 */
173 BUG_ON(!is_power_of_2(addrspace_size)); 173 BUG_ON((addrspace_size & (addrspace_size - 1)));
174 174
175 /* window size is 2^(WSE+1) bytes */ 175 /* window size is 2^(WSE+1) bytes */
176 return __ffs(addrspace_size) - 1; 176 return fls64(addrspace_size) - 2;
177} 177}
178 178
179/* Derive the PAACE window count encoding for the subwindow count */ 179/* Derive the PAACE window count encoding for the subwindow count */
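Two facts make the replacement equivalent to the helpers it displaces: x & (x - 1) is zero exactly for powers of two (and zero), and for x = 2^k, fls64(x) returns k + 1, so WSE = fls64(x) - 2 yields x = 2^(WSE+1) as the comment requires. A standalone check, with fls64() approximated by a shift loop:

#include <stdio.h>
#include <stdint.h>

static int fls64_demo(uint64_t x)        /* 1-based index of top set bit */
{
	int n = 0;

	while (x) { n++; x >>= 1; }
	return n;
}

int main(void)
{
	for (uint64_t size = 4096; size <= (1ULL << 20); size <<= 2) {
		int pow2 = !(size & (size - 1));
		int wse  = fls64_demo(size) - 2;

		printf("size=%8llu pow2=%d wse=%2d 2^(wse+1)=%8llu\n",
		       (unsigned long long)size, pow2, wse,
		       1ULL << (wse + 1));
	}
	return 0;
}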
@@ -351,7 +351,7 @@ int pamu_config_ppaace(int liodn, phys_addr_t win_addr, phys_addr_t win_size,
351 struct paace *ppaace; 351 struct paace *ppaace;
352 unsigned long fspi; 352 unsigned long fspi;
353 353
354 if (!is_power_of_2(win_size) || win_size < PAMU_PAGE_SIZE) { 354 if ((win_size & (win_size - 1)) || win_size < PAMU_PAGE_SIZE) {
355 pr_debug("window size too small or not a power of two %llx\n", win_size); 355 pr_debug("window size too small or not a power of two %llx\n", win_size);
356 return -EINVAL; 356 return -EINVAL;
357 } 357 }
@@ -464,7 +464,7 @@ int pamu_config_spaace(int liodn, u32 subwin_cnt, u32 subwin,
464 return -ENOENT; 464 return -ENOENT;
465 } 465 }
466 466
467 if (!is_power_of_2(subwin_size) || subwin_size < PAMU_PAGE_SIZE) { 467 if ((subwin_size & (subwin_size - 1)) || subwin_size < PAMU_PAGE_SIZE) {
468 pr_debug("subwindow size out of range, or not a power of 2\n"); 468 pr_debug("subwindow size out of range, or not a power of 2\n");
469 return -EINVAL; 469 return -EINVAL;
470 } 470 }
diff --git a/drivers/iommu/fsl_pamu_domain.c b/drivers/iommu/fsl_pamu_domain.c
index 93072ba44b1d..af47648301a9 100644
--- a/drivers/iommu/fsl_pamu_domain.c
+++ b/drivers/iommu/fsl_pamu_domain.c
@@ -301,7 +301,7 @@ static int check_size(u64 size, dma_addr_t iova)
301 * Size must be a power of two and at least be equal 301 * Size must be a power of two and at least be equal
302 * to PAMU page size. 302 * to PAMU page size.
303 */ 303 */
304 if (!is_power_of_2(size) || size < PAMU_PAGE_SIZE) { 304 if ((size & (size - 1)) || size < PAMU_PAGE_SIZE) {
305 pr_debug("%s: size too small or not a power of two\n", __func__); 305 pr_debug("%s: size too small or not a power of two\n", __func__);
306 return -EINVAL; 306 return -EINVAL;
307 } 307 }
@@ -335,11 +335,6 @@ static struct fsl_dma_domain *iommu_alloc_dma_domain(void)
335 return domain; 335 return domain;
336} 336}
337 337
338static inline struct device_domain_info *find_domain(struct device *dev)
339{
340 return dev->archdata.iommu_domain;
341}
342
343static void remove_device_ref(struct device_domain_info *info, u32 win_cnt) 338static void remove_device_ref(struct device_domain_info *info, u32 win_cnt)
344{ 339{
345 unsigned long flags; 340 unsigned long flags;
@@ -380,7 +375,7 @@ static void attach_device(struct fsl_dma_domain *dma_domain, int liodn, struct d
380 * Check here if the device is already attached to domain or not. 375 * Check here if the device is already attached to domain or not.
381 * If the device is already attached to a domain detach it. 376 * If the device is already attached to a domain detach it.
382 */ 377 */
383 old_domain_info = find_domain(dev); 378 old_domain_info = dev->archdata.iommu_domain;
384 if (old_domain_info && old_domain_info->domain != dma_domain) { 379 if (old_domain_info && old_domain_info->domain != dma_domain) {
385 spin_unlock_irqrestore(&device_domain_lock, flags); 380 spin_unlock_irqrestore(&device_domain_lock, flags);
386 detach_device(dev, old_domain_info->domain); 381 detach_device(dev, old_domain_info->domain);
@@ -399,7 +394,7 @@ static void attach_device(struct fsl_dma_domain *dma_domain, int liodn, struct d
399 * the info for the first LIODN as all 394 * the info for the first LIODN as all
400 * LIODNs share the same domain 395 * LIODNs share the same domain
401 */ 396 */
402 if (!old_domain_info) 397 if (!dev->archdata.iommu_domain)
403 dev->archdata.iommu_domain = info; 398 dev->archdata.iommu_domain = info;
404 spin_unlock_irqrestore(&device_domain_lock, flags); 399 spin_unlock_irqrestore(&device_domain_lock, flags);
405 400
@@ -1042,12 +1037,15 @@ root_bus:
1042 group = get_shared_pci_device_group(pdev); 1037 group = get_shared_pci_device_group(pdev);
1043 } 1038 }
1044 1039
1040 if (!group)
1041 group = ERR_PTR(-ENODEV);
1042
1045 return group; 1043 return group;
1046} 1044}
1047 1045
1048static int fsl_pamu_add_device(struct device *dev) 1046static int fsl_pamu_add_device(struct device *dev)
1049{ 1047{
1050 struct iommu_group *group = NULL; 1048 struct iommu_group *group = ERR_PTR(-ENODEV);
1051 struct pci_dev *pdev; 1049 struct pci_dev *pdev;
1052 const u32 *prop; 1050 const u32 *prop;
1053 int ret, len; 1051 int ret, len;
@@ -1070,7 +1068,7 @@ static int fsl_pamu_add_device(struct device *dev)
1070 group = get_device_iommu_group(dev); 1068 group = get_device_iommu_group(dev);
1071 } 1069 }
1072 1070
1073 if (!group || IS_ERR(group)) 1071 if (IS_ERR(group))
1074 return PTR_ERR(group); 1072 return PTR_ERR(group);
1075 1073
1076 ret = iommu_group_add_device(group, dev); 1074 ret = iommu_group_add_device(group, dev);
diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c
index 7e11c9d6ae8c..7c131cf7cc13 100644
--- a/drivers/irqchip/irq-gic.c
+++ b/drivers/irqchip/irq-gic.c
@@ -42,6 +42,7 @@
42#include <linux/irqchip/chained_irq.h> 42#include <linux/irqchip/chained_irq.h>
43#include <linux/irqchip/arm-gic.h> 43#include <linux/irqchip/arm-gic.h>
44 44
45#include <asm/cputype.h>
45#include <asm/irq.h> 46#include <asm/irq.h>
46#include <asm/exception.h> 47#include <asm/exception.h>
47#include <asm/smp_plat.h> 48#include <asm/smp_plat.h>
@@ -954,7 +955,9 @@ void __init gic_init_bases(unsigned int gic_nr, int irq_start,
954 } 955 }
955 956
956 for_each_possible_cpu(cpu) { 957 for_each_possible_cpu(cpu) {
957 unsigned long offset = percpu_offset * cpu_logical_map(cpu); 958 u32 mpidr = cpu_logical_map(cpu);
959 u32 core_id = MPIDR_AFFINITY_LEVEL(mpidr, 0);
960 unsigned long offset = percpu_offset * core_id;
958 *per_cpu_ptr(gic->dist_base.percpu_base, cpu) = dist_base + offset; 961 *per_cpu_ptr(gic->dist_base.percpu_base, cpu) = dist_base + offset;
959 *per_cpu_ptr(gic->cpu_base.percpu_base, cpu) = cpu_base + offset; 962 *per_cpu_ptr(gic->cpu_base.percpu_base, cpu) = cpu_base + offset;
960 } 963 }
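The gic_init_bases() fix matters on parts whose MPIDR encodes a cluster id in the upper affinity fields: scaling percpu_offset by the whole cpu_logical_map() value then produces bogus banked-register offsets, while only affinity level 0 (the core number within its cluster) should be used. A sketch of the field extraction, with MPIDR_AFFINITY_LEVEL() open-coded and the values invented for illustration:

#include <stdio.h>
#include <stdint.h>

/* affinity level n lives in bits [8n+7:8n] of the MPIDR, n = 0..2 */
static uint32_t mpidr_affinity_level(uint32_t mpidr, int level)
{
	return (mpidr >> (level * 8)) & 0xff;
}

int main(void)
{
	uint32_t mpidr = 0x0100;         /* cluster 1, core 0 */
	uint32_t percpu_offset = 0x1000;

	printf("broken offset: 0x%lx\n", /* scaled by the raw MPIDR */
	       (unsigned long)percpu_offset * mpidr);
	printf("fixed offset:  0x%lx\n", /* scaled by the core id only */
	       (unsigned long)percpu_offset *
	       mpidr_affinity_level(mpidr, 0));
	return 0;
}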
@@ -1071,8 +1074,10 @@ gic_of_init(struct device_node *node, struct device_node *parent)
1071 gic_cnt++; 1074 gic_cnt++;
1072 return 0; 1075 return 0;
1073} 1076}
1077IRQCHIP_DECLARE(gic_400, "arm,gic-400", gic_of_init);
1074IRQCHIP_DECLARE(cortex_a15_gic, "arm,cortex-a15-gic", gic_of_init); 1078IRQCHIP_DECLARE(cortex_a15_gic, "arm,cortex-a15-gic", gic_of_init);
1075IRQCHIP_DECLARE(cortex_a9_gic, "arm,cortex-a9-gic", gic_of_init); 1079IRQCHIP_DECLARE(cortex_a9_gic, "arm,cortex-a9-gic", gic_of_init);
1080IRQCHIP_DECLARE(cortex_a7_gic, "arm,cortex-a7-gic", gic_of_init);
1076IRQCHIP_DECLARE(msm_8660_qgic, "qcom,msm-8660-qgic", gic_of_init); 1081IRQCHIP_DECLARE(msm_8660_qgic, "qcom,msm-8660-qgic", gic_of_init);
1077IRQCHIP_DECLARE(msm_qgic2, "qcom,msm-qgic2", gic_of_init); 1082IRQCHIP_DECLARE(msm_qgic2, "qcom,msm-qgic2", gic_of_init);
1078 1083
diff --git a/drivers/isdn/i4l/isdn_ppp.c b/drivers/isdn/i4l/isdn_ppp.c
index a333b7f798d1..62f0688d45a5 100644
--- a/drivers/isdn/i4l/isdn_ppp.c
+++ b/drivers/isdn/i4l/isdn_ppp.c
@@ -638,9 +638,15 @@ isdn_ppp_ioctl(int min, struct file *file, unsigned int cmd, unsigned long arg)
638 fprog.len = len; 638 fprog.len = len;
639 fprog.filter = code; 639 fprog.filter = code;
640 640
641 if (is->pass_filter) 641 if (is->pass_filter) {
642 sk_unattached_filter_destroy(is->pass_filter); 642 sk_unattached_filter_destroy(is->pass_filter);
643 err = sk_unattached_filter_create(&is->pass_filter, &fprog); 643 is->pass_filter = NULL;
644 }
645 if (fprog.filter != NULL)
646 err = sk_unattached_filter_create(&is->pass_filter,
647 &fprog);
648 else
649 err = 0;
644 kfree(code); 650 kfree(code);
645 651
646 return err; 652 return err;
@@ -657,9 +663,15 @@ isdn_ppp_ioctl(int min, struct file *file, unsigned int cmd, unsigned long arg)
657 fprog.len = len; 663 fprog.len = len;
658 fprog.filter = code; 664 fprog.filter = code;
659 665
660 if (is->active_filter) 666 if (is->active_filter) {
661 sk_unattached_filter_destroy(is->active_filter); 667 sk_unattached_filter_destroy(is->active_filter);
662 err = sk_unattached_filter_create(&is->active_filter, &fprog); 668 is->active_filter = NULL;
669 }
670 if (fprog.filter != NULL)
671 err = sk_unattached_filter_create(&is->active_filter,
672 &fprog);
673 else
674 err = 0;
663 kfree(code); 675 kfree(code);
664 676
665 return err; 677 return err;
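Both isdn_ppp hunks (and the matching ppp_generic ones further down) converge on one pattern: destroy the old filter and clear the pointer unconditionally, then create a replacement only when the new program is non-empty, so that an empty program means "remove the filter" rather than a NULL program reaching the create routine. A sketch with hypothetical filter_create()/filter_destroy() helpers in place of the sk_unattached_filter API:

#include <stdio.h>
#include <stdlib.h>

struct filter { int len; };

static void filter_destroy(struct filter *f) { free(f); }

static int filter_create(struct filter **out, int len)
{
	*out = malloc(sizeof(**out));
	if (!*out)
		return -1;
	(*out)->len = len;
	return 0;
}

static int set_filter(struct filter **slot, int len) /* len 0 = remove */
{
	int err = 0;

	if (*slot) {
		filter_destroy(*slot);
		*slot = NULL;            /* never leave a stale pointer */
	}
	if (len > 0)
		err = filter_create(slot, len);
	return err;
}

int main(void)
{
	struct filter *f = NULL;

	set_filter(&f, 4);               /* install */
	set_filter(&f, 0);               /* remove: destroy only */
	printf("filter is %s\n", f ? "set" : "clear");
	return 0;
}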
diff --git a/drivers/md/dm-cache-metadata.c b/drivers/md/dm-cache-metadata.c
index 4ead4ba60656..d2899e7eb3aa 100644
--- a/drivers/md/dm-cache-metadata.c
+++ b/drivers/md/dm-cache-metadata.c
@@ -425,6 +425,15 @@ static int __open_metadata(struct dm_cache_metadata *cmd)
425 425
426 disk_super = dm_block_data(sblock); 426 disk_super = dm_block_data(sblock);
427 427
428 /* Verify the data block size hasn't changed */
429 if (le32_to_cpu(disk_super->data_block_size) != cmd->data_block_size) {
430 DMERR("changing the data block size (from %u to %llu) is not supported",
431 le32_to_cpu(disk_super->data_block_size),
432 (unsigned long long)cmd->data_block_size);
433 r = -EINVAL;
434 goto bad;
435 }
436
428 r = __check_incompat_features(disk_super, cmd); 437 r = __check_incompat_features(disk_super, cmd);
429 if (r < 0) 438 if (r < 0)
430 goto bad; 439 goto bad;
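dm-cache here (and dm-thin in the next file) refuses to open a metadata superblock whose recorded data block size differs from what the current table line requests, since reinterpreting existing mappings under a different block size would quietly corrupt them. The shape of that guard, against a simplified superblock:

#include <stdio.h>
#include <stdint.h>
#include <errno.h>

struct disk_super { uint32_t data_block_size; };   /* sectors per block */

static int open_metadata(const struct disk_super *ds, uint64_t requested)
{
	if (ds->data_block_size != requested) {
		fprintf(stderr, "changing the data block size "
			"(from %u to %llu) is not supported\n",
			(unsigned int)ds->data_block_size,
			(unsigned long long)requested);
		return -EINVAL;
	}
	return 0;
}

int main(void)
{
	struct disk_super ds = { .data_block_size = 128 };

	printf("same size: %d\n", open_metadata(&ds, 128));  /* 0 */
	printf("new size:  %d\n", open_metadata(&ds, 256));  /* -22 */
	return 0;
}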
diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
index b086a945edcb..e9d33ad59df5 100644
--- a/drivers/md/dm-thin-metadata.c
+++ b/drivers/md/dm-thin-metadata.c
@@ -613,6 +613,15 @@ static int __open_metadata(struct dm_pool_metadata *pmd)
613 613
614 disk_super = dm_block_data(sblock); 614 disk_super = dm_block_data(sblock);
615 615
616 /* Verify the data block size hasn't changed */
617 if (le32_to_cpu(disk_super->data_block_size) != pmd->data_block_size) {
618 DMERR("changing the data block size (from %u to %llu) is not supported",
619 le32_to_cpu(disk_super->data_block_size),
620 (unsigned long long)pmd->data_block_size);
621 r = -EINVAL;
622 goto bad_unlock_sblock;
623 }
624
616 r = __check_incompat_features(disk_super, pmd); 625 r = __check_incompat_features(disk_super, pmd);
617 if (r < 0) 626 if (r < 0)
618 goto bad_unlock_sblock; 627 goto bad_unlock_sblock;
diff --git a/drivers/media/dvb-frontends/si2168.c b/drivers/media/dvb-frontends/si2168.c
index 8637d2ed7623..2e3cdcfa0a67 100644
--- a/drivers/media/dvb-frontends/si2168.c
+++ b/drivers/media/dvb-frontends/si2168.c
@@ -60,7 +60,7 @@ static int si2168_cmd_execute(struct si2168 *s, struct si2168_cmd *cmd)
60 jiffies_to_msecs(jiffies) - 60 jiffies_to_msecs(jiffies) -
61 (jiffies_to_msecs(timeout) - TIMEOUT)); 61 (jiffies_to_msecs(timeout) - TIMEOUT));
62 62
63 if (!(cmd->args[0] >> 7) & 0x01) { 63 if (!((cmd->args[0] >> 7) & 0x01)) {
64 ret = -ETIMEDOUT; 64 ret = -ETIMEDOUT;
65 goto err_mutex_unlock; 65 goto err_mutex_unlock;
66 } 66 }
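The si2168 one-liner (mirrored later in si2157) is a precedence repair: ! binds tighter than &, so the old expression negated the shifted status first and then masked the resulting boolean, rather than testing bit 7. For the driver's u8 status byte the two forms happen to agree numerically, but the pattern draws a compiler warning and diverges as soon as the operand is wider, as this small demo shows:

#include <stdio.h>

int main(void)
{
	unsigned int v = 0x100;             /* bit 8 set, so v >> 7 == 2 */

	int buggy = !(v >> 7) & 0x01;       /* !(2) & 1  -> 0 */
	int fixed = !((v >> 7) & 0x01);     /* !(2 & 1)  -> 1 */

	printf("buggy=%d fixed=%d\n", buggy, fixed);
	return 0;
}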
@@ -485,20 +485,6 @@ static int si2168_init(struct dvb_frontend *fe)
485 if (ret) 485 if (ret)
486 goto err; 486 goto err;
487 487
488 cmd.args[0] = 0x05;
489 cmd.args[1] = 0x00;
490 cmd.args[2] = 0xaa;
491 cmd.args[3] = 0x4d;
492 cmd.args[4] = 0x56;
493 cmd.args[5] = 0x40;
494 cmd.args[6] = 0x00;
495 cmd.args[7] = 0x00;
496 cmd.wlen = 8;
497 cmd.rlen = 1;
498 ret = si2168_cmd_execute(s, &cmd);
499 if (ret)
500 goto err;
501
502 /* cold state - try to download firmware */ 488 /* cold state - try to download firmware */
503 dev_info(&s->client->dev, "%s: found a '%s' in cold state\n", 489 dev_info(&s->client->dev, "%s: found a '%s' in cold state\n",
504 KBUILD_MODNAME, si2168_ops.info.name); 490 KBUILD_MODNAME, si2168_ops.info.name);
diff --git a/drivers/media/dvb-frontends/si2168_priv.h b/drivers/media/dvb-frontends/si2168_priv.h
index 2a343e896f40..53f7f06ae343 100644
--- a/drivers/media/dvb-frontends/si2168_priv.h
+++ b/drivers/media/dvb-frontends/si2168_priv.h
@@ -22,7 +22,7 @@
22#include <linux/firmware.h> 22#include <linux/firmware.h>
23#include <linux/i2c-mux.h> 23#include <linux/i2c-mux.h>
24 24
25#define SI2168_FIRMWARE "dvb-demod-si2168-01.fw" 25#define SI2168_FIRMWARE "dvb-demod-si2168-02.fw"
26 26
27/* state struct */ 27/* state struct */
28struct si2168 { 28struct si2168 {
diff --git a/drivers/media/dvb-frontends/tda10071.c b/drivers/media/dvb-frontends/tda10071.c
index 522fe00f5eee..9619be5d4827 100644
--- a/drivers/media/dvb-frontends/tda10071.c
+++ b/drivers/media/dvb-frontends/tda10071.c
@@ -668,6 +668,7 @@ static int tda10071_set_frontend(struct dvb_frontend *fe)
668 struct dtv_frontend_properties *c = &fe->dtv_property_cache; 668 struct dtv_frontend_properties *c = &fe->dtv_property_cache;
669 int ret, i; 669 int ret, i;
670 u8 mode, rolloff, pilot, inversion, div; 670 u8 mode, rolloff, pilot, inversion, div;
671 fe_modulation_t modulation;
671 672
672 dev_dbg(&priv->i2c->dev, 673 dev_dbg(&priv->i2c->dev,
673 "%s: delivery_system=%d modulation=%d frequency=%d symbol_rate=%d inversion=%d pilot=%d rolloff=%d\n", 674 "%s: delivery_system=%d modulation=%d frequency=%d symbol_rate=%d inversion=%d pilot=%d rolloff=%d\n",
@@ -702,10 +703,13 @@ static int tda10071_set_frontend(struct dvb_frontend *fe)
702 703
703 switch (c->delivery_system) { 704 switch (c->delivery_system) {
704 case SYS_DVBS: 705 case SYS_DVBS:
706 modulation = QPSK;
705 rolloff = 0; 707 rolloff = 0;
706 pilot = 2; 708 pilot = 2;
707 break; 709 break;
708 case SYS_DVBS2: 710 case SYS_DVBS2:
711 modulation = c->modulation;
712
709 switch (c->rolloff) { 713 switch (c->rolloff) {
710 case ROLLOFF_20: 714 case ROLLOFF_20:
711 rolloff = 2; 715 rolloff = 2;
@@ -750,7 +754,7 @@ static int tda10071_set_frontend(struct dvb_frontend *fe)
750 754
751 for (i = 0, mode = 0xff; i < ARRAY_SIZE(TDA10071_MODCOD); i++) { 755 for (i = 0, mode = 0xff; i < ARRAY_SIZE(TDA10071_MODCOD); i++) {
752 if (c->delivery_system == TDA10071_MODCOD[i].delivery_system && 756 if (c->delivery_system == TDA10071_MODCOD[i].delivery_system &&
753 c->modulation == TDA10071_MODCOD[i].modulation && 757 modulation == TDA10071_MODCOD[i].modulation &&
754 c->fec_inner == TDA10071_MODCOD[i].fec) { 758 c->fec_inner == TDA10071_MODCOD[i].fec) {
755 mode = TDA10071_MODCOD[i].val; 759 mode = TDA10071_MODCOD[i].val;
756 dev_dbg(&priv->i2c->dev, "%s: mode found=%02x\n", 760 dev_dbg(&priv->i2c->dev, "%s: mode found=%02x\n",
@@ -834,10 +838,10 @@ static int tda10071_get_frontend(struct dvb_frontend *fe)
834 838
835 switch ((buf[1] >> 0) & 0x01) { 839 switch ((buf[1] >> 0) & 0x01) {
836 case 0: 840 case 0:
837 c->inversion = INVERSION_OFF; 841 c->inversion = INVERSION_ON;
838 break; 842 break;
839 case 1: 843 case 1:
840 c->inversion = INVERSION_ON; 844 c->inversion = INVERSION_OFF;
841 break; 845 break;
842 } 846 }
843 847
@@ -856,7 +860,7 @@ static int tda10071_get_frontend(struct dvb_frontend *fe)
856 if (ret) 860 if (ret)
857 goto error; 861 goto error;
858 862
859 c->symbol_rate = (buf[0] << 16) | (buf[1] << 8) | (buf[2] << 0); 863 c->symbol_rate = ((buf[0] << 16) | (buf[1] << 8) | (buf[2] << 0)) * 1000;
860 864
861 return ret; 865 return ret;
862error: 866error:
diff --git a/drivers/media/dvb-frontends/tda10071_priv.h b/drivers/media/dvb-frontends/tda10071_priv.h
index 4baf14bfb65a..420486192736 100644
--- a/drivers/media/dvb-frontends/tda10071_priv.h
+++ b/drivers/media/dvb-frontends/tda10071_priv.h
@@ -55,6 +55,7 @@ static struct tda10071_modcod {
55 { SYS_DVBS2, QPSK, FEC_8_9, 0x0a }, 55 { SYS_DVBS2, QPSK, FEC_8_9, 0x0a },
56 { SYS_DVBS2, QPSK, FEC_9_10, 0x0b }, 56 { SYS_DVBS2, QPSK, FEC_9_10, 0x0b },
57 /* 8PSK */ 57 /* 8PSK */
58 { SYS_DVBS2, PSK_8, FEC_AUTO, 0x00 },
58 { SYS_DVBS2, PSK_8, FEC_3_5, 0x0c }, 59 { SYS_DVBS2, PSK_8, FEC_3_5, 0x0c },
59 { SYS_DVBS2, PSK_8, FEC_2_3, 0x0d }, 60 { SYS_DVBS2, PSK_8, FEC_2_3, 0x0d },
60 { SYS_DVBS2, PSK_8, FEC_3_4, 0x0e }, 61 { SYS_DVBS2, PSK_8, FEC_3_4, 0x0e },
diff --git a/drivers/media/pci/saa7134/saa7134-empress.c b/drivers/media/pci/saa7134/saa7134-empress.c
index e65c760e4e8b..0006d6bf8c18 100644
--- a/drivers/media/pci/saa7134/saa7134-empress.c
+++ b/drivers/media/pci/saa7134/saa7134-empress.c
@@ -179,7 +179,7 @@ static const struct v4l2_file_operations ts_fops =
179 .read = vb2_fop_read, 179 .read = vb2_fop_read,
180 .poll = vb2_fop_poll, 180 .poll = vb2_fop_poll,
181 .mmap = vb2_fop_mmap, 181 .mmap = vb2_fop_mmap,
182 .ioctl = video_ioctl2, 182 .unlocked_ioctl = video_ioctl2,
183}; 183};
184 184
185static const struct v4l2_ioctl_ops ts_ioctl_ops = { 185static const struct v4l2_ioctl_ops ts_ioctl_ops = {
diff --git a/drivers/media/platform/davinci/vpif_capture.c b/drivers/media/platform/davinci/vpif_capture.c
index a7ed16497903..1e4ec697fb10 100644
--- a/drivers/media/platform/davinci/vpif_capture.c
+++ b/drivers/media/platform/davinci/vpif_capture.c
@@ -269,6 +269,7 @@ err:
269 list_del(&buf->list); 269 list_del(&buf->list);
270 vb2_buffer_done(&buf->vb, VB2_BUF_STATE_QUEUED); 270 vb2_buffer_done(&buf->vb, VB2_BUF_STATE_QUEUED);
271 } 271 }
272 spin_unlock_irqrestore(&common->irqlock, flags);
272 273
273 return ret; 274 return ret;
274} 275}
diff --git a/drivers/media/platform/davinci/vpif_display.c b/drivers/media/platform/davinci/vpif_display.c
index 5bb085b19bcb..b431b58f39e3 100644
--- a/drivers/media/platform/davinci/vpif_display.c
+++ b/drivers/media/platform/davinci/vpif_display.c
@@ -233,6 +233,7 @@ err:
233 list_del(&buf->list); 233 list_del(&buf->list);
234 vb2_buffer_done(&buf->vb, VB2_BUF_STATE_QUEUED); 234 vb2_buffer_done(&buf->vb, VB2_BUF_STATE_QUEUED);
235 } 235 }
236 spin_unlock_irqrestore(&common->irqlock, flags);
236 237
237 return ret; 238 return ret;
238} 239}
diff --git a/drivers/media/tuners/si2157.c b/drivers/media/tuners/si2157.c
index 271a752cee54..fa4cc7b880aa 100644
--- a/drivers/media/tuners/si2157.c
+++ b/drivers/media/tuners/si2157.c
@@ -57,7 +57,7 @@ static int si2157_cmd_execute(struct si2157 *s, struct si2157_cmd *cmd)
57 jiffies_to_msecs(jiffies) - 57 jiffies_to_msecs(jiffies) -
58 (jiffies_to_msecs(timeout) - TIMEOUT)); 58 (jiffies_to_msecs(timeout) - TIMEOUT));
59 59
60 if (!(buf[0] >> 7) & 0x01) { 60 if (!((buf[0] >> 7) & 0x01)) {
61 ret = -ETIMEDOUT; 61 ret = -ETIMEDOUT;
62 goto err_mutex_unlock; 62 goto err_mutex_unlock;
63 } else { 63 } else {
diff --git a/drivers/media/usb/dvb-usb-v2/af9035.c b/drivers/media/usb/dvb-usb-v2/af9035.c
index 021e4d35e4d7..7b9b75f60774 100644
--- a/drivers/media/usb/dvb-usb-v2/af9035.c
+++ b/drivers/media/usb/dvb-usb-v2/af9035.c
@@ -704,15 +704,41 @@ static int af9035_read_config(struct dvb_usb_device *d)
704 if (ret < 0) 704 if (ret < 0)
705 goto err; 705 goto err;
706 706
707 if (tmp == 0x00) 707 dev_dbg(&d->udev->dev, "%s: [%d]tuner=%02x\n",
708 dev_dbg(&d->udev->dev, 708 __func__, i, tmp);
709 "%s: [%d]tuner not set, using default\n", 709
710 __func__, i); 710 /* tuner sanity check */
711 else 711 if (state->chip_type == 0x9135) {
712 if (state->chip_version == 0x02) {
713 /* IT9135 BX (v2) */
714 switch (tmp) {
715 case AF9033_TUNER_IT9135_60:
716 case AF9033_TUNER_IT9135_61:
717 case AF9033_TUNER_IT9135_62:
718 state->af9033_config[i].tuner = tmp;
719 break;
720 }
721 } else {
722 /* IT9135 AX (v1) */
723 switch (tmp) {
724 case AF9033_TUNER_IT9135_38:
725 case AF9033_TUNER_IT9135_51:
726 case AF9033_TUNER_IT9135_52:
727 state->af9033_config[i].tuner = tmp;
728 break;
729 }
730 }
731 } else {
732 /* AF9035 */
712 state->af9033_config[i].tuner = tmp; 733 state->af9033_config[i].tuner = tmp;
734 }
713 735
714 dev_dbg(&d->udev->dev, "%s: [%d]tuner=%02x\n", 736 if (state->af9033_config[i].tuner != tmp) {
715 __func__, i, state->af9033_config[i].tuner); 737 dev_info(&d->udev->dev,
738 "%s: [%d] overriding tuner from %02x to %02x\n",
739 KBUILD_MODNAME, i, tmp,
740 state->af9033_config[i].tuner);
741 }
716 742
717 switch (state->af9033_config[i].tuner) { 743 switch (state->af9033_config[i].tuner) {
718 case AF9033_TUNER_TUA9001: 744 case AF9033_TUNER_TUA9001:
diff --git a/drivers/media/usb/gspca/pac7302.c b/drivers/media/usb/gspca/pac7302.c
index 2fd1c5e31a0f..339adce7c7a5 100644
--- a/drivers/media/usb/gspca/pac7302.c
+++ b/drivers/media/usb/gspca/pac7302.c
@@ -928,6 +928,7 @@ static const struct usb_device_id device_table[] = {
928 {USB_DEVICE(0x093a, 0x2620)}, 928 {USB_DEVICE(0x093a, 0x2620)},
929 {USB_DEVICE(0x093a, 0x2621)}, 929 {USB_DEVICE(0x093a, 0x2621)},
930 {USB_DEVICE(0x093a, 0x2622), .driver_info = FL_VFLIP}, 930 {USB_DEVICE(0x093a, 0x2622), .driver_info = FL_VFLIP},
931 {USB_DEVICE(0x093a, 0x2623), .driver_info = FL_VFLIP},
931 {USB_DEVICE(0x093a, 0x2624), .driver_info = FL_VFLIP}, 932 {USB_DEVICE(0x093a, 0x2624), .driver_info = FL_VFLIP},
932 {USB_DEVICE(0x093a, 0x2625)}, 933 {USB_DEVICE(0x093a, 0x2625)},
933 {USB_DEVICE(0x093a, 0x2626)}, 934 {USB_DEVICE(0x093a, 0x2626)},
diff --git a/drivers/media/usb/hdpvr/hdpvr-video.c b/drivers/media/usb/hdpvr/hdpvr-video.c
index 0500c4175d5f..6bce01a674f9 100644
--- a/drivers/media/usb/hdpvr/hdpvr-video.c
+++ b/drivers/media/usb/hdpvr/hdpvr-video.c
@@ -82,7 +82,7 @@ static void hdpvr_read_bulk_callback(struct urb *urb)
82} 82}
83 83
84/*=========================================================================*/ 84/*=========================================================================*/
85/* bufffer bits */ 85/* buffer bits */
86 86
87/* function expects dev->io_mutex to be held by caller */ 87/* function expects dev->io_mutex to be held by caller */
88int hdpvr_cancel_queue(struct hdpvr_device *dev) 88int hdpvr_cancel_queue(struct hdpvr_device *dev)
@@ -926,7 +926,7 @@ static int hdpvr_s_ctrl(struct v4l2_ctrl *ctrl)
926 case V4L2_CID_MPEG_AUDIO_ENCODING: 926 case V4L2_CID_MPEG_AUDIO_ENCODING:
927 if (dev->flags & HDPVR_FLAG_AC3_CAP) { 927 if (dev->flags & HDPVR_FLAG_AC3_CAP) {
928 opt->audio_codec = ctrl->val; 928 opt->audio_codec = ctrl->val;
929 return hdpvr_set_audio(dev, opt->audio_input, 929 return hdpvr_set_audio(dev, opt->audio_input + 1,
930 opt->audio_codec); 930 opt->audio_codec);
931 } 931 }
932 return 0; 932 return 0;
@@ -1198,7 +1198,7 @@ int hdpvr_register_videodev(struct hdpvr_device *dev, struct device *parent,
1198 v4l2_ctrl_new_std_menu(hdl, &hdpvr_ctrl_ops, 1198 v4l2_ctrl_new_std_menu(hdl, &hdpvr_ctrl_ops,
1199 V4L2_CID_MPEG_AUDIO_ENCODING, 1199 V4L2_CID_MPEG_AUDIO_ENCODING,
1200 ac3 ? V4L2_MPEG_AUDIO_ENCODING_AC3 : V4L2_MPEG_AUDIO_ENCODING_AAC, 1200 ac3 ? V4L2_MPEG_AUDIO_ENCODING_AC3 : V4L2_MPEG_AUDIO_ENCODING_AAC,
1201 0x7, V4L2_MPEG_AUDIO_ENCODING_AAC); 1201 0x7, ac3 ? dev->options.audio_codec : V4L2_MPEG_AUDIO_ENCODING_AAC);
1202 v4l2_ctrl_new_std_menu(hdl, &hdpvr_ctrl_ops, 1202 v4l2_ctrl_new_std_menu(hdl, &hdpvr_ctrl_ops,
1203 V4L2_CID_MPEG_VIDEO_ENCODING, 1203 V4L2_CID_MPEG_VIDEO_ENCODING,
1204 V4L2_MPEG_VIDEO_ENCODING_MPEG_4_AVC, 0x3, 1204 V4L2_MPEG_VIDEO_ENCODING_MPEG_4_AVC, 0x3,
diff --git a/drivers/media/v4l2-core/v4l2-dv-timings.c b/drivers/media/v4l2-core/v4l2-dv-timings.c
index 4ae54caadd03..ce1c9f5d9dee 100644
--- a/drivers/media/v4l2-core/v4l2-dv-timings.c
+++ b/drivers/media/v4l2-core/v4l2-dv-timings.c
@@ -610,10 +610,10 @@ struct v4l2_fract v4l2_calc_aspect_ratio(u8 hor_landscape, u8 vert_portrait)
610 aspect.denominator = 9; 610 aspect.denominator = 9;
611 } else if (ratio == 34) { 611 } else if (ratio == 34) {
612 aspect.numerator = 4; 612 aspect.numerator = 4;
613 aspect.numerator = 3; 613 aspect.denominator = 3;
614 } else if (ratio == 68) { 614 } else if (ratio == 68) {
615 aspect.numerator = 15; 615 aspect.numerator = 15;
616 aspect.numerator = 9; 616 aspect.denominator = 9;
617 } else { 617 } else {
618 aspect.numerator = hor_landscape + 99; 618 aspect.numerator = hor_landscape + 99;
619 aspect.denominator = 100; 619 aspect.denominator = 100;
diff --git a/drivers/mtd/ubi/fastmap.c b/drivers/mtd/ubi/fastmap.c
index b04e7d059888..0431b46d9fd9 100644
--- a/drivers/mtd/ubi/fastmap.c
+++ b/drivers/mtd/ubi/fastmap.c
@@ -125,7 +125,7 @@ static struct ubi_ainf_volume *add_vol(struct ubi_attach_info *ai, int vol_id,
125 parent = *p; 125 parent = *p;
126 av = rb_entry(parent, struct ubi_ainf_volume, rb); 126 av = rb_entry(parent, struct ubi_ainf_volume, rb);
127 127
128 if (vol_id < av->vol_id) 128 if (vol_id > av->vol_id)
129 p = &(*p)->rb_left; 129 p = &(*p)->rb_left;
130 else 130 else
131 p = &(*p)->rb_right; 131 p = &(*p)->rb_right;
@@ -423,7 +423,7 @@ static int scan_pool(struct ubi_device *ubi, struct ubi_attach_info *ai,
423 pnum, err); 423 pnum, err);
424 ret = err > 0 ? UBI_BAD_FASTMAP : err; 424 ret = err > 0 ? UBI_BAD_FASTMAP : err;
425 goto out; 425 goto out;
426 } else if (ret == UBI_IO_BITFLIPS) 426 } else if (err == UBI_IO_BITFLIPS)
427 scrub = 1; 427 scrub = 1;
428 428
429 /* 429 /*
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_cq.c b/drivers/net/ethernet/mellanox/mlx4/en_cq.c
index 14c00048bbec..82322b1c8411 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_cq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_cq.c
@@ -129,14 +129,15 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
129 name); 129 name);
130 } 130 }
131 131
132 cq->irq_desc =
133 irq_to_desc(mlx4_eq_get_irq(mdev->dev,
134 cq->vector));
135 } 132 }
136 } else { 133 } else {
137 cq->vector = (cq->ring + 1 + priv->port) % 134 cq->vector = (cq->ring + 1 + priv->port) %
138 mdev->dev->caps.num_comp_vectors; 135 mdev->dev->caps.num_comp_vectors;
139 } 136 }
137
138 cq->irq_desc =
139 irq_to_desc(mlx4_eq_get_irq(mdev->dev,
140 cq->vector));
140 } else { 141 } else {
141 /* For TX we use the same irq per 142 /* For TX we use the same irq per
142 ring we assigned for the RX */ 143 ring we assigned for the RX */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mr.c b/drivers/net/ethernet/mellanox/mlx5/core/mr.c
index ba0401d4af50..184c3615f479 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mr.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mr.c
@@ -94,6 +94,11 @@ int mlx5_core_create_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr,
94 write_lock_irq(&table->lock); 94 write_lock_irq(&table->lock);
95 err = radix_tree_insert(&table->tree, mlx5_base_mkey(mr->key), mr); 95 err = radix_tree_insert(&table->tree, mlx5_base_mkey(mr->key), mr);
96 write_unlock_irq(&table->lock); 96 write_unlock_irq(&table->lock);
97 if (err) {
98 mlx5_core_warn(dev, "failed radix tree insert of mr 0x%x, %d\n",
99 mlx5_base_mkey(mr->key), err);
100 mlx5_core_destroy_mkey(dev, mr);
101 }
97 102
98 return err; 103 return err;
99} 104}
@@ -104,12 +109,22 @@ int mlx5_core_destroy_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr)
104 struct mlx5_mr_table *table = &dev->priv.mr_table; 109 struct mlx5_mr_table *table = &dev->priv.mr_table;
105 struct mlx5_destroy_mkey_mbox_in in; 110 struct mlx5_destroy_mkey_mbox_in in;
106 struct mlx5_destroy_mkey_mbox_out out; 111 struct mlx5_destroy_mkey_mbox_out out;
112 struct mlx5_core_mr *deleted_mr;
107 unsigned long flags; 113 unsigned long flags;
108 int err; 114 int err;
109 115
110 memset(&in, 0, sizeof(in)); 116 memset(&in, 0, sizeof(in));
111 memset(&out, 0, sizeof(out)); 117 memset(&out, 0, sizeof(out));
112 118
119 write_lock_irqsave(&table->lock, flags);
120 deleted_mr = radix_tree_delete(&table->tree, mlx5_base_mkey(mr->key));
121 write_unlock_irqrestore(&table->lock, flags);
122 if (!deleted_mr) {
123 mlx5_core_warn(dev, "failed radix tree delete of mr 0x%x\n",
124 mlx5_base_mkey(mr->key));
125 return -ENOENT;
126 }
127
113 in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DESTROY_MKEY); 128 in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DESTROY_MKEY);
114 in.mkey = cpu_to_be32(mlx5_mkey_to_idx(mr->key)); 129 in.mkey = cpu_to_be32(mlx5_mkey_to_idx(mr->key));
115 err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); 130 err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
@@ -119,10 +134,6 @@ int mlx5_core_destroy_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr)
119 if (out.hdr.status) 134 if (out.hdr.status)
120 return mlx5_cmd_status_to_err(&out.hdr); 135 return mlx5_cmd_status_to_err(&out.hdr);
121 136
122 write_lock_irqsave(&table->lock, flags);
123 radix_tree_delete(&table->tree, mlx5_base_mkey(mr->key));
124 write_unlock_irqrestore(&table->lock, flags);
125
126 return err; 137 return err;
127} 138}
128EXPORT_SYMBOL(mlx5_core_destroy_mkey); 139EXPORT_SYMBOL(mlx5_core_destroy_mkey);
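The reordered mlx5_core_destroy_mkey() unpublishes the MR from the radix tree before issuing the destroy command, and returns -ENOENT if it was never inserted; the create path symmetrically destroys the mkey when the tree insert fails. The unpublish-before-free discipline in miniature, with a one-slot table standing in for the radix tree:

#include <stdio.h>
#include <stdlib.h>
#include <errno.h>

struct mr { int key; };

static struct mr *table;                 /* toy single-entry lookup table */

static struct mr *table_delete(void)
{
	struct mr *old = table;

	table = NULL;
	return old;
}

static int destroy_mr(struct mr *mr)
{
	/* step 1: unpublish, so no lookup can return the object */
	struct mr *deleted = table_delete();

	if (!deleted) {
		fprintf(stderr, "mr 0x%x was not published\n", mr->key);
		return -ENOENT;
	}
	/* step 2: only now release the object itself */
	free(mr);
	return 0;
}

int main(void)
{
	struct mr *mr = malloc(sizeof(*mr));

	mr->key = 0x42;
	table = mr;                          /* publish */
	printf("destroy: %d\n", destroy_mr(mr));   /* 0 */
	return 0;
}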
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index 6175bd59190a..9887bcb45b84 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -4226,6 +4226,8 @@ static void rtl_init_rxcfg(struct rtl8169_private *tp)
4226 RTL_W32(RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST); 4226 RTL_W32(RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST);
4227 break; 4227 break;
4228 case RTL_GIGA_MAC_VER_40: 4228 case RTL_GIGA_MAC_VER_40:
4229 RTL_W32(RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST | RX_EARLY_OFF);
4230 break;
4229 case RTL_GIGA_MAC_VER_41: 4231 case RTL_GIGA_MAC_VER_41:
4230 case RTL_GIGA_MAC_VER_42: 4232 case RTL_GIGA_MAC_VER_42:
4231 case RTL_GIGA_MAC_VER_43: 4233 case RTL_GIGA_MAC_VER_43:
diff --git a/drivers/net/ethernet/sun/sunvnet.c b/drivers/net/ethernet/sun/sunvnet.c
index 1c24a8f368bd..fd411d6e19a2 100644
--- a/drivers/net/ethernet/sun/sunvnet.c
+++ b/drivers/net/ethernet/sun/sunvnet.c
@@ -1083,6 +1083,24 @@ static struct vnet *vnet_find_or_create(const u64 *local_mac)
1083 return vp; 1083 return vp;
1084} 1084}
1085 1085
1086static void vnet_cleanup(void)
1087{
1088 struct vnet *vp;
1089 struct net_device *dev;
1090
1091 mutex_lock(&vnet_list_mutex);
1092 while (!list_empty(&vnet_list)) {
1093 vp = list_first_entry(&vnet_list, struct vnet, list);
1094 list_del(&vp->list);
1095 dev = vp->dev;
1096 /* vio_unregister_driver() should have cleaned up port_list */
1097 BUG_ON(!list_empty(&vp->port_list));
1098 unregister_netdev(dev);
1099 free_netdev(dev);
1100 }
1101 mutex_unlock(&vnet_list_mutex);
1102}
1103
1086static const char *local_mac_prop = "local-mac-address"; 1104static const char *local_mac_prop = "local-mac-address";
1087 1105
1088static struct vnet *vnet_find_parent(struct mdesc_handle *hp, 1106static struct vnet *vnet_find_parent(struct mdesc_handle *hp,
@@ -1240,7 +1258,6 @@ static int vnet_port_remove(struct vio_dev *vdev)
1240 1258
1241 kfree(port); 1259 kfree(port);
1242 1260
1243 unregister_netdev(vp->dev);
1244 } 1261 }
1245 return 0; 1262 return 0;
1246} 1263}
@@ -1268,6 +1285,7 @@ static int __init vnet_init(void)
1268static void __exit vnet_exit(void) 1285static void __exit vnet_exit(void)
1269{ 1286{
1270 vio_unregister_driver(&vnet_port_driver); 1287 vio_unregister_driver(&vnet_port_driver);
1288 vnet_cleanup();
1271} 1289}
1272 1290
1273module_init(vnet_init); 1291module_init(vnet_init);
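The sunvnet change moves teardown of the shared net device out of vnet_port_remove(), where removing any single port tore the device down under the remaining ports, and into a module-exit vnet_cleanup() that drains the global vnet_list. The list-draining shape of that cleanup, reduced to a singly linked list:

#include <stdio.h>
#include <stdlib.h>

struct node { struct node *next; int id; };

static struct node *head;

static void cleanup(void)        /* pop and free until the list is empty */
{
	while (head) {
		struct node *n = head;

		head = n->next;
		printf("unregistering vnet %d\n", n->id);
		free(n);
	}
}

int main(void)
{
	for (int i = 0; i < 3; i++) {
		struct node *n = malloc(sizeof(*n));

		n->id = i;
		n->next = head;
		head = n;
	}
	cleanup();
	return 0;
}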
diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
index 2031ce4051dc..765248b42a0a 100644
--- a/drivers/net/ppp/ppp_generic.c
+++ b/drivers/net/ppp/ppp_generic.c
@@ -761,10 +761,15 @@ static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
761 }; 761 };
762 762
763 ppp_lock(ppp); 763 ppp_lock(ppp);
764 if (ppp->pass_filter) 764 if (ppp->pass_filter) {
765 sk_unattached_filter_destroy(ppp->pass_filter); 765 sk_unattached_filter_destroy(ppp->pass_filter);
766 err = sk_unattached_filter_create(&ppp->pass_filter, 766 ppp->pass_filter = NULL;
767 &fprog); 767 }
768 if (fprog.filter != NULL)
769 err = sk_unattached_filter_create(&ppp->pass_filter,
770 &fprog);
771 else
772 err = 0;
768 kfree(code); 773 kfree(code);
769 ppp_unlock(ppp); 774 ppp_unlock(ppp);
770 } 775 }
@@ -782,10 +787,15 @@ static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
782 }; 787 };
783 788
784 ppp_lock(ppp); 789 ppp_lock(ppp);
785 if (ppp->active_filter) 790 if (ppp->active_filter) {
786 sk_unattached_filter_destroy(ppp->active_filter); 791 sk_unattached_filter_destroy(ppp->active_filter);
787 err = sk_unattached_filter_create(&ppp->active_filter, 792 ppp->active_filter = NULL;
788 &fprog); 793 }
794 if (fprog.filter != NULL)
795 err = sk_unattached_filter_create(&ppp->active_filter,
796 &fprog);
797 else
798 err = 0;
789 kfree(code); 799 kfree(code);
790 ppp_unlock(ppp); 800 ppp_unlock(ppp);
791 } 801 }
diff --git a/drivers/net/usb/huawei_cdc_ncm.c b/drivers/net/usb/huawei_cdc_ncm.c
index 5d95a13dbe2a..735f7dadb9a0 100644
--- a/drivers/net/usb/huawei_cdc_ncm.c
+++ b/drivers/net/usb/huawei_cdc_ncm.c
@@ -194,6 +194,9 @@ static const struct usb_device_id huawei_cdc_ncm_devs[] = {
194 { USB_VENDOR_AND_INTERFACE_INFO(0x12d1, 0xff, 0x02, 0x76), 194 { USB_VENDOR_AND_INTERFACE_INFO(0x12d1, 0xff, 0x02, 0x76),
195 .driver_info = (unsigned long)&huawei_cdc_ncm_info, 195 .driver_info = (unsigned long)&huawei_cdc_ncm_info,
196 }, 196 },
197 { USB_VENDOR_AND_INTERFACE_INFO(0x12d1, 0xff, 0x03, 0x16),
198 .driver_info = (unsigned long)&huawei_cdc_ncm_info,
199 },
197 200
198 /* Terminating entry */ 201 /* Terminating entry */
199 { 202 {
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index c4638c67f6b9..22756db53dca 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -667,6 +667,7 @@ static const struct usb_device_id products[] = {
667 {QMI_FIXED_INTF(0x05c6, 0x9084, 4)}, 667 {QMI_FIXED_INTF(0x05c6, 0x9084, 4)},
668 {QMI_FIXED_INTF(0x05c6, 0x920d, 0)}, 668 {QMI_FIXED_INTF(0x05c6, 0x920d, 0)},
669 {QMI_FIXED_INTF(0x05c6, 0x920d, 5)}, 669 {QMI_FIXED_INTF(0x05c6, 0x920d, 5)},
670 {QMI_FIXED_INTF(0x0846, 0x68a2, 8)},
670 {QMI_FIXED_INTF(0x12d1, 0x140c, 1)}, /* Huawei E173 */ 671 {QMI_FIXED_INTF(0x12d1, 0x140c, 1)}, /* Huawei E173 */
671 {QMI_FIXED_INTF(0x12d1, 0x14ac, 1)}, /* Huawei E1820 */ 672 {QMI_FIXED_INTF(0x12d1, 0x14ac, 1)}, /* Huawei E1820 */
672 {QMI_FIXED_INTF(0x16d8, 0x6003, 0)}, /* CMOTech 6003 */ 673 {QMI_FIXED_INTF(0x16d8, 0x6003, 0)}, /* CMOTech 6003 */
@@ -757,6 +758,7 @@ static const struct usb_device_id products[] = {
757 {QMI_FIXED_INTF(0x1199, 0x9054, 8)}, /* Sierra Wireless Modem */ 758 {QMI_FIXED_INTF(0x1199, 0x9054, 8)}, /* Sierra Wireless Modem */
758 {QMI_FIXED_INTF(0x1199, 0x9055, 8)}, /* Netgear AirCard 341U */ 759 {QMI_FIXED_INTF(0x1199, 0x9055, 8)}, /* Netgear AirCard 341U */
759 {QMI_FIXED_INTF(0x1199, 0x9056, 8)}, /* Sierra Wireless Modem */ 760 {QMI_FIXED_INTF(0x1199, 0x9056, 8)}, /* Sierra Wireless Modem */
761 {QMI_FIXED_INTF(0x1199, 0x9057, 8)},
760 {QMI_FIXED_INTF(0x1199, 0x9061, 8)}, /* Sierra Wireless Modem */ 762 {QMI_FIXED_INTF(0x1199, 0x9061, 8)}, /* Sierra Wireless Modem */
761 {QMI_FIXED_INTF(0x1bbb, 0x011e, 4)}, /* Telekom Speedstick LTE II (Alcatel One Touch L100V LTE) */ 763 {QMI_FIXED_INTF(0x1bbb, 0x011e, 4)}, /* Telekom Speedstick LTE II (Alcatel One Touch L100V LTE) */
762 {QMI_FIXED_INTF(0x1bbb, 0x0203, 2)}, /* Alcatel L800MA */ 764 {QMI_FIXED_INTF(0x1bbb, 0x0203, 2)}, /* Alcatel L800MA */
diff --git a/drivers/net/wan/x25_asy.c b/drivers/net/wan/x25_asy.c
index df6c07357556..5c47b011a9d7 100644
--- a/drivers/net/wan/x25_asy.c
+++ b/drivers/net/wan/x25_asy.c
@@ -122,8 +122,12 @@ static int x25_asy_change_mtu(struct net_device *dev, int newmtu)
122{ 122{
123 struct x25_asy *sl = netdev_priv(dev); 123 struct x25_asy *sl = netdev_priv(dev);
124 unsigned char *xbuff, *rbuff; 124 unsigned char *xbuff, *rbuff;
125 int len = 2 * newmtu; 125 int len;
126 126
127 if (newmtu > 65534)
128 return -EINVAL;
129
130 len = 2 * newmtu;
127 xbuff = kmalloc(len + 4, GFP_ATOMIC); 131 xbuff = kmalloc(len + 4, GFP_ATOMIC);
128 rbuff = kmalloc(len + 4, GFP_ATOMIC); 132 rbuff = kmalloc(len + 4, GFP_ATOMIC);
129 133
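x25_asy_change_mtu() sizes two buffers from 2 * newmtu + 4, and newmtu is a user-controlled int, so the new upper bound rejects values that would overflow the length arithmetic before any allocation is attempted. The guard in isolation, with malloc() in place of kmalloc():

#include <stdio.h>
#include <stdlib.h>
#include <errno.h>

static int change_mtu(int newmtu)
{
	char *xbuff, *rbuff;
	int len;

	if (newmtu > 65534)              /* reject before multiplying */
		return -EINVAL;

	len = 2 * newmtu;                /* now at most 131068 */
	xbuff = malloc(len + 4);
	rbuff = malloc(len + 4);
	if (!xbuff || !rbuff) {
		free(xbuff);
		free(rbuff);
		return -ENOMEM;
	}
	free(xbuff);
	free(rbuff);
	return 0;
}

int main(void)
{
	printf("mtu 1500:    %d\n", change_mtu(1500));      /* 0 */
	printf("mtu 1 << 30: %d\n", change_mtu(1 << 30));   /* -22 */
	return 0;
}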
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index 77127ca08ca4..769e553d3f45 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -1030,14 +1030,21 @@ static int xenvif_tx_check_gop(struct xenvif_queue *queue,
1030{ 1030{
1031 struct gnttab_map_grant_ref *gop_map = *gopp_map; 1031 struct gnttab_map_grant_ref *gop_map = *gopp_map;
1032 u16 pending_idx = XENVIF_TX_CB(skb)->pending_idx; 1032 u16 pending_idx = XENVIF_TX_CB(skb)->pending_idx;
1033 /* This always points to the shinfo of the skb being checked, which
1034 * could be either the first or the one on the frag_list
1035 */
1033 struct skb_shared_info *shinfo = skb_shinfo(skb); 1036 struct skb_shared_info *shinfo = skb_shinfo(skb);
1037 /* If this is non-NULL, we are currently checking the frag_list skb, and
1038 * this points to the shinfo of the first one
1039 */
1040 struct skb_shared_info *first_shinfo = NULL;
1034 int nr_frags = shinfo->nr_frags; 1041 int nr_frags = shinfo->nr_frags;
1042 const bool sharedslot = nr_frags &&
1043 frag_get_pending_idx(&shinfo->frags[0]) == pending_idx;
1035 int i, err; 1044 int i, err;
1036 struct sk_buff *first_skb = NULL;
1037 1045
1038 /* Check status of header. */ 1046 /* Check status of header. */
1039 err = (*gopp_copy)->status; 1047 err = (*gopp_copy)->status;
1040 (*gopp_copy)++;
1041 if (unlikely(err)) { 1048 if (unlikely(err)) {
1042 if (net_ratelimit()) 1049 if (net_ratelimit())
1043 netdev_dbg(queue->vif->dev, 1050 netdev_dbg(queue->vif->dev,
@@ -1045,8 +1052,12 @@ static int xenvif_tx_check_gop(struct xenvif_queue *queue,
1045 (*gopp_copy)->status, 1052 (*gopp_copy)->status,
1046 pending_idx, 1053 pending_idx,
1047 (*gopp_copy)->source.u.ref); 1054 (*gopp_copy)->source.u.ref);
1048 xenvif_idx_release(queue, pending_idx, XEN_NETIF_RSP_ERROR); 1055 /* The first frag might still have this slot mapped */
1056 if (!sharedslot)
1057 xenvif_idx_release(queue, pending_idx,
1058 XEN_NETIF_RSP_ERROR);
1049 } 1059 }
1060 (*gopp_copy)++;
1050 1061
1051check_frags: 1062check_frags:
1052 for (i = 0; i < nr_frags; i++, gop_map++) { 1063 for (i = 0; i < nr_frags; i++, gop_map++) {
@@ -1062,8 +1073,19 @@ check_frags:
1062 pending_idx, 1073 pending_idx,
1063 gop_map->handle); 1074 gop_map->handle);
1064 /* Had a previous error? Invalidate this fragment. */ 1075 /* Had a previous error? Invalidate this fragment. */
1065 if (unlikely(err)) 1076 if (unlikely(err)) {
1066 xenvif_idx_unmap(queue, pending_idx); 1077 xenvif_idx_unmap(queue, pending_idx);
1078 /* If the mapping of the first frag was OK, but
1079 * the header's copy failed, and they are
1080 * sharing a slot, send an error
1081 */
1082 if (i == 0 && sharedslot)
1083 xenvif_idx_release(queue, pending_idx,
1084 XEN_NETIF_RSP_ERROR);
1085 else
1086 xenvif_idx_release(queue, pending_idx,
1087 XEN_NETIF_RSP_OKAY);
1088 }
1067 continue; 1089 continue;
1068 } 1090 }
1069 1091
@@ -1075,42 +1097,53 @@ check_frags:
1075 gop_map->status, 1097 gop_map->status,
1076 pending_idx, 1098 pending_idx,
1077 gop_map->ref); 1099 gop_map->ref);
1100
1078 xenvif_idx_release(queue, pending_idx, XEN_NETIF_RSP_ERROR); 1101 xenvif_idx_release(queue, pending_idx, XEN_NETIF_RSP_ERROR);
1079 1102
1080 /* Not the first error? Preceding frags already invalidated. */ 1103 /* Not the first error? Preceding frags already invalidated. */
1081 if (err) 1104 if (err)
1082 continue; 1105 continue;
1083 /* First error: invalidate preceding fragments. */ 1106
 1107	 /* First error: if the header hasn't shared a slot with the
1108 * first frag, release it as well.
1109 */
1110 if (!sharedslot)
1111 xenvif_idx_release(queue,
1112 XENVIF_TX_CB(skb)->pending_idx,
1113 XEN_NETIF_RSP_OKAY);
1114
1115 /* Invalidate preceding fragments of this skb. */
1084 for (j = 0; j < i; j++) { 1116 for (j = 0; j < i; j++) {
1085 pending_idx = frag_get_pending_idx(&shinfo->frags[j]); 1117 pending_idx = frag_get_pending_idx(&shinfo->frags[j]);
1086 xenvif_idx_unmap(queue, pending_idx); 1118 xenvif_idx_unmap(queue, pending_idx);
1119 xenvif_idx_release(queue, pending_idx,
1120 XEN_NETIF_RSP_OKAY);
1121 }
1122
1123 /* And if we found the error while checking the frag_list, unmap
1124 * the first skb's frags
1125 */
1126 if (first_shinfo) {
1127 for (j = 0; j < first_shinfo->nr_frags; j++) {
1128 pending_idx = frag_get_pending_idx(&first_shinfo->frags[j]);
1129 xenvif_idx_unmap(queue, pending_idx);
1130 xenvif_idx_release(queue, pending_idx,
1131 XEN_NETIF_RSP_OKAY);
1132 }
1087 } 1133 }
1088 1134
1089 /* Remember the error: invalidate all subsequent fragments. */ 1135 /* Remember the error: invalidate all subsequent fragments. */
1090 err = newerr; 1136 err = newerr;
1091 } 1137 }
1092 1138
1093 if (skb_has_frag_list(skb)) { 1139 if (skb_has_frag_list(skb) && !first_shinfo) {
1094 first_skb = skb; 1140 first_shinfo = skb_shinfo(skb);
1095 skb = shinfo->frag_list; 1141 shinfo = skb_shinfo(skb_shinfo(skb)->frag_list);
1096 shinfo = skb_shinfo(skb);
1097 nr_frags = shinfo->nr_frags; 1142 nr_frags = shinfo->nr_frags;
1098 1143
1099 goto check_frags; 1144 goto check_frags;
1100 } 1145 }
1101 1146
1102 /* There was a mapping error in the frag_list skb. We have to unmap
1103 * the first skb's frags
1104 */
1105 if (first_skb && err) {
1106 int j;
1107 shinfo = skb_shinfo(first_skb);
1108 for (j = 0; j < shinfo->nr_frags; j++) {
1109 pending_idx = frag_get_pending_idx(&shinfo->frags[j]);
1110 xenvif_idx_unmap(queue, pending_idx);
1111 }
1112 }
1113
1114 *gopp_map = gop_map; 1147 *gopp_map = gop_map;
1115 return err; 1148 return err;
1116} 1149}
@@ -1518,7 +1551,16 @@ static int xenvif_tx_submit(struct xenvif_queue *queue)
1518 1551
1519 /* Check the remap error code. */ 1552 /* Check the remap error code. */
1520 if (unlikely(xenvif_tx_check_gop(queue, skb, &gop_map, &gop_copy))) { 1553 if (unlikely(xenvif_tx_check_gop(queue, skb, &gop_map, &gop_copy))) {
1554 /* If there was an error, xenvif_tx_check_gop is
1555 * expected to release all the frags which were mapped,
1556 * so kfree_skb shouldn't do it again
1557 */
1521 skb_shinfo(skb)->nr_frags = 0; 1558 skb_shinfo(skb)->nr_frags = 0;
1559 if (skb_has_frag_list(skb)) {
1560 struct sk_buff *nskb =
1561 skb_shinfo(skb)->frag_list;
1562 skb_shinfo(nskb)->nr_frags = 0;
1563 }
1522 kfree_skb(skb); 1564 kfree_skb(skb);
1523 continue; 1565 continue;
1524 } 1566 }
@@ -1822,8 +1864,6 @@ void xenvif_idx_unmap(struct xenvif_queue *queue, u16 pending_idx)
1822 tx_unmap_op.status); 1864 tx_unmap_op.status);
1823 BUG(); 1865 BUG();
1824 } 1866 }
1825
1826 xenvif_idx_release(queue, pending_idx, XEN_NETIF_RSP_OKAY);
1827} 1867}
1828 1868
1829static inline int rx_work_todo(struct xenvif_queue *queue) 1869static inline int rx_work_todo(struct xenvif_queue *queue)
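The netback rework above enforces one invariant: every pending slot is released exactly once, even when the packet header and the first frag share a slot (the new sharedslot flag). Below is a minimal userspace model of that accounting, not kernel code; all names are hypothetical, and the assert trips if any path releases a slot twice.

```c
#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

#define NSLOTS 4

static bool released[NSLOTS];

static void release_slot(int idx, const char *rsp)
{
	assert(!released[idx] && "pending slot released twice");
	released[idx] = true;
	printf("slot %d released with %s\n", idx, rsp);
}

int main(void)
{
	int header_slot = 0;
	int first_frag_slot = 0;            /* frag 0 shares the header's slot */
	bool sharedslot = (first_frag_slot == header_slot);
	bool header_copy_failed = true;     /* simulate the copy error path */

	/* Header error: defer the release when the first frag owns it too. */
	if (header_copy_failed && !sharedslot)
		release_slot(header_slot, "ERROR");

	/* Frag pass: the shared slot is released here, exactly once. */
	if (header_copy_failed && sharedslot)
		release_slot(first_frag_slot, "ERROR");
	else
		release_slot(first_frag_slot, "OKAY");

	return 0;
}
```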
diff --git a/drivers/s390/char/raw3270.c b/drivers/s390/char/raw3270.c
index 15b3459f8656..220acb4cbee5 100644
--- a/drivers/s390/char/raw3270.c
+++ b/drivers/s390/char/raw3270.c
@@ -633,7 +633,6 @@ raw3270_reset_device_cb(struct raw3270_request *rq, void *data)
633 } else 633 } else
634 raw3270_writesf_readpart(rp); 634 raw3270_writesf_readpart(rp);
635 memset(&rp->init_reset, 0, sizeof(rp->init_reset)); 635 memset(&rp->init_reset, 0, sizeof(rp->init_reset));
636 memset(&rp->init_data, 0, sizeof(rp->init_data));
637} 636}
638 637
639static int 638static int
diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c
index 69ef4f8cfac8..4038437ff033 100644
--- a/drivers/s390/crypto/ap_bus.c
+++ b/drivers/s390/crypto/ap_bus.c
@@ -901,10 +901,15 @@ static int ap_device_probe(struct device *dev)
901 int rc; 901 int rc;
902 902
903 ap_dev->drv = ap_drv; 903 ap_dev->drv = ap_drv;
904
905 spin_lock_bh(&ap_device_list_lock);
906 list_add(&ap_dev->list, &ap_device_list);
907 spin_unlock_bh(&ap_device_list_lock);
908
904 rc = ap_drv->probe ? ap_drv->probe(ap_dev) : -ENODEV; 909 rc = ap_drv->probe ? ap_drv->probe(ap_dev) : -ENODEV;
905 if (!rc) { 910 if (rc) {
906 spin_lock_bh(&ap_device_list_lock); 911 spin_lock_bh(&ap_device_list_lock);
907 list_add(&ap_dev->list, &ap_device_list); 912 list_del_init(&ap_dev->list);
908 spin_unlock_bh(&ap_device_list_lock); 913 spin_unlock_bh(&ap_device_list_lock);
909 } 914 }
910 return rc; 915 return rc;
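The ap_bus hunk reorders registration so the device is on ap_device_list before the driver's probe callback runs, and is removed again if probe fails. A userspace sketch of that ordering follows, with hypothetical names, assuming the kernel version holds ap_device_list_lock around the list operations.

```c
#include <stdio.h>
#include <string.h>

struct dev {
	const char *name;
	struct dev *next;
};

static struct dev *dev_list;

static void list_add(struct dev *d)
{
	d->next = dev_list;
	dev_list = d;
}

static void list_del(struct dev *d)
{
	struct dev **p = &dev_list;

	while (*p && *p != d)
		p = &(*p)->next;
	if (*p)
		*p = d->next;
}

static int probe(struct dev *d)
{
	/* Callbacks fired from probe can already find d on dev_list. */
	return strcmp(d->name, "bad") ? 0 : -19;	/* -ENODEV for "bad" */
}

static int device_probe(struct dev *d)
{
	int rc;

	list_add(d);		/* visible before probe runs */
	rc = probe(d);
	if (rc)
		list_del(d);	/* failed probe leaves no stale entry */
	return rc;
}

int main(void)
{
	struct dev good = { .name = "card0" }, bad = { .name = "bad" };

	printf("good: rc=%d\n", device_probe(&good));
	printf("bad:  rc=%d, list head=%s\n", device_probe(&bad),
	       dev_list ? dev_list->name : "(empty)");
	return 0;
}
```

The point of the ordering is that anything probe triggers can already find the device on the list, while the failure path guarantees no stale entry survives.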
diff --git a/drivers/staging/media/omap4iss/Kconfig b/drivers/staging/media/omap4iss/Kconfig
index 78b0fba7047e..8afc6fee40c5 100644
--- a/drivers/staging/media/omap4iss/Kconfig
+++ b/drivers/staging/media/omap4iss/Kconfig
@@ -1,6 +1,6 @@
1config VIDEO_OMAP4 1config VIDEO_OMAP4
2 bool "OMAP 4 Camera support" 2 bool "OMAP 4 Camera support"
3 depends on VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API && I2C && ARCH_OMAP4 3 depends on VIDEO_V4L2=y && VIDEO_V4L2_SUBDEV_API && I2C=y && ARCH_OMAP4
4 select VIDEOBUF2_DMA_CONTIG 4 select VIDEOBUF2_DMA_CONTIG
5 ---help--- 5 ---help---
6 Driver for an OMAP 4 ISS controller. 6 Driver for an OMAP 4 ISS controller.
diff --git a/drivers/usb/chipidea/udc.c b/drivers/usb/chipidea/udc.c
index 9d2b673f90e3..b8125aa64ad8 100644
--- a/drivers/usb/chipidea/udc.c
+++ b/drivers/usb/chipidea/udc.c
@@ -1169,8 +1169,8 @@ static int ep_enable(struct usb_ep *ep,
1169 1169
1170 if (hwep->type == USB_ENDPOINT_XFER_CONTROL) 1170 if (hwep->type == USB_ENDPOINT_XFER_CONTROL)
1171 cap |= QH_IOS; 1171 cap |= QH_IOS;
1172 if (hwep->num) 1172
1173 cap |= QH_ZLT; 1173 cap |= QH_ZLT;
1174 cap |= (hwep->ep.maxpacket << __ffs(QH_MAX_PKT)) & QH_MAX_PKT; 1174 cap |= (hwep->ep.maxpacket << __ffs(QH_MAX_PKT)) & QH_MAX_PKT;
1175 /* 1175 /*
1176 * For ISO-TX, we set mult at QH as the largest value, and use 1176 * For ISO-TX, we set mult at QH as the largest value, and use
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index 21b99b4b4082..0e950ad8cb25 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -889,6 +889,25 @@ static int hub_usb3_port_disable(struct usb_hub *hub, int port1)
889 if (!hub_is_superspeed(hub->hdev)) 889 if (!hub_is_superspeed(hub->hdev))
890 return -EINVAL; 890 return -EINVAL;
891 891
892 ret = hub_port_status(hub, port1, &portstatus, &portchange);
893 if (ret < 0)
894 return ret;
895
896 /*
897 * USB controller Advanced Micro Devices, Inc. [AMD] FCH USB XHCI
 898	 * Controller [1022:7814] returns a spurious result, causing a
 899	 * subsequently hot-plugged USB 3.0 device to be routed to the 2.0
 900	 * root hub and recognized as a high-speed device, if we set the USB
 901	 * 3.0 port link state to Disabled. Since the port is already in the
 902	 * USB_SS_PORT_LS_RX_DETECT state, check the state here to avoid the bug.
903 */
904 if ((portstatus & USB_PORT_STAT_LINK_STATE) ==
905 USB_SS_PORT_LS_RX_DETECT) {
906 dev_dbg(&hub->ports[port1 - 1]->dev,
907 "Not disabling port; link state is RxDetect\n");
908 return ret;
909 }
910
892 ret = hub_set_port_link_state(hub, port1, USB_SS_PORT_LS_SS_DISABLED); 911 ret = hub_set_port_link_state(hub, port1, USB_SS_PORT_LS_SS_DISABLED);
893 if (ret) 912 if (ret)
894 return ret; 913 return ret;
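The hub.c hunk adds a read-before-write guard: query the port status first and skip the Disabled transition when the link is already in RxDetect, working around the AMD FCH xHCI quirk described in the comment. A toy model of that guard, with illustrative names and constants:

```c
#include <stdio.h>

enum link_state { LS_RX_DETECT, LS_U0, LS_SS_DISABLED };

static enum link_state port_state = LS_RX_DETECT;

static int set_link_state(enum link_state ls)
{
	port_state = ls;
	return 0;
}

static int port_disable(void)
{
	/* Querying first avoids the spurious re-enumeration seen on the
	 * affected AMD xHCI controller. */
	if (port_state == LS_RX_DETECT) {
		printf("Not disabling port; link state is RxDetect\n");
		return 0;
	}
	return set_link_state(LS_SS_DISABLED);
}

int main(void)
{
	return port_disable();
}
```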
diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c
index b7a506f2bb14..5c660c77f03b 100644
--- a/drivers/xen/balloon.c
+++ b/drivers/xen/balloon.c
@@ -426,20 +426,18 @@ static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp)
426 * p2m are consistent. 426 * p2m are consistent.
427 */ 427 */
428 if (!xen_feature(XENFEAT_auto_translated_physmap)) { 428 if (!xen_feature(XENFEAT_auto_translated_physmap)) {
429 unsigned long p;
430 struct page *scratch_page = get_balloon_scratch_page();
431
432 if (!PageHighMem(page)) { 429 if (!PageHighMem(page)) {
430 struct page *scratch_page = get_balloon_scratch_page();
431
433 ret = HYPERVISOR_update_va_mapping( 432 ret = HYPERVISOR_update_va_mapping(
434 (unsigned long)__va(pfn << PAGE_SHIFT), 433 (unsigned long)__va(pfn << PAGE_SHIFT),
435 pfn_pte(page_to_pfn(scratch_page), 434 pfn_pte(page_to_pfn(scratch_page),
436 PAGE_KERNEL_RO), 0); 435 PAGE_KERNEL_RO), 0);
437 BUG_ON(ret); 436 BUG_ON(ret);
438 }
439 p = page_to_pfn(scratch_page);
440 __set_phys_to_machine(pfn, pfn_to_mfn(p));
441 437
442 put_balloon_scratch_page(); 438 put_balloon_scratch_page();
439 }
440 __set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
443 } 441 }
444#endif 442#endif
445 443
diff --git a/drivers/xen/manage.c b/drivers/xen/manage.c
index c3667b202f2f..5f1e1f3cd186 100644
--- a/drivers/xen/manage.c
+++ b/drivers/xen/manage.c
@@ -88,7 +88,6 @@ static int xen_suspend(void *data)
88 88
89 if (!si->cancelled) { 89 if (!si->cancelled) {
90 xen_irq_resume(); 90 xen_irq_resume();
91 xen_console_resume();
92 xen_timer_resume(); 91 xen_timer_resume();
93 } 92 }
94 93
@@ -135,6 +134,10 @@ static void do_suspend(void)
135 134
136 err = stop_machine(xen_suspend, &si, cpumask_of(0)); 135 err = stop_machine(xen_suspend, &si, cpumask_of(0));
137 136
137 /* Resume console as early as possible. */
138 if (!si.cancelled)
139 xen_console_resume();
140
138 raw_notifier_call_chain(&xen_resume_notifier, 0, NULL); 141 raw_notifier_call_chain(&xen_resume_notifier, 0, NULL);
139 142
140 dpm_resume_start(si.cancelled ? PMSG_THAW : PMSG_RESTORE); 143 dpm_resume_start(si.cancelled ? PMSG_THAW : PMSG_RESTORE);
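The manage.c change hoists the console resume out of the stop_machine() section so messages appear as early as possible after resume, ahead of the notifier chain and device resume. A sketch of the resulting ordering, with hypothetical names:

```c
#include <stdbool.h>
#include <stdio.h>

static void console_resume(void)  { puts("1. console up (early)"); }
static void notifier_chain(void)  { puts("2. resume notifiers run"); }
static void device_resume(void)   { puts("3. devices resumed"); }

static void do_resume(bool cancelled)
{
	/* ... stop_machine(xen_suspend) has just returned ... */
	if (!cancelled)
		console_resume();	/* hoisted out of xen_suspend() */
	notifier_chain();
	device_resume();
}

int main(void)
{
	do_resume(false);
	return 0;
}
```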
diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c
index e12441c7cf1d..7187b14faa6c 100644
--- a/fs/btrfs/ordered-data.c
+++ b/fs/btrfs/ordered-data.c
@@ -484,8 +484,19 @@ void btrfs_wait_logged_extents(struct btrfs_root *log, u64 transid)
484 log_list); 484 log_list);
485 list_del_init(&ordered->log_list); 485 list_del_init(&ordered->log_list);
486 spin_unlock_irq(&log->log_extents_lock[index]); 486 spin_unlock_irq(&log->log_extents_lock[index]);
487
488 if (!test_bit(BTRFS_ORDERED_IO_DONE, &ordered->flags) &&
489 !test_bit(BTRFS_ORDERED_DIRECT, &ordered->flags)) {
490 struct inode *inode = ordered->inode;
491 u64 start = ordered->file_offset;
492 u64 end = ordered->file_offset + ordered->len - 1;
493
494 WARN_ON(!inode);
495 filemap_fdatawrite_range(inode->i_mapping, start, end);
496 }
487 wait_event(ordered->wait, test_bit(BTRFS_ORDERED_IO_DONE, 497 wait_event(ordered->wait, test_bit(BTRFS_ORDERED_IO_DONE,
488 &ordered->flags)); 498 &ordered->flags));
499
489 btrfs_put_ordered_extent(ordered); 500 btrfs_put_ordered_extent(ordered);
490 spin_lock_irq(&log->log_extents_lock[index]); 501 spin_lock_irq(&log->log_extents_lock[index]);
491 } 502 }
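The ordered-data hunk follows the rule of kicking off the I/O you are about to wait on: if the ordered extent's pages were never submitted, waiting for BTRFS_ORDERED_IO_DONE would block forever, so writeback is started for the range first. A minimal model of that pattern, with hypothetical names; the busy-wait stands in for wait_event():

```c
#include <stdbool.h>
#include <stdio.h>

struct ordered_extent {
	bool io_submitted;
	bool io_done;
};

static void fdatawrite_range(struct ordered_extent *oe)
{
	if (!oe->io_submitted) {
		oe->io_submitted = true;
		oe->io_done = true;	/* model: submission completes the I/O */
	}
}

static void wait_logged_extent(struct ordered_extent *oe)
{
	if (!oe->io_done)
		fdatawrite_range(oe);	/* without this, the wait never ends */
	while (!oe->io_done)
		;			/* wait_event() stand-in */
	puts("extent complete");
}

int main(void)
{
	struct ordered_extent oe = { false, false };

	wait_logged_extent(&oe);
	return 0;
}
```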
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index 6104676857f5..6cb82f62cb7c 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -1680,11 +1680,11 @@ int btrfs_rm_device(struct btrfs_root *root, char *device_path)
1680 if (device->bdev == root->fs_info->fs_devices->latest_bdev) 1680 if (device->bdev == root->fs_info->fs_devices->latest_bdev)
1681 root->fs_info->fs_devices->latest_bdev = next_device->bdev; 1681 root->fs_info->fs_devices->latest_bdev = next_device->bdev;
1682 1682
1683 if (device->bdev) 1683 if (device->bdev) {
1684 device->fs_devices->open_devices--; 1684 device->fs_devices->open_devices--;
1685 1685 /* remove sysfs entry */
1686 /* remove sysfs entry */ 1686 btrfs_kobj_rm_device(root->fs_info, device);
1687 btrfs_kobj_rm_device(root->fs_info, device); 1687 }
1688 1688
1689 call_rcu(&device->rcu, free_device); 1689 call_rcu(&device->rcu, free_device);
1690 1690
diff --git a/fs/gfs2/file.c b/fs/gfs2/file.c
index 4fc3a3046174..26b3f952e6b1 100644
--- a/fs/gfs2/file.c
+++ b/fs/gfs2/file.c
@@ -981,7 +981,7 @@ static int do_flock(struct file *file, int cmd, struct file_lock *fl)
981 int error = 0; 981 int error = 0;
982 982
983 state = (fl->fl_type == F_WRLCK) ? LM_ST_EXCLUSIVE : LM_ST_SHARED; 983 state = (fl->fl_type == F_WRLCK) ? LM_ST_EXCLUSIVE : LM_ST_SHARED;
984 flags = (IS_SETLKW(cmd) ? 0 : LM_FLAG_TRY) | GL_EXACT | GL_NOCACHE; 984 flags = (IS_SETLKW(cmd) ? 0 : LM_FLAG_TRY) | GL_EXACT;
985 985
986 mutex_lock(&fp->f_fl_mutex); 986 mutex_lock(&fp->f_fl_mutex);
987 987
@@ -991,7 +991,7 @@ static int do_flock(struct file *file, int cmd, struct file_lock *fl)
991 goto out; 991 goto out;
992 flock_lock_file_wait(file, 992 flock_lock_file_wait(file,
993 &(struct file_lock){.fl_type = F_UNLCK}); 993 &(struct file_lock){.fl_type = F_UNLCK});
994 gfs2_glock_dq_wait(fl_gh); 994 gfs2_glock_dq(fl_gh);
995 gfs2_holder_reinit(state, flags, fl_gh); 995 gfs2_holder_reinit(state, flags, fl_gh);
996 } else { 996 } else {
997 error = gfs2_glock_get(GFS2_SB(&ip->i_inode), ip->i_no_addr, 997 error = gfs2_glock_get(GFS2_SB(&ip->i_inode), ip->i_no_addr,
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index c355f7320e44..ee4e04fe60fc 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -731,14 +731,14 @@ int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
731 cachep = gfs2_glock_aspace_cachep; 731 cachep = gfs2_glock_aspace_cachep;
732 else 732 else
733 cachep = gfs2_glock_cachep; 733 cachep = gfs2_glock_cachep;
734 gl = kmem_cache_alloc(cachep, GFP_KERNEL); 734 gl = kmem_cache_alloc(cachep, GFP_NOFS);
735 if (!gl) 735 if (!gl)
736 return -ENOMEM; 736 return -ENOMEM;
737 737
738 memset(&gl->gl_lksb, 0, sizeof(struct dlm_lksb)); 738 memset(&gl->gl_lksb, 0, sizeof(struct dlm_lksb));
739 739
740 if (glops->go_flags & GLOF_LVB) { 740 if (glops->go_flags & GLOF_LVB) {
741 gl->gl_lksb.sb_lvbptr = kzalloc(GFS2_MIN_LVB_SIZE, GFP_KERNEL); 741 gl->gl_lksb.sb_lvbptr = kzalloc(GFS2_MIN_LVB_SIZE, GFP_NOFS);
742 if (!gl->gl_lksb.sb_lvbptr) { 742 if (!gl->gl_lksb.sb_lvbptr) {
743 kmem_cache_free(cachep, gl); 743 kmem_cache_free(cachep, gl);
744 return -ENOMEM; 744 return -ENOMEM;
@@ -1404,12 +1404,16 @@ __acquires(&lru_lock)
1404 gl = list_entry(list->next, struct gfs2_glock, gl_lru); 1404 gl = list_entry(list->next, struct gfs2_glock, gl_lru);
1405 list_del_init(&gl->gl_lru); 1405 list_del_init(&gl->gl_lru);
1406 if (!spin_trylock(&gl->gl_spin)) { 1406 if (!spin_trylock(&gl->gl_spin)) {
1407add_back_to_lru:
1407 list_add(&gl->gl_lru, &lru_list); 1408 list_add(&gl->gl_lru, &lru_list);
1408 atomic_inc(&lru_count); 1409 atomic_inc(&lru_count);
1409 continue; 1410 continue;
1410 } 1411 }
1412 if (test_and_set_bit(GLF_LOCK, &gl->gl_flags)) {
1413 spin_unlock(&gl->gl_spin);
1414 goto add_back_to_lru;
1415 }
1411 clear_bit(GLF_LRU, &gl->gl_flags); 1416 clear_bit(GLF_LRU, &gl->gl_flags);
1412 spin_unlock(&lru_lock);
1413 gl->gl_lockref.count++; 1417 gl->gl_lockref.count++;
1414 if (demote_ok(gl)) 1418 if (demote_ok(gl))
1415 handle_callback(gl, LM_ST_UNLOCKED, 0, false); 1419 handle_callback(gl, LM_ST_UNLOCKED, 0, false);
@@ -1417,7 +1421,7 @@ __acquires(&lru_lock)
1417 if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0) 1421 if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
1418 gl->gl_lockref.count--; 1422 gl->gl_lockref.count--;
1419 spin_unlock(&gl->gl_spin); 1423 spin_unlock(&gl->gl_spin);
1420 spin_lock(&lru_lock); 1424 cond_resched_lock(&lru_lock);
1421 } 1425 }
1422} 1426}
1423 1427
@@ -1442,7 +1446,7 @@ static long gfs2_scan_glock_lru(int nr)
1442 gl = list_entry(lru_list.next, struct gfs2_glock, gl_lru); 1446 gl = list_entry(lru_list.next, struct gfs2_glock, gl_lru);
1443 1447
1444 /* Test for being demotable */ 1448 /* Test for being demotable */
1445 if (!test_and_set_bit(GLF_LOCK, &gl->gl_flags)) { 1449 if (!test_bit(GLF_LOCK, &gl->gl_flags)) {
1446 list_move(&gl->gl_lru, &dispose); 1450 list_move(&gl->gl_lru, &dispose);
1447 atomic_dec(&lru_count); 1451 atomic_dec(&lru_count);
1448 freed++; 1452 freed++;
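The glock LRU changes combine two patterns: take the per-glock GLF_LOCK bit with test_and_set_bit and requeue on contention rather than spinning, and use cond_resched_lock() on the lru lock to bound latency. A userspace sketch of the trylock-or-requeue decision, with hypothetical names:

```c
#include <stdbool.h>
#include <stdio.h>

struct glock {
	bool spin_contended;	/* models a held gl->gl_spin */
	bool flag_locked;	/* models GLF_LOCK */
};

static bool try_dispose(struct glock *gl)
{
	if (gl->spin_contended)
		return false;	/* spin_trylock failed: add back to LRU */
	if (gl->flag_locked)
		return false;	/* GLF_LOCK already held: requeue as well */
	gl->flag_locked = true;
	/* ... demote the glock and queue its work here ... */
	return true;
}

int main(void)
{
	struct glock busy = { .flag_locked = true };
	struct glock idle = { 0 };

	printf("busy disposed: %d\n", try_dispose(&busy));	/* 0: requeued */
	printf("idle disposed: %d\n", try_dispose(&idle));	/* 1: disposed */
	return 0;
}
```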
diff --git a/fs/gfs2/glops.c b/fs/gfs2/glops.c
index fc1100781bbc..2ffc67dce87f 100644
--- a/fs/gfs2/glops.c
+++ b/fs/gfs2/glops.c
@@ -234,8 +234,8 @@ static void inode_go_sync(struct gfs2_glock *gl)
234 * inode_go_inval - prepare a inode glock to be released 234 * inode_go_inval - prepare a inode glock to be released
235 * @gl: the glock 235 * @gl: the glock
236 * @flags: 236 * @flags:
237 * 237 *
238 * Normally we invlidate everything, but if we are moving into 238 * Normally we invalidate everything, but if we are moving into
239 * LM_ST_DEFERRED from LM_ST_SHARED or LM_ST_EXCLUSIVE then we 239 * LM_ST_DEFERRED from LM_ST_SHARED or LM_ST_EXCLUSIVE then we
240 * can keep hold of the metadata, since it won't have changed. 240 * can keep hold of the metadata, since it won't have changed.
241 * 241 *
diff --git a/fs/gfs2/lock_dlm.c b/fs/gfs2/lock_dlm.c
index 91f274de1246..4fafea1c9ecf 100644
--- a/fs/gfs2/lock_dlm.c
+++ b/fs/gfs2/lock_dlm.c
@@ -1036,8 +1036,8 @@ static int set_recover_size(struct gfs2_sbd *sdp, struct dlm_slot *slots,
1036 1036
1037 new_size = old_size + RECOVER_SIZE_INC; 1037 new_size = old_size + RECOVER_SIZE_INC;
1038 1038
1039 submit = kzalloc(new_size * sizeof(uint32_t), GFP_NOFS); 1039 submit = kcalloc(new_size, sizeof(uint32_t), GFP_NOFS);
1040 result = kzalloc(new_size * sizeof(uint32_t), GFP_NOFS); 1040 result = kcalloc(new_size, sizeof(uint32_t), GFP_NOFS);
1041 if (!submit || !result) { 1041 if (!submit || !result) {
1042 kfree(submit); 1042 kfree(submit);
1043 kfree(result); 1043 kfree(result);
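Switching from kzalloc(new_size * sizeof(uint32_t)) to kcalloc() matters because kcalloc checks the multiplication for overflow. The same contrast can be shown in userspace with calloc(), which performs the identical check, versus an open-coded size that silently wraps:

```c
#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	size_t n = SIZE_MAX / 2 + 2;	/* n * 4 wraps to a tiny value */

	void *p = calloc(n, sizeof(uint32_t));
	if (!p)
		printf("calloc rejected the overflowing request (errno=%d)\n",
		       errno);

	size_t wrapped = n * sizeof(uint32_t);	/* what kzalloc would see */
	printf("open-coded size wraps to %zu bytes\n", wrapped);

	free(p);
	return 0;
}
```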
diff --git a/fs/gfs2/rgrp.c b/fs/gfs2/rgrp.c
index db629d1bd1bd..f4cb9c0d6bbd 100644
--- a/fs/gfs2/rgrp.c
+++ b/fs/gfs2/rgrp.c
@@ -337,7 +337,7 @@ static bool gfs2_unaligned_extlen(struct gfs2_rbm *rbm, u32 n_unaligned, u32 *le
337 337
338/** 338/**
339 * gfs2_free_extlen - Return extent length of free blocks 339 * gfs2_free_extlen - Return extent length of free blocks
340 * @rbm: Starting position 340 * @rrbm: Starting position
341 * @len: Max length to check 341 * @len: Max length to check
342 * 342 *
343 * Starting at the block specified by the rbm, see how many free blocks 343 * Starting at the block specified by the rbm, see how many free blocks
@@ -2522,7 +2522,7 @@ void gfs2_rlist_alloc(struct gfs2_rgrp_list *rlist, unsigned int state)
2522 2522
2523/** 2523/**
2524 * gfs2_rlist_free - free a resource group list 2524 * gfs2_rlist_free - free a resource group list
2525 * @list: the list of resource groups 2525 * @rlist: the list of resource groups
2526 * 2526 *
2527 */ 2527 */
2528 2528
diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c
index 8f98138cbc43..f11b9eed0de1 100644
--- a/fs/nfs/direct.c
+++ b/fs/nfs/direct.c
@@ -756,7 +756,6 @@ static void nfs_direct_write_completion(struct nfs_pgio_header *hdr)
756 spin_unlock(&dreq->lock); 756 spin_unlock(&dreq->lock);
757 757
758 while (!list_empty(&hdr->pages)) { 758 while (!list_empty(&hdr->pages)) {
759 bool do_destroy = true;
760 759
761 req = nfs_list_entry(hdr->pages.next); 760 req = nfs_list_entry(hdr->pages.next);
762 nfs_list_remove_request(req); 761 nfs_list_remove_request(req);
@@ -765,7 +764,6 @@ static void nfs_direct_write_completion(struct nfs_pgio_header *hdr)
765 case NFS_IOHDR_NEED_COMMIT: 764 case NFS_IOHDR_NEED_COMMIT:
766 kref_get(&req->wb_kref); 765 kref_get(&req->wb_kref);
767 nfs_mark_request_commit(req, hdr->lseg, &cinfo); 766 nfs_mark_request_commit(req, hdr->lseg, &cinfo);
768 do_destroy = false;
769 } 767 }
770 nfs_unlock_and_release_request(req); 768 nfs_unlock_and_release_request(req);
771 } 769 }
diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h
index 82ddbf46660e..f415cbf9f6c3 100644
--- a/fs/nfs/internal.h
+++ b/fs/nfs/internal.h
@@ -244,6 +244,7 @@ void nfs_pgio_data_release(struct nfs_pgio_data *);
244int nfs_generic_pgio(struct nfs_pageio_descriptor *, struct nfs_pgio_header *); 244int nfs_generic_pgio(struct nfs_pageio_descriptor *, struct nfs_pgio_header *);
245int nfs_initiate_pgio(struct rpc_clnt *, struct nfs_pgio_data *, 245int nfs_initiate_pgio(struct rpc_clnt *, struct nfs_pgio_data *,
246 const struct rpc_call_ops *, int, int); 246 const struct rpc_call_ops *, int, int);
247void nfs_free_request(struct nfs_page *req);
247 248
248static inline void nfs_iocounter_init(struct nfs_io_counter *c) 249static inline void nfs_iocounter_init(struct nfs_io_counter *c)
249{ 250{
diff --git a/fs/nfs/nfs3acl.c b/fs/nfs/nfs3acl.c
index 871d6eda8dba..8f854dde4150 100644
--- a/fs/nfs/nfs3acl.c
+++ b/fs/nfs/nfs3acl.c
@@ -247,3 +247,46 @@ const struct xattr_handler *nfs3_xattr_handlers[] = {
247 &posix_acl_default_xattr_handler, 247 &posix_acl_default_xattr_handler,
248 NULL, 248 NULL,
249}; 249};
250
251static int
252nfs3_list_one_acl(struct inode *inode, int type, const char *name, void *data,
253 size_t size, ssize_t *result)
254{
255 struct posix_acl *acl;
256 char *p = data + *result;
257
258 acl = get_acl(inode, type);
259 if (!acl)
260 return 0;
261
262 posix_acl_release(acl);
263
264 *result += strlen(name);
265 *result += 1;
266 if (!size)
267 return 0;
268 if (*result > size)
269 return -ERANGE;
270
271 strcpy(p, name);
272 return 0;
273}
274
275ssize_t
276nfs3_listxattr(struct dentry *dentry, char *data, size_t size)
277{
278 struct inode *inode = dentry->d_inode;
279 ssize_t result = 0;
280 int error;
281
282 error = nfs3_list_one_acl(inode, ACL_TYPE_ACCESS,
283 POSIX_ACL_XATTR_ACCESS, data, size, &result);
284 if (error)
285 return error;
286
287 error = nfs3_list_one_acl(inode, ACL_TYPE_DEFAULT,
288 POSIX_ACL_XATTR_DEFAULT, data, size, &result);
289 if (error)
290 return error;
291 return result;
292}
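The new nfs3_listxattr implements the standard listxattr contract: with a zero size it only reports the buffer length needed, and with a real buffer it fills in NUL-separated names, returning -ERANGE if the buffer is too small. A userspace consumer of that contract (Linux-specific; the path is purely illustrative):

```c
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/xattr.h>

int main(void)
{
	const char *path = "/mnt/nfs/file";	/* hypothetical NFSv3 mount */

	ssize_t len = listxattr(path, NULL, 0);	/* sizing pass: size == 0 */
	if (len <= 0)
		return len < 0 ? 1 : 0;		/* error, or no xattrs */

	char *buf = malloc(len);
	if (!buf)
		return 1;

	len = listxattr(path, buf, len);	/* filling pass */
	if (len < 0) {
		free(buf);
		return 1;
	}

	for (ssize_t off = 0; off < len; off += strlen(buf + off) + 1)
		puts(buf + off);		/* names are NUL-separated */

	free(buf);
	return 0;
}
```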
diff --git a/fs/nfs/nfs3proc.c b/fs/nfs/nfs3proc.c
index e7daa42bbc86..f0afa291fd58 100644
--- a/fs/nfs/nfs3proc.c
+++ b/fs/nfs/nfs3proc.c
@@ -885,7 +885,7 @@ static const struct inode_operations nfs3_dir_inode_operations = {
885 .getattr = nfs_getattr, 885 .getattr = nfs_getattr,
886 .setattr = nfs_setattr, 886 .setattr = nfs_setattr,
887#ifdef CONFIG_NFS_V3_ACL 887#ifdef CONFIG_NFS_V3_ACL
888 .listxattr = generic_listxattr, 888 .listxattr = nfs3_listxattr,
889 .getxattr = generic_getxattr, 889 .getxattr = generic_getxattr,
890 .setxattr = generic_setxattr, 890 .setxattr = generic_setxattr,
891 .removexattr = generic_removexattr, 891 .removexattr = generic_removexattr,
@@ -899,7 +899,7 @@ static const struct inode_operations nfs3_file_inode_operations = {
899 .getattr = nfs_getattr, 899 .getattr = nfs_getattr,
900 .setattr = nfs_setattr, 900 .setattr = nfs_setattr,
901#ifdef CONFIG_NFS_V3_ACL 901#ifdef CONFIG_NFS_V3_ACL
902 .listxattr = generic_listxattr, 902 .listxattr = nfs3_listxattr,
903 .getxattr = generic_getxattr, 903 .getxattr = generic_getxattr,
904 .setxattr = generic_setxattr, 904 .setxattr = generic_setxattr,
905 .removexattr = generic_removexattr, 905 .removexattr = generic_removexattr,
diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c
index b6ee3a6ee96d..17fab89f6358 100644
--- a/fs/nfs/pagelist.c
+++ b/fs/nfs/pagelist.c
@@ -29,8 +29,6 @@
29static struct kmem_cache *nfs_page_cachep; 29static struct kmem_cache *nfs_page_cachep;
30static const struct rpc_call_ops nfs_pgio_common_ops; 30static const struct rpc_call_ops nfs_pgio_common_ops;
31 31
32static void nfs_free_request(struct nfs_page *);
33
34static bool nfs_pgarray_set(struct nfs_page_array *p, unsigned int pagecount) 32static bool nfs_pgarray_set(struct nfs_page_array *p, unsigned int pagecount)
35{ 33{
36 p->npages = pagecount; 34 p->npages = pagecount;
@@ -239,20 +237,28 @@ nfs_page_group_init(struct nfs_page *req, struct nfs_page *prev)
239 WARN_ON_ONCE(prev == req); 237 WARN_ON_ONCE(prev == req);
240 238
241 if (!prev) { 239 if (!prev) {
240 /* a head request */
242 req->wb_head = req; 241 req->wb_head = req;
243 req->wb_this_page = req; 242 req->wb_this_page = req;
244 } else { 243 } else {
244 /* a subrequest */
245 WARN_ON_ONCE(prev->wb_this_page != prev->wb_head); 245 WARN_ON_ONCE(prev->wb_this_page != prev->wb_head);
246 WARN_ON_ONCE(!test_bit(PG_HEADLOCK, &prev->wb_head->wb_flags)); 246 WARN_ON_ONCE(!test_bit(PG_HEADLOCK, &prev->wb_head->wb_flags));
247 req->wb_head = prev->wb_head; 247 req->wb_head = prev->wb_head;
248 req->wb_this_page = prev->wb_this_page; 248 req->wb_this_page = prev->wb_this_page;
249 prev->wb_this_page = req; 249 prev->wb_this_page = req;
250 250
251 /* All subrequests take a ref on the head request until
252 * nfs_page_group_destroy is called */
253 kref_get(&req->wb_head->wb_kref);
254
251 /* grab extra ref if head request has extra ref from 255 /* grab extra ref if head request has extra ref from
252 * the write/commit path to handle handoff between write 256 * the write/commit path to handle handoff between write
253 * and commit lists */ 257 * and commit lists */
254 if (test_bit(PG_INODE_REF, &prev->wb_head->wb_flags)) 258 if (test_bit(PG_INODE_REF, &prev->wb_head->wb_flags)) {
259 set_bit(PG_INODE_REF, &req->wb_flags);
255 kref_get(&req->wb_kref); 260 kref_get(&req->wb_kref);
261 }
256 } 262 }
257} 263}
258 264
@@ -269,6 +275,10 @@ nfs_page_group_destroy(struct kref *kref)
269 struct nfs_page *req = container_of(kref, struct nfs_page, wb_kref); 275 struct nfs_page *req = container_of(kref, struct nfs_page, wb_kref);
270 struct nfs_page *tmp, *next; 276 struct nfs_page *tmp, *next;
271 277
278 /* subrequests must release the ref on the head request */
279 if (req->wb_head != req)
280 nfs_release_request(req->wb_head);
281
272 if (!nfs_page_group_sync_on_bit(req, PG_TEARDOWN)) 282 if (!nfs_page_group_sync_on_bit(req, PG_TEARDOWN))
273 return; 283 return;
274 284
@@ -394,7 +404,7 @@ static void nfs_clear_request(struct nfs_page *req)
394 * 404 *
395 * Note: Should never be called with the spinlock held! 405 * Note: Should never be called with the spinlock held!
396 */ 406 */
397static void nfs_free_request(struct nfs_page *req) 407void nfs_free_request(struct nfs_page *req)
398{ 408{
399 WARN_ON_ONCE(req->wb_this_page != req); 409 WARN_ON_ONCE(req->wb_this_page != req);
400 410
@@ -925,7 +935,6 @@ static int __nfs_pageio_add_request(struct nfs_pageio_descriptor *desc,
925 nfs_pageio_doio(desc); 935 nfs_pageio_doio(desc);
926 if (desc->pg_error < 0) 936 if (desc->pg_error < 0)
927 return 0; 937 return 0;
928 desc->pg_moreio = 0;
929 if (desc->pg_recoalesce) 938 if (desc->pg_recoalesce)
930 return 0; 939 return 0;
931 /* retry add_request for this subreq */ 940 /* retry add_request for this subreq */
@@ -972,6 +981,7 @@ static int nfs_do_recoalesce(struct nfs_pageio_descriptor *desc)
972 desc->pg_count = 0; 981 desc->pg_count = 0;
973 desc->pg_base = 0; 982 desc->pg_base = 0;
974 desc->pg_recoalesce = 0; 983 desc->pg_recoalesce = 0;
984 desc->pg_moreio = 0;
975 985
976 while (!list_empty(&head)) { 986 while (!list_empty(&head)) {
977 struct nfs_page *req; 987 struct nfs_page *req;
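The pagelist hunks pin the head request for the lifetime of every subrequest: each subrequest takes a kref on wb_head at group-init time and drops it in nfs_page_group_destroy. A userspace refcount model of that rule, with hypothetical names:

```c
#include <assert.h>
#include <stdio.h>

struct req {
	int refs;
	struct req *head;
};

static void get(struct req *r)
{
	r->refs++;
}

static void put(struct req *r)
{
	if (--r->refs == 0)
		printf("req %p freed\n", (void *)r);
}

static void group_init(struct req *head, struct req *sub)
{
	sub->head = head;
	get(head);		/* the new kref_get(&req->wb_head->wb_kref) */
}

static void group_destroy(struct req *r)
{
	if (r->head != r)
		put(r->head);	/* subreq drops its head reference */
	put(r);
}

int main(void)
{
	struct req head = { .refs = 1 }, sub = { .refs = 1 };

	head.head = &head;
	group_init(&head, &sub);
	put(&head);		/* original owner drops its ref: head survives */
	assert(head.refs == 1);	/* still pinned by the subrequest */
	group_destroy(&sub);	/* now the head goes away too */
	return 0;
}
```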
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index 98ff061ccaf3..5e2f10304548 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -46,6 +46,7 @@ static const struct rpc_call_ops nfs_commit_ops;
46static const struct nfs_pgio_completion_ops nfs_async_write_completion_ops; 46static const struct nfs_pgio_completion_ops nfs_async_write_completion_ops;
47static const struct nfs_commit_completion_ops nfs_commit_completion_ops; 47static const struct nfs_commit_completion_ops nfs_commit_completion_ops;
48static const struct nfs_rw_ops nfs_rw_write_ops; 48static const struct nfs_rw_ops nfs_rw_write_ops;
49static void nfs_clear_request_commit(struct nfs_page *req);
49 50
50static struct kmem_cache *nfs_wdata_cachep; 51static struct kmem_cache *nfs_wdata_cachep;
51static mempool_t *nfs_wdata_mempool; 52static mempool_t *nfs_wdata_mempool;
@@ -91,8 +92,15 @@ static void nfs_context_set_write_error(struct nfs_open_context *ctx, int error)
91 set_bit(NFS_CONTEXT_ERROR_WRITE, &ctx->flags); 92 set_bit(NFS_CONTEXT_ERROR_WRITE, &ctx->flags);
92} 93}
93 94
95/*
96 * nfs_page_find_head_request_locked - find head request associated with @page
97 *
98 * must be called while holding the inode lock.
99 *
100 * returns matching head request with reference held, or NULL if not found.
101 */
94static struct nfs_page * 102static struct nfs_page *
95nfs_page_find_request_locked(struct nfs_inode *nfsi, struct page *page) 103nfs_page_find_head_request_locked(struct nfs_inode *nfsi, struct page *page)
96{ 104{
97 struct nfs_page *req = NULL; 105 struct nfs_page *req = NULL;
98 106
@@ -104,25 +112,33 @@ nfs_page_find_request_locked(struct nfs_inode *nfsi, struct page *page)
104 /* Linearly search the commit list for the correct req */ 112 /* Linearly search the commit list for the correct req */
105 list_for_each_entry_safe(freq, t, &nfsi->commit_info.list, wb_list) { 113 list_for_each_entry_safe(freq, t, &nfsi->commit_info.list, wb_list) {
106 if (freq->wb_page == page) { 114 if (freq->wb_page == page) {
107 req = freq; 115 req = freq->wb_head;
108 break; 116 break;
109 } 117 }
110 } 118 }
111 } 119 }
112 120
113 if (req) 121 if (req) {
122 WARN_ON_ONCE(req->wb_head != req);
123
114 kref_get(&req->wb_kref); 124 kref_get(&req->wb_kref);
125 }
115 126
116 return req; 127 return req;
117} 128}
118 129
119static struct nfs_page *nfs_page_find_request(struct page *page) 130/*
131 * nfs_page_find_head_request - find head request associated with @page
132 *
133 * returns matching head request with reference held, or NULL if not found.
134 */
135static struct nfs_page *nfs_page_find_head_request(struct page *page)
120{ 136{
121 struct inode *inode = page_file_mapping(page)->host; 137 struct inode *inode = page_file_mapping(page)->host;
122 struct nfs_page *req = NULL; 138 struct nfs_page *req = NULL;
123 139
124 spin_lock(&inode->i_lock); 140 spin_lock(&inode->i_lock);
125 req = nfs_page_find_request_locked(NFS_I(inode), page); 141 req = nfs_page_find_head_request_locked(NFS_I(inode), page);
126 spin_unlock(&inode->i_lock); 142 spin_unlock(&inode->i_lock);
127 return req; 143 return req;
128} 144}
@@ -274,36 +290,246 @@ static void nfs_end_page_writeback(struct nfs_page *req)
274 clear_bdi_congested(&nfss->backing_dev_info, BLK_RW_ASYNC); 290 clear_bdi_congested(&nfss->backing_dev_info, BLK_RW_ASYNC);
275} 291}
276 292
277static struct nfs_page *nfs_find_and_lock_request(struct page *page, bool nonblock) 293
294/* nfs_page_group_clear_bits
295 * @req - an nfs request
296 * clears all page group related bits from @req
297 */
298static void
299nfs_page_group_clear_bits(struct nfs_page *req)
300{
301 clear_bit(PG_TEARDOWN, &req->wb_flags);
302 clear_bit(PG_UNLOCKPAGE, &req->wb_flags);
303 clear_bit(PG_UPTODATE, &req->wb_flags);
304 clear_bit(PG_WB_END, &req->wb_flags);
305 clear_bit(PG_REMOVE, &req->wb_flags);
306}
307
308
309/*
310 * nfs_unroll_locks_and_wait - unlock all newly locked reqs and wait on @req
311 *
312 * this is a helper function for nfs_lock_and_join_requests
313 *
314 * @inode - inode associated with request page group, must be holding inode lock
315 * @head - head request of page group, must be holding head lock
316 * @req - request that couldn't lock and needs to wait on the req bit lock
317 * @nonblock - if true, don't actually wait
318 *
319 * NOTE: this must be called holding page_group bit lock and inode spin lock
320 * and BOTH will be released before returning.
321 *
322 * returns 0 on success, < 0 on error.
323 */
324static int
325nfs_unroll_locks_and_wait(struct inode *inode, struct nfs_page *head,
326 struct nfs_page *req, bool nonblock)
327 __releases(&inode->i_lock)
328{
329 struct nfs_page *tmp;
330 int ret;
331
332 /* relinquish all the locks successfully grabbed this run */
333 for (tmp = head ; tmp != req; tmp = tmp->wb_this_page)
334 nfs_unlock_request(tmp);
335
336 WARN_ON_ONCE(test_bit(PG_TEARDOWN, &req->wb_flags));
337
338 /* grab a ref on the request that will be waited on */
339 kref_get(&req->wb_kref);
340
341 nfs_page_group_unlock(head);
342 spin_unlock(&inode->i_lock);
343
344 /* release ref from nfs_page_find_head_request_locked */
345 nfs_release_request(head);
346
347 if (!nonblock)
348 ret = nfs_wait_on_request(req);
349 else
350 ret = -EAGAIN;
351 nfs_release_request(req);
352
353 return ret;
354}
355
356/*
357 * nfs_destroy_unlinked_subrequests - destroy recently unlinked subrequests
358 *
359 * @destroy_list - request list (using wb_this_page) terminated by @old_head
360 * @old_head - the old head of the list
361 *
362 * All subrequests must be locked and removed from all lists, so at this point
363 * they are only "active" in this function, and possibly in nfs_wait_on_request
364 * with a reference held by some other context.
365 */
366static void
367nfs_destroy_unlinked_subrequests(struct nfs_page *destroy_list,
368 struct nfs_page *old_head)
369{
370 while (destroy_list) {
371 struct nfs_page *subreq = destroy_list;
372
373 destroy_list = (subreq->wb_this_page == old_head) ?
374 NULL : subreq->wb_this_page;
375
376 WARN_ON_ONCE(old_head != subreq->wb_head);
377
378 /* make sure old group is not used */
379 subreq->wb_head = subreq;
380 subreq->wb_this_page = subreq;
381
382 nfs_clear_request_commit(subreq);
383
384 /* subreq is now totally disconnected from page group or any
385 * write / commit lists. last chance to wake any waiters */
386 nfs_unlock_request(subreq);
387
388 if (!test_bit(PG_TEARDOWN, &subreq->wb_flags)) {
389 /* release ref on old head request */
390 nfs_release_request(old_head);
391
392 nfs_page_group_clear_bits(subreq);
393
394 /* release the PG_INODE_REF reference */
395 if (test_and_clear_bit(PG_INODE_REF, &subreq->wb_flags))
396 nfs_release_request(subreq);
397 else
398 WARN_ON_ONCE(1);
399 } else {
400 WARN_ON_ONCE(test_bit(PG_CLEAN, &subreq->wb_flags));
401 /* zombie requests have already released the last
402 * reference and were waiting on the rest of the
403 * group to complete. Since it's no longer part of a
404 * group, simply free the request */
405 nfs_page_group_clear_bits(subreq);
406 nfs_free_request(subreq);
407 }
408 }
409}
410
411/*
412 * nfs_lock_and_join_requests - join all subreqs to the head req and return
413 * a locked reference, cancelling any pending
414 * operations for this page.
415 *
416 * @page - the page used to lookup the "page group" of nfs_page structures
417 * @nonblock - if true, don't block waiting for request locks
418 *
419 * This function joins all sub requests to the head request by first
420 * locking all requests in the group, cancelling any pending operations
421 * and finally updating the head request to cover the whole range covered by
422 * the (former) group. All subrequests are removed from any write or commit
423 * lists, unlinked from the group and destroyed.
424 *
425 * Returns a locked, referenced pointer to the head request - which after
426 * this call is guaranteed to be the only request associated with the page.
 427	 * Returns NULL if no requests are found for @page, or an ERR_PTR if an
428 * error was encountered.
429 */
430static struct nfs_page *
431nfs_lock_and_join_requests(struct page *page, bool nonblock)
278{ 432{
279 struct inode *inode = page_file_mapping(page)->host; 433 struct inode *inode = page_file_mapping(page)->host;
280 struct nfs_page *req; 434 struct nfs_page *head, *subreq;
435 struct nfs_page *destroy_list = NULL;
436 unsigned int total_bytes;
281 int ret; 437 int ret;
282 438
439try_again:
440 total_bytes = 0;
441
442 WARN_ON_ONCE(destroy_list);
443
283 spin_lock(&inode->i_lock); 444 spin_lock(&inode->i_lock);
284 for (;;) { 445
285 req = nfs_page_find_request_locked(NFS_I(inode), page); 446 /*
286 if (req == NULL) 447 * A reference is taken only on the head request which acts as a
287 break; 448 * reference to the whole page group - the group will not be destroyed
288 if (nfs_lock_request(req)) 449 * until the head reference is released.
289 break; 450 */
290 /* Note: If we hold the page lock, as is the case in nfs_writepage, 451 head = nfs_page_find_head_request_locked(NFS_I(inode), page);
291 * then the call to nfs_lock_request() will always 452
292 * succeed provided that someone hasn't already marked the 453 if (!head) {
293 * request as dirty (in which case we don't care).
294 */
295 spin_unlock(&inode->i_lock); 454 spin_unlock(&inode->i_lock);
296 if (!nonblock) 455 return NULL;
297 ret = nfs_wait_on_request(req); 456 }
298 else 457
299 ret = -EAGAIN; 458 /* lock each request in the page group */
300 nfs_release_request(req); 459 nfs_page_group_lock(head);
301 if (ret != 0) 460 subreq = head;
461 do {
462 /*
463 * Subrequests are always contiguous, non overlapping
464 * and in order. If not, it's a programming error.
465 */
466 WARN_ON_ONCE(subreq->wb_offset !=
467 (head->wb_offset + total_bytes));
468
469 /* keep track of how many bytes this group covers */
470 total_bytes += subreq->wb_bytes;
471
472 if (!nfs_lock_request(subreq)) {
473 /* releases page group bit lock and
474 * inode spin lock and all references */
475 ret = nfs_unroll_locks_and_wait(inode, head,
476 subreq, nonblock);
477
478 if (ret == 0)
479 goto try_again;
480
302 return ERR_PTR(ret); 481 return ERR_PTR(ret);
303 spin_lock(&inode->i_lock); 482 }
483
484 subreq = subreq->wb_this_page;
485 } while (subreq != head);
486
487 /* Now that all requests are locked, make sure they aren't on any list.
488 * Commit list removal accounting is done after locks are dropped */
489 subreq = head;
490 do {
491 nfs_list_remove_request(subreq);
492 subreq = subreq->wb_this_page;
493 } while (subreq != head);
494
495 /* unlink subrequests from head, destroy them later */
496 if (head->wb_this_page != head) {
497 /* destroy list will be terminated by head */
498 destroy_list = head->wb_this_page;
499 head->wb_this_page = head;
500
501 /* change head request to cover whole range that
502 * the former page group covered */
503 head->wb_bytes = total_bytes;
304 } 504 }
505
506 /*
507 * prepare head request to be added to new pgio descriptor
508 */
509 nfs_page_group_clear_bits(head);
510
511 /*
512 * some part of the group was still on the inode list - otherwise
513 * the group wouldn't be involved in async write.
514 * grab a reference for the head request, iff it needs one.
515 */
516 if (!test_and_set_bit(PG_INODE_REF, &head->wb_flags))
517 kref_get(&head->wb_kref);
518
519 nfs_page_group_unlock(head);
520
521 /* drop lock to clear_request_commit the head req and clean up
522 * requests on destroy list */
305 spin_unlock(&inode->i_lock); 523 spin_unlock(&inode->i_lock);
306 return req; 524
525 nfs_destroy_unlinked_subrequests(destroy_list, head);
526
527 /* clean up commit list state */
528 nfs_clear_request_commit(head);
529
530 /* still holds ref on head from nfs_page_find_head_request_locked
531 * and still has lock on head from lock loop */
532 return head;
307} 533}
308 534
309/* 535/*
@@ -316,7 +542,7 @@ static int nfs_page_async_flush(struct nfs_pageio_descriptor *pgio,
316 struct nfs_page *req; 542 struct nfs_page *req;
317 int ret = 0; 543 int ret = 0;
318 544
319 req = nfs_find_and_lock_request(page, nonblock); 545 req = nfs_lock_and_join_requests(page, nonblock);
320 if (!req) 546 if (!req)
321 goto out; 547 goto out;
322 ret = PTR_ERR(req); 548 ret = PTR_ERR(req);
@@ -448,7 +674,9 @@ static void nfs_inode_add_request(struct inode *inode, struct nfs_page *req)
448 set_page_private(req->wb_page, (unsigned long)req); 674 set_page_private(req->wb_page, (unsigned long)req);
449 } 675 }
450 nfsi->npages++; 676 nfsi->npages++;
 451	 set_bit(PG_INODE_REF, &req->wb_flags);	 677	 /* this is a head request for a page group - mark it as having an
678 * extra reference so sub groups can follow suit */
679 WARN_ON(test_and_set_bit(PG_INODE_REF, &req->wb_flags));
452 kref_get(&req->wb_kref); 680 kref_get(&req->wb_kref);
453 spin_unlock(&inode->i_lock); 681 spin_unlock(&inode->i_lock);
454} 682}
@@ -474,7 +702,9 @@ static void nfs_inode_remove_request(struct nfs_page *req)
474 nfsi->npages--; 702 nfsi->npages--;
475 spin_unlock(&inode->i_lock); 703 spin_unlock(&inode->i_lock);
476 } 704 }
477 nfs_release_request(req); 705
706 if (test_and_clear_bit(PG_INODE_REF, &req->wb_flags))
707 nfs_release_request(req);
478} 708}
479 709
480static void 710static void
@@ -638,7 +868,6 @@ static void nfs_write_completion(struct nfs_pgio_header *hdr)
638{ 868{
639 struct nfs_commit_info cinfo; 869 struct nfs_commit_info cinfo;
640 unsigned long bytes = 0; 870 unsigned long bytes = 0;
641 bool do_destroy;
642 871
643 if (test_bit(NFS_IOHDR_REDO, &hdr->flags)) 872 if (test_bit(NFS_IOHDR_REDO, &hdr->flags))
644 goto out; 873 goto out;
@@ -668,7 +897,6 @@ remove_req:
668next: 897next:
669 nfs_unlock_request(req); 898 nfs_unlock_request(req);
670 nfs_end_page_writeback(req); 899 nfs_end_page_writeback(req);
671 do_destroy = !test_bit(NFS_IOHDR_NEED_COMMIT, &hdr->flags);
672 nfs_release_request(req); 900 nfs_release_request(req);
673 } 901 }
674out: 902out:
@@ -769,7 +997,7 @@ static struct nfs_page *nfs_try_to_update_request(struct inode *inode,
769 spin_lock(&inode->i_lock); 997 spin_lock(&inode->i_lock);
770 998
771 for (;;) { 999 for (;;) {
772 req = nfs_page_find_request_locked(NFS_I(inode), page); 1000 req = nfs_page_find_head_request_locked(NFS_I(inode), page);
773 if (req == NULL) 1001 if (req == NULL)
774 goto out_unlock; 1002 goto out_unlock;
775 1003
@@ -877,7 +1105,7 @@ int nfs_flush_incompatible(struct file *file, struct page *page)
877 * dropped page. 1105 * dropped page.
878 */ 1106 */
879 do { 1107 do {
880 req = nfs_page_find_request(page); 1108 req = nfs_page_find_head_request(page);
881 if (req == NULL) 1109 if (req == NULL)
882 return 0; 1110 return 0;
883 l_ctx = req->wb_lock_context; 1111 l_ctx = req->wb_lock_context;
@@ -1569,27 +1797,28 @@ int nfs_wb_page_cancel(struct inode *inode, struct page *page)
1569 struct nfs_page *req; 1797 struct nfs_page *req;
1570 int ret = 0; 1798 int ret = 0;
1571 1799
1572 for (;;) { 1800 wait_on_page_writeback(page);
1573 wait_on_page_writeback(page); 1801
1574 req = nfs_page_find_request(page); 1802 /* blocking call to cancel all requests and join to a single (head)
1575 if (req == NULL) 1803 * request */
1576 break; 1804 req = nfs_lock_and_join_requests(page, false);
1577 if (nfs_lock_request(req)) { 1805
1578 nfs_clear_request_commit(req); 1806 if (IS_ERR(req)) {
1579 nfs_inode_remove_request(req); 1807 ret = PTR_ERR(req);
1580 /* 1808 } else if (req) {
1581 * In case nfs_inode_remove_request has marked the 1809 /* all requests from this page have been cancelled by
1582 * page as being dirty 1810 * nfs_lock_and_join_requests, so just remove the head
1583 */ 1811 * request from the inode / page_private pointer and
1584 cancel_dirty_page(page, PAGE_CACHE_SIZE); 1812 * release it */
1585 nfs_unlock_and_release_request(req); 1813 nfs_inode_remove_request(req);
1586 break; 1814 /*
1587 } 1815 * In case nfs_inode_remove_request has marked the
1588 ret = nfs_wait_on_request(req); 1816 * page as being dirty
1589 nfs_release_request(req); 1817 */
1590 if (ret < 0) 1818 cancel_dirty_page(page, PAGE_CACHE_SIZE);
1591 break; 1819 nfs_unlock_and_release_request(req);
1592 } 1820 }
1821
1593 return ret; 1822 return ret;
1594} 1823}
1595 1824
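The write.c rewrite replaces the old find-and-lock loop with nfs_lock_and_join_requests, whose core is a lock-all-or-back-off pattern: try to lock every request in the group, and on contention drop everything taken so far, wait for the busy request, and restart. A skeleton of that idiom, with hypothetical names; clearing busy[] stands in for nfs_wait_on_request():

```c
#include <stdbool.h>
#include <stdio.h>

#define N 3

static bool locked[N];
static bool busy[N] = { false, true, false };

static bool trylock(int i)
{
	if (busy[i])
		return false;
	locked[i] = true;
	return true;
}

static void join_group(void)
{
retry:
	for (int i = 0; i < N; i++) {
		if (!trylock(i)) {
			for (int j = 0; j < i; j++)	/* unroll 0..i-1 */
				locked[j] = false;
			busy[i] = false;	/* wait for the busy req */
			goto retry;		/* then start over */
		}
	}
	puts("group locked and joined under one head");
}

int main(void)
{
	join_group();
	return 0;
}
```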
diff --git a/fs/xfs/xfs_bmap.c b/fs/xfs/xfs_bmap.c
index 96175df211b1..75c3fe5f3d9d 100644
--- a/fs/xfs/xfs_bmap.c
+++ b/fs/xfs/xfs_bmap.c
@@ -4298,8 +4298,8 @@ xfs_bmapi_delay(
4298} 4298}
4299 4299
4300 4300
4301int 4301static int
4302__xfs_bmapi_allocate( 4302xfs_bmapi_allocate(
4303 struct xfs_bmalloca *bma) 4303 struct xfs_bmalloca *bma)
4304{ 4304{
4305 struct xfs_mount *mp = bma->ip->i_mount; 4305 struct xfs_mount *mp = bma->ip->i_mount;
@@ -4578,9 +4578,6 @@ xfs_bmapi_write(
4578 bma.flist = flist; 4578 bma.flist = flist;
4579 bma.firstblock = firstblock; 4579 bma.firstblock = firstblock;
4580 4580
4581 if (flags & XFS_BMAPI_STACK_SWITCH)
4582 bma.stack_switch = 1;
4583
4584 while (bno < end && n < *nmap) { 4581 while (bno < end && n < *nmap) {
4585 inhole = eof || bma.got.br_startoff > bno; 4582 inhole = eof || bma.got.br_startoff > bno;
4586 wasdelay = !inhole && isnullstartblock(bma.got.br_startblock); 4583 wasdelay = !inhole && isnullstartblock(bma.got.br_startblock);
diff --git a/fs/xfs/xfs_bmap.h b/fs/xfs/xfs_bmap.h
index 38ba36e9b2f0..b879ca56a64c 100644
--- a/fs/xfs/xfs_bmap.h
+++ b/fs/xfs/xfs_bmap.h
@@ -77,7 +77,6 @@ typedef struct xfs_bmap_free
77 * from written to unwritten, otherwise convert from unwritten to written. 77 * from written to unwritten, otherwise convert from unwritten to written.
78 */ 78 */
79#define XFS_BMAPI_CONVERT 0x040 79#define XFS_BMAPI_CONVERT 0x040
80#define XFS_BMAPI_STACK_SWITCH 0x080
81 80
82#define XFS_BMAPI_FLAGS \ 81#define XFS_BMAPI_FLAGS \
83 { XFS_BMAPI_ENTIRE, "ENTIRE" }, \ 82 { XFS_BMAPI_ENTIRE, "ENTIRE" }, \
@@ -86,8 +85,7 @@ typedef struct xfs_bmap_free
86 { XFS_BMAPI_PREALLOC, "PREALLOC" }, \ 85 { XFS_BMAPI_PREALLOC, "PREALLOC" }, \
87 { XFS_BMAPI_IGSTATE, "IGSTATE" }, \ 86 { XFS_BMAPI_IGSTATE, "IGSTATE" }, \
88 { XFS_BMAPI_CONTIG, "CONTIG" }, \ 87 { XFS_BMAPI_CONTIG, "CONTIG" }, \
89 { XFS_BMAPI_CONVERT, "CONVERT" }, \ 88 { XFS_BMAPI_CONVERT, "CONVERT" }
90 { XFS_BMAPI_STACK_SWITCH, "STACK_SWITCH" }
91 89
92 90
93static inline int xfs_bmapi_aflag(int w) 91static inline int xfs_bmapi_aflag(int w)
diff --git a/fs/xfs/xfs_bmap_util.c b/fs/xfs/xfs_bmap_util.c
index 703b3ec1796c..64731ef3324d 100644
--- a/fs/xfs/xfs_bmap_util.c
+++ b/fs/xfs/xfs_bmap_util.c
@@ -249,59 +249,6 @@ xfs_bmap_rtalloc(
249} 249}
250 250
251/* 251/*
252 * Stack switching interfaces for allocation
253 */
254static void
255xfs_bmapi_allocate_worker(
256 struct work_struct *work)
257{
258 struct xfs_bmalloca *args = container_of(work,
259 struct xfs_bmalloca, work);
260 unsigned long pflags;
261 unsigned long new_pflags = PF_FSTRANS;
262
263 /*
264 * we are in a transaction context here, but may also be doing work
265 * in kswapd context, and hence we may need to inherit that state
266 * temporarily to ensure that we don't block waiting for memory reclaim
267 * in any way.
268 */
269 if (args->kswapd)
270 new_pflags |= PF_MEMALLOC | PF_SWAPWRITE | PF_KSWAPD;
271
272 current_set_flags_nested(&pflags, new_pflags);
273
274 args->result = __xfs_bmapi_allocate(args);
275 complete(args->done);
276
277 current_restore_flags_nested(&pflags, new_pflags);
278}
279
280/*
281 * Some allocation requests often come in with little stack to work on. Push
282 * them off to a worker thread so there is lots of stack to use. Otherwise just
283 * call directly to avoid the context switch overhead here.
284 */
285int
286xfs_bmapi_allocate(
287 struct xfs_bmalloca *args)
288{
289 DECLARE_COMPLETION_ONSTACK(done);
290
291 if (!args->stack_switch)
292 return __xfs_bmapi_allocate(args);
293
294
295 args->done = &done;
296 args->kswapd = current_is_kswapd();
297 INIT_WORK_ONSTACK(&args->work, xfs_bmapi_allocate_worker);
298 queue_work(xfs_alloc_wq, &args->work);
299 wait_for_completion(&done);
300 destroy_work_on_stack(&args->work);
301 return args->result;
302}
303
304/*
305 * Check if the endoff is outside the last extent. If so the caller will grow 252 * Check if the endoff is outside the last extent. If so the caller will grow
306 * the allocation to a stripe unit boundary. All offsets are considered outside 253 * the allocation to a stripe unit boundary. All offsets are considered outside
307 * the end of file for an empty fork, so 1 is returned in *eof in that case. 254 * the end of file for an empty fork, so 1 is returned in *eof in that case.
diff --git a/fs/xfs/xfs_bmap_util.h b/fs/xfs/xfs_bmap_util.h
index 075f72232a64..2fdb72d2c908 100644
--- a/fs/xfs/xfs_bmap_util.h
+++ b/fs/xfs/xfs_bmap_util.h
@@ -55,8 +55,6 @@ struct xfs_bmalloca {
55 bool userdata;/* set if is user data */ 55 bool userdata;/* set if is user data */
56 bool aeof; /* allocated space at eof */ 56 bool aeof; /* allocated space at eof */
57 bool conv; /* overwriting unwritten extents */ 57 bool conv; /* overwriting unwritten extents */
58 bool stack_switch;
59 bool kswapd; /* allocation in kswapd context */
60 int flags; 58 int flags;
61 struct completion *done; 59 struct completion *done;
62 struct work_struct work; 60 struct work_struct work;
@@ -66,8 +64,6 @@ struct xfs_bmalloca {
66int xfs_bmap_finish(struct xfs_trans **tp, struct xfs_bmap_free *flist, 64int xfs_bmap_finish(struct xfs_trans **tp, struct xfs_bmap_free *flist,
67 int *committed); 65 int *committed);
68int xfs_bmap_rtalloc(struct xfs_bmalloca *ap); 66int xfs_bmap_rtalloc(struct xfs_bmalloca *ap);
69int xfs_bmapi_allocate(struct xfs_bmalloca *args);
70int __xfs_bmapi_allocate(struct xfs_bmalloca *args);
71int xfs_bmap_eof(struct xfs_inode *ip, xfs_fileoff_t endoff, 67int xfs_bmap_eof(struct xfs_inode *ip, xfs_fileoff_t endoff,
72 int whichfork, int *eof); 68 int whichfork, int *eof);
73int xfs_bmap_count_blocks(struct xfs_trans *tp, struct xfs_inode *ip, 69int xfs_bmap_count_blocks(struct xfs_trans *tp, struct xfs_inode *ip,
diff --git a/fs/xfs/xfs_btree.c b/fs/xfs/xfs_btree.c
index bf810c6baf2b..cf893bc1e373 100644
--- a/fs/xfs/xfs_btree.c
+++ b/fs/xfs/xfs_btree.c
@@ -33,6 +33,7 @@
33#include "xfs_error.h" 33#include "xfs_error.h"
34#include "xfs_trace.h" 34#include "xfs_trace.h"
35#include "xfs_cksum.h" 35#include "xfs_cksum.h"
36#include "xfs_alloc.h"
36 37
37/* 38/*
38 * Cursor allocation zone. 39 * Cursor allocation zone.
@@ -2323,7 +2324,7 @@ error1:
2323 * record (to be inserted into parent). 2324 * record (to be inserted into parent).
2324 */ 2325 */
2325STATIC int /* error */ 2326STATIC int /* error */
2326xfs_btree_split( 2327__xfs_btree_split(
2327 struct xfs_btree_cur *cur, 2328 struct xfs_btree_cur *cur,
2328 int level, 2329 int level,
2329 union xfs_btree_ptr *ptrp, 2330 union xfs_btree_ptr *ptrp,
@@ -2503,6 +2504,85 @@ error0:
2503 return error; 2504 return error;
2504} 2505}
2505 2506
2507struct xfs_btree_split_args {
2508 struct xfs_btree_cur *cur;
2509 int level;
2510 union xfs_btree_ptr *ptrp;
2511 union xfs_btree_key *key;
2512 struct xfs_btree_cur **curp;
2513 int *stat; /* success/failure */
2514 int result;
2515 bool kswapd; /* allocation in kswapd context */
2516 struct completion *done;
2517 struct work_struct work;
2518};
2519
2520/*
2521 * Stack switching interfaces for allocation
2522 */
2523static void
2524xfs_btree_split_worker(
2525 struct work_struct *work)
2526{
2527 struct xfs_btree_split_args *args = container_of(work,
2528 struct xfs_btree_split_args, work);
2529 unsigned long pflags;
2530 unsigned long new_pflags = PF_FSTRANS;
2531
2532 /*
2533 * we are in a transaction context here, but may also be doing work
2534 * in kswapd context, and hence we may need to inherit that state
2535 * temporarily to ensure that we don't block waiting for memory reclaim
2536 * in any way.
2537 */
2538 if (args->kswapd)
2539 new_pflags |= PF_MEMALLOC | PF_SWAPWRITE | PF_KSWAPD;
2540
2541 current_set_flags_nested(&pflags, new_pflags);
2542
2543 args->result = __xfs_btree_split(args->cur, args->level, args->ptrp,
2544 args->key, args->curp, args->stat);
2545 complete(args->done);
2546
2547 current_restore_flags_nested(&pflags, new_pflags);
2548}
2549
2550/*
2551 * BMBT split requests often come in with little stack to work on. Push
2552 * them off to a worker thread so there is lots of stack to use. For the other
2553 * btree types, just call directly to avoid the context switch overhead here.
2554 */
2555STATIC int /* error */
2556xfs_btree_split(
2557 struct xfs_btree_cur *cur,
2558 int level,
2559 union xfs_btree_ptr *ptrp,
2560 union xfs_btree_key *key,
2561 struct xfs_btree_cur **curp,
2562 int *stat) /* success/failure */
2563{
2564 struct xfs_btree_split_args args;
2565 DECLARE_COMPLETION_ONSTACK(done);
2566
2567 if (cur->bc_btnum != XFS_BTNUM_BMAP)
2568 return __xfs_btree_split(cur, level, ptrp, key, curp, stat);
2569
2570 args.cur = cur;
2571 args.level = level;
2572 args.ptrp = ptrp;
2573 args.key = key;
2574 args.curp = curp;
2575 args.stat = stat;
2576 args.done = &done;
2577 args.kswapd = current_is_kswapd();
2578 INIT_WORK_ONSTACK(&args.work, xfs_btree_split_worker);
2579 queue_work(xfs_alloc_wq, &args.work);
2580 wait_for_completion(&done);
2581 destroy_work_on_stack(&args.work);
2582 return args.result;
2583}
2584
2585
2506/* 2586/*
2507 * Copy the old inode root contents into a real block and make the 2587 * Copy the old inode root contents into a real block and make the
2508 * broot point to it. 2588 * broot point to it.
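xfs_btree_split now bounces BMBT splits to a workqueue purely to get a fresh stack: it packages the arguments, queues the work, and sleeps on a completion, while other btree types still call __xfs_btree_split directly. A userspace analogue using pthreads in place of the kernel workqueue (compile with -pthread; names are illustrative):

```c
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct split_args {
	int level;
	int result;
};

static void *split_worker(void *p)
{
	struct split_args *args = p;

	/* the deep-recursion part runs here, on the worker's fresh stack */
	args->result = args->level * 2;
	return NULL;
}

static int btree_split(int level, bool deep)
{
	struct split_args args = { .level = level };
	pthread_t t;

	if (!deep) {			/* non-BMBT case: call directly */
		split_worker(&args);
		return args.result;
	}
	pthread_create(&t, NULL, split_worker, &args);
	pthread_join(&t, NULL);		/* wait_for_completion() stand-in */
	return args.result;
}

int main(void)
{
	printf("result=%d\n", btree_split(3, true));
	return 0;
}
```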
diff --git a/fs/xfs/xfs_iomap.c b/fs/xfs/xfs_iomap.c
index 6c5eb4c551e3..6d3ec2b6ee29 100644
--- a/fs/xfs/xfs_iomap.c
+++ b/fs/xfs/xfs_iomap.c
@@ -749,8 +749,7 @@ xfs_iomap_write_allocate(
749 * pointer that the caller gave to us. 749 * pointer that the caller gave to us.
750 */ 750 */
751 error = xfs_bmapi_write(tp, ip, map_start_fsb, 751 error = xfs_bmapi_write(tp, ip, map_start_fsb,
752 count_fsb, 752 count_fsb, 0,
753 XFS_BMAPI_STACK_SWITCH,
754 &first_block, 1, 753 &first_block, 1,
755 imap, &nimaps, &free_list); 754 imap, &nimaps, &free_list);
756 if (error) 755 if (error)
diff --git a/fs/xfs/xfs_sb.c b/fs/xfs/xfs_sb.c
index c3453b11f563..7703fa6770ff 100644
--- a/fs/xfs/xfs_sb.c
+++ b/fs/xfs/xfs_sb.c
@@ -483,10 +483,16 @@ xfs_sb_quota_to_disk(
483 } 483 }
484 484
485 /* 485 /*
486 * GQUOTINO and PQUOTINO cannot be used together in versions 486 * GQUOTINO and PQUOTINO cannot be used together in versions of
487 * of superblock that do not have pquotino. from->sb_flags 487 * superblock that do not have pquotino. from->sb_flags tells us which
488 * tells us which quota is active and should be copied to 488 * quota is active and should be copied to disk. If neither are active,
489 * disk. 489 * make sure we write NULLFSINO to the sb_gquotino field as a quota
490 * inode value of "0" is invalid when the XFS_SB_VERSION_QUOTA feature
491 * bit is set.
492 *
493 * Note that we don't need to handle the sb_uquotino or sb_pquotino here
494 * as they do not require any translation. Hence the main sb field loop
495 * will write them appropriately from the in-core superblock.
490 */ 496 */
491 if ((*fields & XFS_SB_GQUOTINO) && 497 if ((*fields & XFS_SB_GQUOTINO) &&
492 (from->sb_qflags & XFS_GQUOTA_ACCT)) 498 (from->sb_qflags & XFS_GQUOTA_ACCT))
@@ -494,6 +500,17 @@ xfs_sb_quota_to_disk(
494 else if ((*fields & XFS_SB_PQUOTINO) && 500 else if ((*fields & XFS_SB_PQUOTINO) &&
495 (from->sb_qflags & XFS_PQUOTA_ACCT)) 501 (from->sb_qflags & XFS_PQUOTA_ACCT))
496 to->sb_gquotino = cpu_to_be64(from->sb_pquotino); 502 to->sb_gquotino = cpu_to_be64(from->sb_pquotino);
503 else {
504 /*
505 * We can't rely on just the fields being logged to tell us
506 * that it is safe to write NULLFSINO - we should only do that
507 * if quotas are not actually enabled. Hence only write
508 * NULLFSINO if both in-core quota inodes are NULL.
509 */
510 if (from->sb_gquotino == NULLFSINO &&
511 from->sb_pquotino == NULLFSINO)
512 to->sb_gquotino = cpu_to_be64(NULLFSINO);
513 }
497 514
498 *fields &= ~(XFS_SB_PQUOTINO | XFS_SB_GQUOTINO); 515 *fields &= ~(XFS_SB_PQUOTINO | XFS_SB_GQUOTINO);
499} 516}
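The sb_gquotino logic above reduces to three cases: copy the active quota inode, or write NULLFSINO only when both in-core quota inodes are already NULL, otherwise leave the field alone. A distilled, standalone version of that decision (hypothetical helper, not the kernel function):

```c
#include <stdint.h>
#include <stdio.h>

#define NULLFSINO ((uint64_t)~0ULL)

/* Returns the value to write into the on-disk sb_gquotino field, or
 * 'keep' to leave whatever is there. Mirrors the hunk's three cases. */
static uint64_t pick_gquotino(int gquota_acct, int pquota_acct,
			      uint64_t gquotino, uint64_t pquotino,
			      uint64_t keep)
{
	if (gquota_acct)
		return gquotino;	/* group quota active */
	if (pquota_acct)
		return pquotino;	/* project quota shares the field */
	if (gquotino == NULLFSINO && pquotino == NULLFSINO)
		return NULLFSINO;	/* quotas truly off: safe to NULL out */
	return keep;			/* can't prove it's safe: don't touch */
}

int main(void)
{
	printf("%llx\n", (unsigned long long)
	       pick_gquotino(0, 0, NULLFSINO, NULLFSINO, 42));
	return 0;
}
```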
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
index ec4112d257bc..8f8ae95c6e27 100644
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -482,8 +482,8 @@ extern struct cpufreq_governor cpufreq_gov_conservative;
482 *********************************************************************/ 482 *********************************************************************/
483 483
484/* Special Values of .frequency field */ 484/* Special Values of .frequency field */
485#define CPUFREQ_ENTRY_INVALID ~0 485#define CPUFREQ_ENTRY_INVALID ~0u
486#define CPUFREQ_TABLE_END ~1 486#define CPUFREQ_TABLE_END ~1u
487/* Special Values of .flags field */ 487/* Special Values of .flags field */
488#define CPUFREQ_BOOST_FREQ (1 << 0) 488#define CPUFREQ_BOOST_FREQ (1 << 0)
489 489
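The 'u' suffix on CPUFREQ_ENTRY_INVALID and CPUFREQ_TABLE_END matters because plain ~0 has type int with value -1: it happens to compare equal to an unsigned int frequency after the usual arithmetic conversions, but sign-extends once a wider type is involved. A small demonstration:

```c
#include <stdio.h>

#define OLD_INVALID ~0		/* int, value -1 */
#define NEW_INVALID ~0u		/* unsigned int, value UINT_MAX */

int main(void)
{
	unsigned int freq = ~0u;

	/* Both compare equal against an unsigned int ... */
	printf("old matches: %d\n", freq == OLD_INVALID);
	printf("new matches: %d\n", freq == NEW_INVALID);

	/* ... but sign-extension bites once a wider type is involved. */
	unsigned long long wide_old = OLD_INVALID;	/* 0xffff...ffff */
	unsigned long long wide_new = NEW_INVALID;	/* 0x00000000ffffffff */
	printf("old widened: %llx\nnew widened: %llx\n", wide_old, wide_new);
	return 0;
}
```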
diff --git a/include/linux/mutex.h b/include/linux/mutex.h
index 11692dea18aa..42aa9b9ecd5f 100644
--- a/include/linux/mutex.h
+++ b/include/linux/mutex.h
@@ -17,6 +17,7 @@
17#include <linux/lockdep.h> 17#include <linux/lockdep.h>
18#include <linux/atomic.h> 18#include <linux/atomic.h>
19#include <asm/processor.h> 19#include <asm/processor.h>
20#include <linux/osq_lock.h>
20 21
21/* 22/*
22 * Simple, straightforward mutexes with strict semantics: 23 * Simple, straightforward mutexes with strict semantics:
@@ -46,7 +47,6 @@
46 * - detects multi-task circular deadlocks and prints out all affected 47 * - detects multi-task circular deadlocks and prints out all affected
47 * locks and tasks (and only those tasks) 48 * locks and tasks (and only those tasks)
48 */ 49 */
49struct optimistic_spin_queue;
50struct mutex { 50struct mutex {
51 /* 1: unlocked, 0: locked, negative: locked, possible waiters */ 51 /* 1: unlocked, 0: locked, negative: locked, possible waiters */
52 atomic_t count; 52 atomic_t count;
@@ -56,7 +56,7 @@ struct mutex {
56 struct task_struct *owner; 56 struct task_struct *owner;
57#endif 57#endif
58#ifdef CONFIG_MUTEX_SPIN_ON_OWNER 58#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
59 struct optimistic_spin_queue *osq; /* Spinner MCS lock */ 59 struct optimistic_spin_queue osq; /* Spinner MCS lock */
60#endif 60#endif
61#ifdef CONFIG_DEBUG_MUTEXES 61#ifdef CONFIG_DEBUG_MUTEXES
62 const char *name; 62 const char *name;
diff --git a/include/linux/osq_lock.h b/include/linux/osq_lock.h
new file mode 100644
index 000000000000..90230d5811c5
--- /dev/null
+++ b/include/linux/osq_lock.h
@@ -0,0 +1,27 @@
1#ifndef __LINUX_OSQ_LOCK_H
2#define __LINUX_OSQ_LOCK_H
3
4/*
5 * An MCS like lock especially tailored for optimistic spinning for sleeping
6 * lock implementations (mutex, rwsem, etc).
7 */
8
9#define OSQ_UNLOCKED_VAL (0)
10
11struct optimistic_spin_queue {
12 /*
13 * Stores an encoded value of the CPU # of the tail node in the queue.
14 * If the queue is empty, then it's set to OSQ_UNLOCKED_VAL.
15 */
16 atomic_t tail;
17};
18
19/* Init macro and function. */
20#define OSQ_LOCK_UNLOCKED { ATOMIC_INIT(OSQ_UNLOCKED_VAL) }
21
22static inline void osq_lock_init(struct optimistic_spin_queue *lock)
23{
24 atomic_set(&lock->tail, OSQ_UNLOCKED_VAL);
25}
26
27#endif
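[annotation] A minimal sketch of how the new type is meant to be embedded (kernel context; struct my_sleeping_lock is a hypothetical container, not kernel code). Because the lock is now a single atomic_t holding an encoded CPU tail rather than a pointer, embedding it costs four bytes, and both a static initializer and a runtime init helper are provided:

	#include <linux/osq_lock.h>

	struct my_sleeping_lock {
		struct optimistic_spin_queue osq;
		/* ... other fields ... */
	};

	static struct my_sleeping_lock static_lock = {
		.osq = OSQ_LOCK_UNLOCKED,	/* static initialization */
	};

	static void my_lock_init(struct my_sleeping_lock *lock)
	{
		osq_lock_init(&lock->osq);	/* runtime initialization */
	}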
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index 5a75d19aa661..6a94cc8b1ca0 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -44,7 +44,6 @@
44#include <linux/debugobjects.h> 44#include <linux/debugobjects.h>
45#include <linux/bug.h> 45#include <linux/bug.h>
46#include <linux/compiler.h> 46#include <linux/compiler.h>
47#include <linux/percpu.h>
48#include <asm/barrier.h> 47#include <asm/barrier.h>
49 48
50extern int rcu_expedited; /* for sysctl */ 49extern int rcu_expedited; /* for sysctl */
@@ -300,41 +299,6 @@ bool __rcu_is_watching(void);
300#endif /* #if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE) || defined(CONFIG_SMP) */ 299#endif /* #if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE) || defined(CONFIG_SMP) */
301 300
302/* 301/*
303 * Hooks for cond_resched() and friends to avoid RCU CPU stall warnings.
304 */
305
306#define RCU_COND_RESCHED_LIM 256 /* ms vs. 100s of ms. */
307DECLARE_PER_CPU(int, rcu_cond_resched_count);
308void rcu_resched(void);
309
310/*
311 * Is it time to report RCU quiescent states?
312 *
313 * Note unsynchronized access to rcu_cond_resched_count. Yes, we might
314 * increment some random CPU's count, and possibly also load the result from
315 * yet another CPU's count. We might even clobber some other CPU's attempt
316 * to zero its counter. This is all OK because the goal is not precision,
317 * but rather reasonable amortization of rcu_note_context_switch() overhead
318 * and extremely high probability of avoiding RCU CPU stall warnings.
319 * Note that this function has to be preempted in just the wrong place,
320 * many thousands of times in a row, for anything bad to happen.
321 */
322static inline bool rcu_should_resched(void)
323{
324 return raw_cpu_inc_return(rcu_cond_resched_count) >=
325 RCU_COND_RESCHED_LIM;
326}
327
328/*
329 * Report quiescent states to RCU if it is time to do so.
330 */
331static inline void rcu_cond_resched(void)
332{
333 if (unlikely(rcu_should_resched()))
334 rcu_resched();
335}
336
337/*
338 * Infrastructure to implement the synchronize_() primitives in 302 * Infrastructure to implement the synchronize_() primitives in
339 * TREE_RCU and rcu_barrier_() primitives in TINY_RCU. 303 * TREE_RCU and rcu_barrier_() primitives in TINY_RCU.
340 */ 304 */
@@ -358,9 +322,19 @@ void wait_rcu_gp(call_rcu_func_t crf);
358 * initialization. 322 * initialization.
359 */ 323 */
360#ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD 324#ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
325void init_rcu_head(struct rcu_head *head);
326void destroy_rcu_head(struct rcu_head *head);
361void init_rcu_head_on_stack(struct rcu_head *head); 327void init_rcu_head_on_stack(struct rcu_head *head);
362void destroy_rcu_head_on_stack(struct rcu_head *head); 328void destroy_rcu_head_on_stack(struct rcu_head *head);
363#else /* !CONFIG_DEBUG_OBJECTS_RCU_HEAD */ 329#else /* !CONFIG_DEBUG_OBJECTS_RCU_HEAD */
330static inline void init_rcu_head(struct rcu_head *head)
331{
332}
333
334static inline void destroy_rcu_head(struct rcu_head *head)
335{
336}
337
364static inline void init_rcu_head_on_stack(struct rcu_head *head) 338static inline void init_rcu_head_on_stack(struct rcu_head *head)
365{ 339{
366} 340}
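[annotation] A sketch of what the newly declared hooks are for, under the assumption that callers with dynamically allocated rcu_head structures want to register them with debugobjects explicitly (my_obj and its helpers are hypothetical; init_rcu_head()/destroy_rcu_head() are the real symbols declared above and compile away without DEBUG_OBJECTS_RCU_HEAD):

	#include <linux/rcupdate.h>
	#include <linux/slab.h>

	struct my_obj {
		struct rcu_head rcu;
		int payload;
	};

	static struct my_obj *my_obj_create(void)
	{
		struct my_obj *obj = kmalloc(sizeof(*obj), GFP_KERNEL);

		if (obj)
			init_rcu_head(&obj->rcu);	/* register with debugobjects */
		return obj;
	}

	static void my_obj_destroy(struct my_obj *obj)
	{
		destroy_rcu_head(&obj->rcu);	/* balance the init before freeing */
		kfree(obj);
	}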
diff --git a/include/linux/rwsem-spinlock.h b/include/linux/rwsem-spinlock.h
index d5b13bc07a0b..561e8615528d 100644
--- a/include/linux/rwsem-spinlock.h
+++ b/include/linux/rwsem-spinlock.h
@@ -15,13 +15,13 @@
15#ifdef __KERNEL__ 15#ifdef __KERNEL__
16/* 16/*
17 * the rw-semaphore definition 17 * the rw-semaphore definition
18 * - if activity is 0 then there are no active readers or writers 18 * - if count is 0 then there are no active readers or writers
19 * - if activity is +ve then that is the number of active readers 19 * - if count is +ve then that is the number of active readers
20 * - if activity is -1 then there is one active writer 20 * - if count is -1 then there is one active writer
21 * - if wait_list is not empty, then there are processes waiting for the semaphore 21 * - if wait_list is not empty, then there are processes waiting for the semaphore
22 */ 22 */
23struct rw_semaphore { 23struct rw_semaphore {
24 __s32 activity; 24 __s32 count;
25 raw_spinlock_t wait_lock; 25 raw_spinlock_t wait_lock;
26 struct list_head wait_list; 26 struct list_head wait_list;
27#ifdef CONFIG_DEBUG_LOCK_ALLOC 27#ifdef CONFIG_DEBUG_LOCK_ALLOC
diff --git a/include/linux/rwsem.h b/include/linux/rwsem.h
index 8d79708146aa..035d3c57fc8a 100644
--- a/include/linux/rwsem.h
+++ b/include/linux/rwsem.h
@@ -13,10 +13,11 @@
13#include <linux/kernel.h> 13#include <linux/kernel.h>
14#include <linux/list.h> 14#include <linux/list.h>
15#include <linux/spinlock.h> 15#include <linux/spinlock.h>
16
17#include <linux/atomic.h> 16#include <linux/atomic.h>
17#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
18#include <linux/osq_lock.h>
19#endif
18 20
19struct optimistic_spin_queue;
20struct rw_semaphore; 21struct rw_semaphore;
21 22
22#ifdef CONFIG_RWSEM_GENERIC_SPINLOCK 23#ifdef CONFIG_RWSEM_GENERIC_SPINLOCK
@@ -25,15 +26,15 @@ struct rw_semaphore;
25/* All arch specific implementations share the same struct */ 26/* All arch specific implementations share the same struct */
26struct rw_semaphore { 27struct rw_semaphore {
27 long count; 28 long count;
28 raw_spinlock_t wait_lock;
29 struct list_head wait_list; 29 struct list_head wait_list;
30#ifdef CONFIG_SMP 30 raw_spinlock_t wait_lock;
31#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
32 struct optimistic_spin_queue osq; /* spinner MCS lock */
31 /* 33 /*
32 * Write owner. Used as a speculative check to see 34 * Write owner. Used as a speculative check to see
33 * if the owner is running on the cpu. 35 * if the owner is running on the cpu.
34 */ 36 */
35 struct task_struct *owner; 37 struct task_struct *owner;
36 struct optimistic_spin_queue *osq; /* spinner MCS lock */
37#endif 38#endif
38#ifdef CONFIG_DEBUG_LOCK_ALLOC 39#ifdef CONFIG_DEBUG_LOCK_ALLOC
39 struct lockdep_map dep_map; 40 struct lockdep_map dep_map;
@@ -64,22 +65,19 @@ static inline int rwsem_is_locked(struct rw_semaphore *sem)
64# define __RWSEM_DEP_MAP_INIT(lockname) 65# define __RWSEM_DEP_MAP_INIT(lockname)
65#endif 66#endif
66 67
67#if defined(CONFIG_SMP) && !defined(CONFIG_RWSEM_GENERIC_SPINLOCK) 68#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
68#define __RWSEM_INITIALIZER(name) \ 69#define __RWSEM_OPT_INIT(lockname) , .osq = OSQ_LOCK_UNLOCKED, .owner = NULL
69 { RWSEM_UNLOCKED_VALUE, \
70 __RAW_SPIN_LOCK_UNLOCKED(name.wait_lock), \
71 LIST_HEAD_INIT((name).wait_list), \
72 NULL, /* owner */ \
73 NULL /* mcs lock */ \
74 __RWSEM_DEP_MAP_INIT(name) }
75#else 70#else
76#define __RWSEM_INITIALIZER(name) \ 71#define __RWSEM_OPT_INIT(lockname)
77 { RWSEM_UNLOCKED_VALUE, \
78 __RAW_SPIN_LOCK_UNLOCKED(name.wait_lock), \
79 LIST_HEAD_INIT((name).wait_list) \
80 __RWSEM_DEP_MAP_INIT(name) }
81#endif 72#endif
82 73
74#define __RWSEM_INITIALIZER(name) \
75 { .count = RWSEM_UNLOCKED_VALUE, \
76 .wait_list = LIST_HEAD_INIT((name).wait_list), \
77 .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(name.wait_lock) \
78 __RWSEM_OPT_INIT(name) \
79 __RWSEM_DEP_MAP_INIT(name) }
80
83#define DECLARE_RWSEM(name) \ 81#define DECLARE_RWSEM(name) \
84 struct rw_semaphore name = __RWSEM_INITIALIZER(name) 82 struct rw_semaphore name = __RWSEM_INITIALIZER(name)
85 83
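[annotation] The move to designated initializers is what lets the optional .osq/.owner fields be appended through a helper macro regardless of configuration: named fields may come in any order and omitted ones are zero-initialized. A userspace model of the same composition trick (all names illustrative), compiled with or without -DDEMO_SPIN_ON_OWNER:

	#include <stdio.h>

	struct demo_sem {
		long count;
		void *owner;	/* only meaningful when spinning is enabled */
	};

	#ifdef DEMO_SPIN_ON_OWNER
	#define DEMO_OPT_INIT	, .owner = ((void *)0)
	#else
	#define DEMO_OPT_INIT
	#endif

	#define DEMO_INITIALIZER	{ .count = 0 DEMO_OPT_INIT }

	static struct demo_sem sem = DEMO_INITIALIZER;

	int main(void)
	{
		printf("count=%ld owner=%p\n", sem.count, sem.owner);
		return 0;
	}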
diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h
index 713b0b88bd5a..c4d86198d3d6 100644
--- a/include/net/netfilter/nf_tables.h
+++ b/include/net/netfilter/nf_tables.h
@@ -6,6 +6,7 @@
6#include <linux/netfilter/nfnetlink.h> 6#include <linux/netfilter/nfnetlink.h>
7#include <linux/netfilter/x_tables.h> 7#include <linux/netfilter/x_tables.h>
8#include <linux/netfilter/nf_tables.h> 8#include <linux/netfilter/nf_tables.h>
9#include <linux/u64_stats_sync.h>
9#include <net/netlink.h> 10#include <net/netlink.h>
10 11
11#define NFT_JUMP_STACK_SIZE 16 12#define NFT_JUMP_STACK_SIZE 16
@@ -528,8 +529,9 @@ enum nft_chain_type {
528}; 529};
529 530
530struct nft_stats { 531struct nft_stats {
531 u64 bytes; 532 u64 bytes;
532 u64 pkts; 533 u64 pkts;
534 struct u64_stats_sync syncp;
533}; 535};
534 536
535#define NFT_HOOK_OPS_MAX 2 537#define NFT_HOOK_OPS_MAX 2
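[annotation] Adding the u64_stats_sync member is what makes the 64-bit counters safe to read on 32-bit SMP: writers bracket their updates and readers retry if they raced with a writer. A sketch of the usual pattern these helpers from <linux/u64_stats_sync.h> enable (kernel context; the two wrapper functions are hypothetical, not nftables code):

	#include <linux/u64_stats_sync.h>

	static void nft_stats_add(struct nft_stats *stats, unsigned int len)
	{
		u64_stats_update_begin(&stats->syncp);	/* writer side */
		stats->pkts++;
		stats->bytes += len;
		u64_stats_update_end(&stats->syncp);
	}

	static void nft_stats_read(struct nft_stats *stats, u64 *pkts, u64 *bytes)
	{
		unsigned int start;

		do {		/* reader side: retry if a writer raced us */
			start = u64_stats_fetch_begin(&stats->syncp);
			*pkts = stats->pkts;
			*bytes = stats->bytes;
		} while (u64_stats_fetch_retry(&stats->syncp, start));
	}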
diff --git a/include/net/netns/nftables.h b/include/net/netns/nftables.h
index 26a394cb91a8..eee608b12cc9 100644
--- a/include/net/netns/nftables.h
+++ b/include/net/netns/nftables.h
@@ -13,8 +13,8 @@ struct netns_nftables {
13 struct nft_af_info *inet; 13 struct nft_af_info *inet;
14 struct nft_af_info *arp; 14 struct nft_af_info *arp;
15 struct nft_af_info *bridge; 15 struct nft_af_info *bridge;
16 unsigned int base_seq;
16 u8 gencursor; 17 u8 gencursor;
17 u8 genctr;
18}; 18};
19 19
20#endif 20#endif
diff --git a/kernel/Kconfig.locks b/kernel/Kconfig.locks
index 35536d9c0964..76768ee812b2 100644
--- a/kernel/Kconfig.locks
+++ b/kernel/Kconfig.locks
@@ -220,9 +220,16 @@ config INLINE_WRITE_UNLOCK_IRQRESTORE
220 220
221endif 221endif
222 222
223config ARCH_SUPPORTS_ATOMIC_RMW
224 bool
225
223config MUTEX_SPIN_ON_OWNER 226config MUTEX_SPIN_ON_OWNER
224 def_bool y 227 def_bool y
225 depends on SMP && !DEBUG_MUTEXES 228 depends on SMP && !DEBUG_MUTEXES && ARCH_SUPPORTS_ATOMIC_RMW
229
230config RWSEM_SPIN_ON_OWNER
231 def_bool y
232 depends on SMP && RWSEM_XCHGADD_ALGORITHM && ARCH_SUPPORTS_ATOMIC_RMW
226 233
227config ARCH_USE_QUEUE_RWLOCK 234config ARCH_USE_QUEUE_RWLOCK
228 bool 235 bool
diff --git a/kernel/locking/mcs_spinlock.c b/kernel/locking/mcs_spinlock.c
index 838dc9e00669..be9ee1559fca 100644
--- a/kernel/locking/mcs_spinlock.c
+++ b/kernel/locking/mcs_spinlock.c
@@ -14,21 +14,47 @@
14 * called from interrupt context and we have preemption disabled while 14 * called from interrupt context and we have preemption disabled while
15 * spinning. 15 * spinning.
16 */ 16 */
17static DEFINE_PER_CPU_SHARED_ALIGNED(struct optimistic_spin_queue, osq_node); 17static DEFINE_PER_CPU_SHARED_ALIGNED(struct optimistic_spin_node, osq_node);
18
19/*
20 * We use the value 0 to represent "no CPU", thus the encoded value
21 * will be the CPU number incremented by 1.
22 */
23static inline int encode_cpu(int cpu_nr)
24{
25 return cpu_nr + 1;
26}
27
28static inline struct optimistic_spin_node *decode_cpu(int encoded_cpu_val)
29{
30 int cpu_nr = encoded_cpu_val - 1;
31
32 return per_cpu_ptr(&osq_node, cpu_nr);
33}
18 34
19/* 35/*
20 * Get a stable @node->next pointer, either for unlock() or unqueue() purposes. 36 * Get a stable @node->next pointer, either for unlock() or unqueue() purposes.
21 * Can return NULL in case we were the last queued and we updated @lock instead. 37 * Can return NULL in case we were the last queued and we updated @lock instead.
22 */ 38 */
23static inline struct optimistic_spin_queue * 39static inline struct optimistic_spin_node *
24osq_wait_next(struct optimistic_spin_queue **lock, 40osq_wait_next(struct optimistic_spin_queue *lock,
25 struct optimistic_spin_queue *node, 41 struct optimistic_spin_node *node,
26 struct optimistic_spin_queue *prev) 42 struct optimistic_spin_node *prev)
27{ 43{
28 struct optimistic_spin_queue *next = NULL; 44 struct optimistic_spin_node *next = NULL;
45 int curr = encode_cpu(smp_processor_id());
46 int old;
47
48 /*
49 * If there is a prev node in queue, then the 'old' value will be
50 * the prev node's CPU #, else it's set to OSQ_UNLOCKED_VAL since,
51 * if we're currently last in queue, the queue will become empty.
52 */
53 old = prev ? prev->cpu : OSQ_UNLOCKED_VAL;
29 54
30 for (;;) { 55 for (;;) {
31 if (*lock == node && cmpxchg(lock, node, prev) == node) { 56 if (atomic_read(&lock->tail) == curr &&
57 atomic_cmpxchg(&lock->tail, curr, old) == curr) {
32 /* 58 /*
33 * We were the last queued, we moved @lock back. @prev 59 * We were the last queued, we moved @lock back. @prev
34 * will now observe @lock and will complete its 60 * will now observe @lock and will complete its
@@ -59,18 +85,23 @@ osq_wait_next(struct optimistic_spin_queue **lock,
59 return next; 85 return next;
60} 86}
61 87
62bool osq_lock(struct optimistic_spin_queue **lock) 88bool osq_lock(struct optimistic_spin_queue *lock)
63{ 89{
64 struct optimistic_spin_queue *node = this_cpu_ptr(&osq_node); 90 struct optimistic_spin_node *node = this_cpu_ptr(&osq_node);
65 struct optimistic_spin_queue *prev, *next; 91 struct optimistic_spin_node *prev, *next;
92 int curr = encode_cpu(smp_processor_id());
93 int old;
66 94
67 node->locked = 0; 95 node->locked = 0;
68 node->next = NULL; 96 node->next = NULL;
97 node->cpu = curr;
69 98
70 node->prev = prev = xchg(lock, node); 99 old = atomic_xchg(&lock->tail, curr);
71 if (likely(prev == NULL)) 100 if (old == OSQ_UNLOCKED_VAL)
72 return true; 101 return true;
73 102
103 prev = decode_cpu(old);
104 node->prev = prev;
74 ACCESS_ONCE(prev->next) = node; 105 ACCESS_ONCE(prev->next) = node;
75 106
76 /* 107 /*
@@ -149,20 +180,21 @@ unqueue:
149 return false; 180 return false;
150} 181}
151 182
152void osq_unlock(struct optimistic_spin_queue **lock) 183void osq_unlock(struct optimistic_spin_queue *lock)
153{ 184{
154 struct optimistic_spin_queue *node = this_cpu_ptr(&osq_node); 185 struct optimistic_spin_node *node, *next;
155 struct optimistic_spin_queue *next; 186 int curr = encode_cpu(smp_processor_id());
156 187
157 /* 188 /*
158 * Fast path for the uncontended case. 189 * Fast path for the uncontended case.
159 */ 190 */
160 if (likely(cmpxchg(lock, node, NULL) == node)) 191 if (likely(atomic_cmpxchg(&lock->tail, curr, OSQ_UNLOCKED_VAL) == curr))
161 return; 192 return;
162 193
163 /* 194 /*
164 * Second most likely case. 195 * Second most likely case.
165 */ 196 */
197 node = this_cpu_ptr(&osq_node);
166 next = xchg(&node->next, NULL); 198 next = xchg(&node->next, NULL);
167 if (next) { 199 if (next) {
168 ACCESS_ONCE(next->locked) = 1; 200 ACCESS_ONCE(next->locked) = 1;
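[annotation] The reason for the detour through CPU numbers is visible in the atomic_cmpxchg() above: the queue tail now fits in a 32-bit atomic_t instead of a pointer. Since 0 must mean "queue empty", CPU n is stored as n + 1. A standalone model of just the encoding:

	#include <assert.h>

	#define OSQ_UNLOCKED_VAL 0	/* mirrors the value in osq_lock.h */

	static int encode_cpu(int cpu_nr)      { return cpu_nr + 1; }
	static int decode_cpu(int encoded_val) { return encoded_val - 1; }

	int main(void)
	{
		assert(encode_cpu(0) != OSQ_UNLOCKED_VAL);	/* CPU 0 is not "empty" */
		assert(decode_cpu(encode_cpu(5)) == 5);		/* round trip */
		return 0;
	}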
diff --git a/kernel/locking/mcs_spinlock.h b/kernel/locking/mcs_spinlock.h
index a2dbac4aca6b..74356dc0ce29 100644
--- a/kernel/locking/mcs_spinlock.h
+++ b/kernel/locking/mcs_spinlock.h
@@ -118,12 +118,13 @@ void mcs_spin_unlock(struct mcs_spinlock **lock, struct mcs_spinlock *node)
118 * mutex_lock()/rwsem_down_{read,write}() etc. 118 * mutex_lock()/rwsem_down_{read,write}() etc.
119 */ 119 */
120 120
121struct optimistic_spin_queue { 121struct optimistic_spin_node {
122 struct optimistic_spin_queue *next, *prev; 122 struct optimistic_spin_node *next, *prev;
123 int locked; /* 1 if lock acquired */ 123 int locked; /* 1 if lock acquired */
124 int cpu; /* encoded CPU # value */
124}; 125};
125 126
126extern bool osq_lock(struct optimistic_spin_queue **lock); 127extern bool osq_lock(struct optimistic_spin_queue *lock);
127extern void osq_unlock(struct optimistic_spin_queue **lock); 128extern void osq_unlock(struct optimistic_spin_queue *lock);
128 129
129#endif /* __LINUX_MCS_SPINLOCK_H */ 130#endif /* __LINUX_MCS_SPINLOCK_H */
diff --git a/kernel/locking/mutex.c b/kernel/locking/mutex.c
index bc73d33c6760..acca2c1a3c5e 100644
--- a/kernel/locking/mutex.c
+++ b/kernel/locking/mutex.c
@@ -60,7 +60,7 @@ __mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
60 INIT_LIST_HEAD(&lock->wait_list); 60 INIT_LIST_HEAD(&lock->wait_list);
61 mutex_clear_owner(lock); 61 mutex_clear_owner(lock);
62#ifdef CONFIG_MUTEX_SPIN_ON_OWNER 62#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
63 lock->osq = NULL; 63 osq_lock_init(&lock->osq);
64#endif 64#endif
65 65
66 debug_mutex_init(lock, name, key); 66 debug_mutex_init(lock, name, key);
diff --git a/kernel/locking/rwsem-spinlock.c b/kernel/locking/rwsem-spinlock.c
index 9be8a9144978..2c93571162cb 100644
--- a/kernel/locking/rwsem-spinlock.c
+++ b/kernel/locking/rwsem-spinlock.c
@@ -26,7 +26,7 @@ int rwsem_is_locked(struct rw_semaphore *sem)
26 unsigned long flags; 26 unsigned long flags;
27 27
28 if (raw_spin_trylock_irqsave(&sem->wait_lock, flags)) { 28 if (raw_spin_trylock_irqsave(&sem->wait_lock, flags)) {
29 ret = (sem->activity != 0); 29 ret = (sem->count != 0);
30 raw_spin_unlock_irqrestore(&sem->wait_lock, flags); 30 raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
31 } 31 }
32 return ret; 32 return ret;
@@ -46,7 +46,7 @@ void __init_rwsem(struct rw_semaphore *sem, const char *name,
46 debug_check_no_locks_freed((void *)sem, sizeof(*sem)); 46 debug_check_no_locks_freed((void *)sem, sizeof(*sem));
47 lockdep_init_map(&sem->dep_map, name, key, 0); 47 lockdep_init_map(&sem->dep_map, name, key, 0);
48#endif 48#endif
49 sem->activity = 0; 49 sem->count = 0;
50 raw_spin_lock_init(&sem->wait_lock); 50 raw_spin_lock_init(&sem->wait_lock);
51 INIT_LIST_HEAD(&sem->wait_list); 51 INIT_LIST_HEAD(&sem->wait_list);
52} 52}
@@ -95,7 +95,7 @@ __rwsem_do_wake(struct rw_semaphore *sem, int wakewrite)
95 waiter = list_entry(next, struct rwsem_waiter, list); 95 waiter = list_entry(next, struct rwsem_waiter, list);
96 } while (waiter->type != RWSEM_WAITING_FOR_WRITE); 96 } while (waiter->type != RWSEM_WAITING_FOR_WRITE);
97 97
98 sem->activity += woken; 98 sem->count += woken;
99 99
100 out: 100 out:
101 return sem; 101 return sem;
@@ -126,9 +126,9 @@ void __sched __down_read(struct rw_semaphore *sem)
126 126
127 raw_spin_lock_irqsave(&sem->wait_lock, flags); 127 raw_spin_lock_irqsave(&sem->wait_lock, flags);
128 128
129 if (sem->activity >= 0 && list_empty(&sem->wait_list)) { 129 if (sem->count >= 0 && list_empty(&sem->wait_list)) {
130 /* granted */ 130 /* granted */
131 sem->activity++; 131 sem->count++;
132 raw_spin_unlock_irqrestore(&sem->wait_lock, flags); 132 raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
133 goto out; 133 goto out;
134 } 134 }
@@ -170,9 +170,9 @@ int __down_read_trylock(struct rw_semaphore *sem)
170 170
171 raw_spin_lock_irqsave(&sem->wait_lock, flags); 171 raw_spin_lock_irqsave(&sem->wait_lock, flags);
172 172
173 if (sem->activity >= 0 && list_empty(&sem->wait_list)) { 173 if (sem->count >= 0 && list_empty(&sem->wait_list)) {
174 /* granted */ 174 /* granted */
175 sem->activity++; 175 sem->count++;
176 ret = 1; 176 ret = 1;
177 } 177 }
178 178
@@ -206,7 +206,7 @@ void __sched __down_write_nested(struct rw_semaphore *sem, int subclass)
206 * itself to sleep, waiting until the system or someone else at 206 * itself to sleep, waiting until the system or someone else at
207 * the head of the wait list wakes it up. 207 * the head of the wait list wakes it up.
208 */ 208 */
209 if (sem->activity == 0) 209 if (sem->count == 0)
210 break; 210 break;
211 set_task_state(tsk, TASK_UNINTERRUPTIBLE); 211 set_task_state(tsk, TASK_UNINTERRUPTIBLE);
212 raw_spin_unlock_irqrestore(&sem->wait_lock, flags); 212 raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
@@ -214,7 +214,7 @@ void __sched __down_write_nested(struct rw_semaphore *sem, int subclass)
214 raw_spin_lock_irqsave(&sem->wait_lock, flags); 214 raw_spin_lock_irqsave(&sem->wait_lock, flags);
215 } 215 }
216 /* got the lock */ 216 /* got the lock */
217 sem->activity = -1; 217 sem->count = -1;
218 list_del(&waiter.list); 218 list_del(&waiter.list);
219 219
220 raw_spin_unlock_irqrestore(&sem->wait_lock, flags); 220 raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
@@ -235,9 +235,9 @@ int __down_write_trylock(struct rw_semaphore *sem)
235 235
236 raw_spin_lock_irqsave(&sem->wait_lock, flags); 236 raw_spin_lock_irqsave(&sem->wait_lock, flags);
237 237
238 if (sem->activity == 0) { 238 if (sem->count == 0) {
239 /* got the lock */ 239 /* got the lock */
240 sem->activity = -1; 240 sem->count = -1;
241 ret = 1; 241 ret = 1;
242 } 242 }
243 243
@@ -255,7 +255,7 @@ void __up_read(struct rw_semaphore *sem)
255 255
256 raw_spin_lock_irqsave(&sem->wait_lock, flags); 256 raw_spin_lock_irqsave(&sem->wait_lock, flags);
257 257
258 if (--sem->activity == 0 && !list_empty(&sem->wait_list)) 258 if (--sem->count == 0 && !list_empty(&sem->wait_list))
259 sem = __rwsem_wake_one_writer(sem); 259 sem = __rwsem_wake_one_writer(sem);
260 260
261 raw_spin_unlock_irqrestore(&sem->wait_lock, flags); 261 raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
@@ -270,7 +270,7 @@ void __up_write(struct rw_semaphore *sem)
270 270
271 raw_spin_lock_irqsave(&sem->wait_lock, flags); 271 raw_spin_lock_irqsave(&sem->wait_lock, flags);
272 272
273 sem->activity = 0; 273 sem->count = 0;
274 if (!list_empty(&sem->wait_list)) 274 if (!list_empty(&sem->wait_list))
275 sem = __rwsem_do_wake(sem, 1); 275 sem = __rwsem_do_wake(sem, 1);
276 276
@@ -287,7 +287,7 @@ void __downgrade_write(struct rw_semaphore *sem)
287 287
288 raw_spin_lock_irqsave(&sem->wait_lock, flags); 288 raw_spin_lock_irqsave(&sem->wait_lock, flags);
289 289
290 sem->activity = 1; 290 sem->count = 1;
291 if (!list_empty(&sem->wait_list)) 291 if (!list_empty(&sem->wait_list))
292 sem = __rwsem_do_wake(sem, 0); 292 sem = __rwsem_do_wake(sem, 0);
293 293
diff --git a/kernel/locking/rwsem-xadd.c b/kernel/locking/rwsem-xadd.c
index dacc32142fcc..a2391ac135c8 100644
--- a/kernel/locking/rwsem-xadd.c
+++ b/kernel/locking/rwsem-xadd.c
@@ -82,9 +82,9 @@ void __init_rwsem(struct rw_semaphore *sem, const char *name,
82 sem->count = RWSEM_UNLOCKED_VALUE; 82 sem->count = RWSEM_UNLOCKED_VALUE;
83 raw_spin_lock_init(&sem->wait_lock); 83 raw_spin_lock_init(&sem->wait_lock);
84 INIT_LIST_HEAD(&sem->wait_list); 84 INIT_LIST_HEAD(&sem->wait_list);
85#ifdef CONFIG_SMP 85#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
86 sem->owner = NULL; 86 sem->owner = NULL;
87 sem->osq = NULL; 87 osq_lock_init(&sem->osq);
88#endif 88#endif
89} 89}
90 90
@@ -262,7 +262,7 @@ static inline bool rwsem_try_write_lock(long count, struct rw_semaphore *sem)
262 return false; 262 return false;
263} 263}
264 264
265#ifdef CONFIG_SMP 265#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
266/* 266/*
267 * Try to acquire write lock before the writer has been put on wait queue. 267 * Try to acquire write lock before the writer has been put on wait queue.
268 */ 268 */
@@ -285,10 +285,10 @@ static inline bool rwsem_try_write_lock_unqueued(struct rw_semaphore *sem)
285static inline bool rwsem_can_spin_on_owner(struct rw_semaphore *sem) 285static inline bool rwsem_can_spin_on_owner(struct rw_semaphore *sem)
286{ 286{
287 struct task_struct *owner; 287 struct task_struct *owner;
288 bool on_cpu = true; 288 bool on_cpu = false;
289 289
290 if (need_resched()) 290 if (need_resched())
291 return 0; 291 return false;
292 292
293 rcu_read_lock(); 293 rcu_read_lock();
294 owner = ACCESS_ONCE(sem->owner); 294 owner = ACCESS_ONCE(sem->owner);
@@ -297,9 +297,9 @@ static inline bool rwsem_can_spin_on_owner(struct rw_semaphore *sem)
297 rcu_read_unlock(); 297 rcu_read_unlock();
298 298
299 /* 299 /*
300 * If sem->owner is not set, the rwsem owner may have 300 * If sem->owner is not set, yet we have just recently entered the
301 * just acquired it and not set the owner yet or the rwsem 301 * slowpath, then there is a possibility reader(s) may have the lock.
302 * has been released. 302 * To be safe, avoid spinning in these situations.
303 */ 303 */
304 return on_cpu; 304 return on_cpu;
305} 305}
diff --git a/kernel/locking/rwsem.c b/kernel/locking/rwsem.c
index 42f806de49d4..e2d3bc7f03b4 100644
--- a/kernel/locking/rwsem.c
+++ b/kernel/locking/rwsem.c
@@ -12,7 +12,7 @@
12 12
13#include <linux/atomic.h> 13#include <linux/atomic.h>
14 14
15#if defined(CONFIG_SMP) && defined(CONFIG_RWSEM_XCHGADD_ALGORITHM) 15#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
16static inline void rwsem_set_owner(struct rw_semaphore *sem) 16static inline void rwsem_set_owner(struct rw_semaphore *sem)
17{ 17{
18 sem->owner = current; 18 sem->owner = current;
diff --git a/kernel/power/process.c b/kernel/power/process.c
index 0ca8d83e2369..4ee194eb524b 100644
--- a/kernel/power/process.c
+++ b/kernel/power/process.c
@@ -186,6 +186,7 @@ void thaw_processes(void)
186 186
187 printk("Restarting tasks ... "); 187 printk("Restarting tasks ... ");
188 188
189 __usermodehelper_set_disable_depth(UMH_FREEZING);
189 thaw_workqueues(); 190 thaw_workqueues();
190 191
191 read_lock(&tasklist_lock); 192 read_lock(&tasklist_lock);
diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c
index 4dd8822f732a..ed35a4790afe 100644
--- a/kernel/power/suspend.c
+++ b/kernel/power/suspend.c
@@ -306,7 +306,7 @@ int suspend_devices_and_enter(suspend_state_t state)
306 error = suspend_ops->begin(state); 306 error = suspend_ops->begin(state);
307 if (error) 307 if (error)
308 goto Close; 308 goto Close;
309 } else if (state == PM_SUSPEND_FREEZE && freeze_ops->begin) { 309 } else if (state == PM_SUSPEND_FREEZE && freeze_ops && freeze_ops->begin) {
310 error = freeze_ops->begin(); 310 error = freeze_ops->begin();
311 if (error) 311 if (error)
312 goto Close; 312 goto Close;
@@ -335,7 +335,7 @@ int suspend_devices_and_enter(suspend_state_t state)
335 Close: 335 Close:
336 if (need_suspend_ops(state) && suspend_ops->end) 336 if (need_suspend_ops(state) && suspend_ops->end)
337 suspend_ops->end(); 337 suspend_ops->end();
338 else if (state == PM_SUSPEND_FREEZE && freeze_ops->end) 338 else if (state == PM_SUSPEND_FREEZE && freeze_ops && freeze_ops->end)
339 freeze_ops->end(); 339 freeze_ops->end();
340 340
341 return error; 341 return error;
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index f1ba77363fbb..625d0b0cd75a 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -206,6 +206,70 @@ void rcu_bh_qs(int cpu)
206 rdp->passed_quiesce = 1; 206 rdp->passed_quiesce = 1;
207} 207}
208 208
209static DEFINE_PER_CPU(int, rcu_sched_qs_mask);
210
211static DEFINE_PER_CPU(struct rcu_dynticks, rcu_dynticks) = {
212 .dynticks_nesting = DYNTICK_TASK_EXIT_IDLE,
213 .dynticks = ATOMIC_INIT(1),
214#ifdef CONFIG_NO_HZ_FULL_SYSIDLE
215 .dynticks_idle_nesting = DYNTICK_TASK_NEST_VALUE,
216 .dynticks_idle = ATOMIC_INIT(1),
217#endif /* #ifdef CONFIG_NO_HZ_FULL_SYSIDLE */
218};
219
220/*
221 * Let the RCU core know that this CPU has gone through the scheduler,
222 * which is a quiescent state. This is called when the need for a
223 * quiescent state is urgent, so we burn an atomic operation and full
224 * memory barriers to let the RCU core know about it, regardless of what
225 * this CPU might (or might not) do in the near future.
226 *
227 * We inform the RCU core by emulating a zero-duration dyntick-idle
228 * period, which we in turn do by incrementing the ->dynticks counter
229 * by two.
230 */
231static void rcu_momentary_dyntick_idle(void)
232{
233 unsigned long flags;
234 struct rcu_data *rdp;
235 struct rcu_dynticks *rdtp;
236 int resched_mask;
237 struct rcu_state *rsp;
238
239 local_irq_save(flags);
240
241 /*
242 * Yes, we can lose flag-setting operations. This is OK, because
243 * the flag will be set again after some delay.
244 */
245 resched_mask = raw_cpu_read(rcu_sched_qs_mask);
246 raw_cpu_write(rcu_sched_qs_mask, 0);
247
248 /* Find the flavor that needs a quiescent state. */
249 for_each_rcu_flavor(rsp) {
250 rdp = raw_cpu_ptr(rsp->rda);
251 if (!(resched_mask & rsp->flavor_mask))
252 continue;
253 smp_mb(); /* rcu_sched_qs_mask before cond_resched_completed. */
254 if (ACCESS_ONCE(rdp->mynode->completed) !=
255 ACCESS_ONCE(rdp->cond_resched_completed))
256 continue;
257
258 /*
259 * Pretend to be momentarily idle for the quiescent state.
260 * This allows the grace-period kthread to record the
261 * quiescent state, with no need for this CPU to do anything
262 * further.
263 */
264 rdtp = this_cpu_ptr(&rcu_dynticks);
265 smp_mb__before_atomic(); /* Earlier stuff before QS. */
266 atomic_add(2, &rdtp->dynticks); /* QS. */
267 smp_mb__after_atomic(); /* Later stuff after QS. */
268 break;
269 }
270 local_irq_restore(flags);
271}
272
209/* 273/*
210 * Note a context switch. This is a quiescent state for RCU-sched, 274 * Note a context switch. This is a quiescent state for RCU-sched,
211 * and requires special handling for preemptible RCU. 275 * and requires special handling for preemptible RCU.
@@ -216,19 +280,12 @@ void rcu_note_context_switch(int cpu)
216 trace_rcu_utilization(TPS("Start context switch")); 280 trace_rcu_utilization(TPS("Start context switch"));
217 rcu_sched_qs(cpu); 281 rcu_sched_qs(cpu);
218 rcu_preempt_note_context_switch(cpu); 282 rcu_preempt_note_context_switch(cpu);
283 if (unlikely(raw_cpu_read(rcu_sched_qs_mask)))
284 rcu_momentary_dyntick_idle();
219 trace_rcu_utilization(TPS("End context switch")); 285 trace_rcu_utilization(TPS("End context switch"));
220} 286}
221EXPORT_SYMBOL_GPL(rcu_note_context_switch); 287EXPORT_SYMBOL_GPL(rcu_note_context_switch);
222 288
223static DEFINE_PER_CPU(struct rcu_dynticks, rcu_dynticks) = {
224 .dynticks_nesting = DYNTICK_TASK_EXIT_IDLE,
225 .dynticks = ATOMIC_INIT(1),
226#ifdef CONFIG_NO_HZ_FULL_SYSIDLE
227 .dynticks_idle_nesting = DYNTICK_TASK_NEST_VALUE,
228 .dynticks_idle = ATOMIC_INIT(1),
229#endif /* #ifdef CONFIG_NO_HZ_FULL_SYSIDLE */
230};
231
232static long blimit = 10; /* Maximum callbacks per rcu_do_batch. */ 289static long blimit = 10; /* Maximum callbacks per rcu_do_batch. */
233static long qhimark = 10000; /* If this many pending, ignore blimit. */ 290static long qhimark = 10000; /* If this many pending, ignore blimit. */
234static long qlowmark = 100; /* Once only this many pending, use blimit. */ 291static long qlowmark = 100; /* Once only this many pending, use blimit. */
@@ -243,6 +300,13 @@ static ulong jiffies_till_next_fqs = ULONG_MAX;
243module_param(jiffies_till_first_fqs, ulong, 0644); 300module_param(jiffies_till_first_fqs, ulong, 0644);
244module_param(jiffies_till_next_fqs, ulong, 0644); 301module_param(jiffies_till_next_fqs, ulong, 0644);
245 302
303/*
304 * How long the grace period must be before we start recruiting
305 * quiescent-state help from rcu_note_context_switch().
306 */
307static ulong jiffies_till_sched_qs = HZ / 20;
308module_param(jiffies_till_sched_qs, ulong, 0644);
309
246static bool rcu_start_gp_advanced(struct rcu_state *rsp, struct rcu_node *rnp, 310static bool rcu_start_gp_advanced(struct rcu_state *rsp, struct rcu_node *rnp,
247 struct rcu_data *rdp); 311 struct rcu_data *rdp);
248static void force_qs_rnp(struct rcu_state *rsp, 312static void force_qs_rnp(struct rcu_state *rsp,
@@ -853,6 +917,7 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp,
853 bool *isidle, unsigned long *maxj) 917 bool *isidle, unsigned long *maxj)
854{ 918{
855 unsigned int curr; 919 unsigned int curr;
920 int *rcrmp;
856 unsigned int snap; 921 unsigned int snap;
857 922
858 curr = (unsigned int)atomic_add_return(0, &rdp->dynticks->dynticks); 923 curr = (unsigned int)atomic_add_return(0, &rdp->dynticks->dynticks);
@@ -893,27 +958,43 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp,
893 } 958 }
894 959
895 /* 960 /*
896 * There is a possibility that a CPU in adaptive-ticks state 961 * A CPU running for an extended time within the kernel can
897 * might run in the kernel with the scheduling-clock tick disabled 962 * delay RCU grace periods. When the CPU is in NO_HZ_FULL mode,
898 * for an extended time period. Invoke rcu_kick_nohz_cpu() to 963 * even context-switching back and forth between a pair of
899 * force the CPU to restart the scheduling-clock tick in this 964 * in-kernel CPU-bound tasks cannot advance grace periods.
900 * CPU is in this state. 965 * So if the grace period is old enough, make the CPU pay attention.
901 */ 966 * Note that the unsynchronized assignments to the per-CPU
902 rcu_kick_nohz_cpu(rdp->cpu); 967 * rcu_sched_qs_mask variable are safe. Yes, setting of
903 968 * bits can be lost, but they will be set again on the next
904 /* 969 * force-quiescent-state pass. So lost bit sets do not result
905 * Alternatively, the CPU might be running in the kernel 970 * in incorrect behavior, merely in a grace period lasting
906 * for an extended period of time without a quiescent state. 971 * a few jiffies longer than it might otherwise. Because
907 * Attempt to force the CPU through the scheduler to gain the 972 * there are at most four threads involved, and because the
908 * needed quiescent state, but only if the grace period has gone 973 * updates are only once every few jiffies, the probability of
909 * on for an uncommonly long time. If there are many stuck CPUs, 974 * lossage (and thus of slight grace-period extension) is
910 * we will beat on the first one until it gets unstuck, then move 975 * quite low.
911 * to the next. Only do this for the primary flavor of RCU. 976 *
977 * Note that if the jiffies_till_sched_qs boot/sysfs parameter
978 * is set too high, we override with half of the RCU CPU stall
979 * warning delay.
912 */ 980 */
913 if (rdp->rsp == rcu_state_p && 981 rcrmp = &per_cpu(rcu_sched_qs_mask, rdp->cpu);
982 if (ULONG_CMP_GE(jiffies,
983 rdp->rsp->gp_start + jiffies_till_sched_qs) ||
914 ULONG_CMP_GE(jiffies, rdp->rsp->jiffies_resched)) { 984 ULONG_CMP_GE(jiffies, rdp->rsp->jiffies_resched)) {
915 rdp->rsp->jiffies_resched += 5; 985 if (!(ACCESS_ONCE(*rcrmp) & rdp->rsp->flavor_mask)) {
916 resched_cpu(rdp->cpu); 986 ACCESS_ONCE(rdp->cond_resched_completed) =
987 ACCESS_ONCE(rdp->mynode->completed);
988 smp_mb(); /* ->cond_resched_completed before *rcrmp. */
989 ACCESS_ONCE(*rcrmp) =
990 ACCESS_ONCE(*rcrmp) + rdp->rsp->flavor_mask;
991 resched_cpu(rdp->cpu); /* Force CPU into scheduler. */
992 rdp->rsp->jiffies_resched += 5; /* Enable beating. */
993 } else if (ULONG_CMP_GE(jiffies, rdp->rsp->jiffies_resched)) {
994 /* Time to beat on that CPU again! */
995 resched_cpu(rdp->cpu); /* Force CPU into scheduler. */
996 rdp->rsp->jiffies_resched += 5; /* Re-enable beating. */
997 }
917 } 998 }
918 999
919 return 0; 1000 return 0;
@@ -3491,6 +3572,7 @@ static void __init rcu_init_one(struct rcu_state *rsp,
3491 "rcu_node_fqs_1", 3572 "rcu_node_fqs_1",
3492 "rcu_node_fqs_2", 3573 "rcu_node_fqs_2",
3493 "rcu_node_fqs_3" }; /* Match MAX_RCU_LVLS */ 3574 "rcu_node_fqs_3" }; /* Match MAX_RCU_LVLS */
3575 static u8 fl_mask = 0x1;
3494 int cpustride = 1; 3576 int cpustride = 1;
3495 int i; 3577 int i;
3496 int j; 3578 int j;
@@ -3509,6 +3591,8 @@ static void __init rcu_init_one(struct rcu_state *rsp,
3509 for (i = 1; i < rcu_num_lvls; i++) 3591 for (i = 1; i < rcu_num_lvls; i++)
3510 rsp->level[i] = rsp->level[i - 1] + rsp->levelcnt[i - 1]; 3592 rsp->level[i] = rsp->level[i - 1] + rsp->levelcnt[i - 1];
3511 rcu_init_levelspread(rsp); 3593 rcu_init_levelspread(rsp);
3594 rsp->flavor_mask = fl_mask;
3595 fl_mask <<= 1;
3512 3596
3513 /* Initialize the elements themselves, starting from the leaves. */ 3597 /* Initialize the elements themselves, starting from the leaves. */
3514 3598
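[annotation] rcu_momentary_dyntick_idle() leans on the ->dynticks convention: the counter is odd while the CPU is non-idle and even while idle, and the grace-period machinery accepts any advance of two or more as proof of a quiescent state. Adding 2 therefore reports a zero-duration idle period without disturbing the parity. A userspace model of the invariant (C11 atomics standing in for the kernel's atomic_t):

	#include <assert.h>
	#include <stdatomic.h>

	int main(void)
	{
		atomic_int dynticks = 1;	/* odd: CPU currently non-idle */
		int snap = atomic_load(&dynticks);

		atomic_fetch_add(&dynticks, 2);	/* the zero-duration "idle" period */

		assert(atomic_load(&dynticks) % 2 == 1);	/* parity preserved */
		assert(atomic_load(&dynticks) - snap >= 2);	/* QS is observable */
		return 0;
	}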
diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h
index bf2c1e669691..0f69a79c5b7d 100644
--- a/kernel/rcu/tree.h
+++ b/kernel/rcu/tree.h
@@ -307,6 +307,9 @@ struct rcu_data {
307 /* 4) reasons this CPU needed to be kicked by force_quiescent_state */ 307 /* 4) reasons this CPU needed to be kicked by force_quiescent_state */
308 unsigned long dynticks_fqs; /* Kicked due to dynticks idle. */ 308 unsigned long dynticks_fqs; /* Kicked due to dynticks idle. */
309 unsigned long offline_fqs; /* Kicked due to being offline. */ 309 unsigned long offline_fqs; /* Kicked due to being offline. */
310 unsigned long cond_resched_completed;
311 /* Grace period that needs help */
312 /* from cond_resched(). */
310 313
311 /* 5) __rcu_pending() statistics. */ 314 /* 5) __rcu_pending() statistics. */
312 unsigned long n_rcu_pending; /* rcu_pending() calls since boot. */ 315 unsigned long n_rcu_pending; /* rcu_pending() calls since boot. */
@@ -392,6 +395,7 @@ struct rcu_state {
392 struct rcu_node *level[RCU_NUM_LVLS]; /* Hierarchy levels. */ 395 struct rcu_node *level[RCU_NUM_LVLS]; /* Hierarchy levels. */
393 u32 levelcnt[MAX_RCU_LVLS + 1]; /* # nodes in each level. */ 396 u32 levelcnt[MAX_RCU_LVLS + 1]; /* # nodes in each level. */
394 u8 levelspread[RCU_NUM_LVLS]; /* kids/node in each level. */ 397 u8 levelspread[RCU_NUM_LVLS]; /* kids/node in each level. */
398 u8 flavor_mask; /* bit in flavor mask. */
395 struct rcu_data __percpu *rda; /* pointer to per-CPU rcu_data. */ 399 struct rcu_data __percpu *rda; /* pointer to per-CPU rcu_data. */
396 void (*call)(struct rcu_head *head, /* call_rcu() flavor. */ 400 void (*call)(struct rcu_head *head, /* call_rcu() flavor. */
397 void (*func)(struct rcu_head *head)); 401 void (*func)(struct rcu_head *head));
@@ -563,7 +567,7 @@ static bool rcu_nocb_need_deferred_wakeup(struct rcu_data *rdp);
563static void do_nocb_deferred_wakeup(struct rcu_data *rdp); 567static void do_nocb_deferred_wakeup(struct rcu_data *rdp);
564static void rcu_boot_init_nocb_percpu_data(struct rcu_data *rdp); 568static void rcu_boot_init_nocb_percpu_data(struct rcu_data *rdp);
565static void rcu_spawn_nocb_kthreads(struct rcu_state *rsp); 569static void rcu_spawn_nocb_kthreads(struct rcu_state *rsp);
566static void rcu_kick_nohz_cpu(int cpu); 570static void __maybe_unused rcu_kick_nohz_cpu(int cpu);
567static bool init_nocb_callback_list(struct rcu_data *rdp); 571static bool init_nocb_callback_list(struct rcu_data *rdp);
568static void rcu_sysidle_enter(struct rcu_dynticks *rdtp, int irq); 572static void rcu_sysidle_enter(struct rcu_dynticks *rdtp, int irq);
569static void rcu_sysidle_exit(struct rcu_dynticks *rdtp, int irq); 573static void rcu_sysidle_exit(struct rcu_dynticks *rdtp, int irq);
diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
index cbc2c45265e2..02ac0fb186b8 100644
--- a/kernel/rcu/tree_plugin.h
+++ b/kernel/rcu/tree_plugin.h
@@ -2404,7 +2404,7 @@ static bool init_nocb_callback_list(struct rcu_data *rdp)
2404 * if an adaptive-ticks CPU is failing to respond to the current grace 2404 * if an adaptive-ticks CPU is failing to respond to the current grace
2405 * period and has not been idle from an RCU perspective, kick it. 2405 * period and has not been idle from an RCU perspective, kick it.
2406 */ 2406 */
2407static void rcu_kick_nohz_cpu(int cpu) 2407static void __maybe_unused rcu_kick_nohz_cpu(int cpu)
2408{ 2408{
2409#ifdef CONFIG_NO_HZ_FULL 2409#ifdef CONFIG_NO_HZ_FULL
2410 if (tick_nohz_full_cpu(cpu)) 2410 if (tick_nohz_full_cpu(cpu))
diff --git a/kernel/rcu/update.c b/kernel/rcu/update.c
index a2aeb4df0f60..bc7883570530 100644
--- a/kernel/rcu/update.c
+++ b/kernel/rcu/update.c
@@ -200,12 +200,12 @@ void wait_rcu_gp(call_rcu_func_t crf)
200EXPORT_SYMBOL_GPL(wait_rcu_gp); 200EXPORT_SYMBOL_GPL(wait_rcu_gp);
201 201
202#ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD 202#ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
203static inline void debug_init_rcu_head(struct rcu_head *head) 203void init_rcu_head(struct rcu_head *head)
204{ 204{
205 debug_object_init(head, &rcuhead_debug_descr); 205 debug_object_init(head, &rcuhead_debug_descr);
206} 206}
207 207
208static inline void debug_rcu_head_free(struct rcu_head *head) 208void destroy_rcu_head(struct rcu_head *head)
209{ 209{
210 debug_object_free(head, &rcuhead_debug_descr); 210 debug_object_free(head, &rcuhead_debug_descr);
211} 211}
@@ -350,21 +350,3 @@ static int __init check_cpu_stall_init(void)
350early_initcall(check_cpu_stall_init); 350early_initcall(check_cpu_stall_init);
351 351
352#endif /* #ifdef CONFIG_RCU_STALL_COMMON */ 352#endif /* #ifdef CONFIG_RCU_STALL_COMMON */
353
354/*
355 * Hooks for cond_resched() and friends to avoid RCU CPU stall warnings.
356 */
357
358DEFINE_PER_CPU(int, rcu_cond_resched_count);
359
360/*
361 * Report a set of RCU quiescent states, for use by cond_resched()
362 * and friends. Out of line due to being called infrequently.
363 */
364void rcu_resched(void)
365{
366 preempt_disable();
367 __this_cpu_write(rcu_cond_resched_count, 0);
368 rcu_note_context_switch(smp_processor_id());
369 preempt_enable();
370}
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 3bdf01b494fe..bc1638b33449 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -4147,7 +4147,6 @@ static void __cond_resched(void)
4147 4147
4148int __sched _cond_resched(void) 4148int __sched _cond_resched(void)
4149{ 4149{
4150 rcu_cond_resched();
4151 if (should_resched()) { 4150 if (should_resched()) {
4152 __cond_resched(); 4151 __cond_resched();
4153 return 1; 4152 return 1;
@@ -4166,18 +4165,15 @@ EXPORT_SYMBOL(_cond_resched);
4166 */ 4165 */
4167int __cond_resched_lock(spinlock_t *lock) 4166int __cond_resched_lock(spinlock_t *lock)
4168{ 4167{
4169 bool need_rcu_resched = rcu_should_resched();
4170 int resched = should_resched(); 4168 int resched = should_resched();
4171 int ret = 0; 4169 int ret = 0;
4172 4170
4173 lockdep_assert_held(lock); 4171 lockdep_assert_held(lock);
4174 4172
4175 if (spin_needbreak(lock) || resched || need_rcu_resched) { 4173 if (spin_needbreak(lock) || resched) {
4176 spin_unlock(lock); 4174 spin_unlock(lock);
4177 if (resched) 4175 if (resched)
4178 __cond_resched(); 4176 __cond_resched();
4179 else if (unlikely(need_rcu_resched))
4180 rcu_resched();
4181 else 4177 else
4182 cpu_relax(); 4178 cpu_relax();
4183 ret = 1; 4179 ret = 1;
@@ -4191,7 +4187,6 @@ int __sched __cond_resched_softirq(void)
4191{ 4187{
4192 BUG_ON(!in_softirq()); 4188 BUG_ON(!in_softirq());
4193 4189
4194 rcu_cond_resched(); /* BH disabled OK, just recording QSes. */
4195 if (should_resched()) { 4190 if (should_resched()) {
4196 local_bh_enable(); 4191 local_bh_enable();
4197 __cond_resched(); 4192 __cond_resched();
diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
index 695f9773bb60..627b3c34b821 100644
--- a/kernel/sched/debug.c
+++ b/kernel/sched/debug.c
@@ -608,7 +608,7 @@ void proc_sched_show_task(struct task_struct *p, struct seq_file *m)
608 608
609 avg_atom = p->se.sum_exec_runtime; 609 avg_atom = p->se.sum_exec_runtime;
610 if (nr_switches) 610 if (nr_switches)
611 do_div(avg_atom, nr_switches); 611 avg_atom = div64_ul(avg_atom, nr_switches);
612 else 612 else
613 avg_atom = -1LL; 613 avg_atom = -1LL;
614 614
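[annotation] do_div() takes a 32-bit divisor (and divides its first argument in place), so the u64 nr_switches was silently truncated; a value such as 0x100000000 truncates to 0 and turns the division into a divide-by-zero. div64_ul() keeps the divisor's full width. A userspace model of the truncation:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint64_t avg_atom = 1ULL << 40;
		uint64_t nr_switches = 0x100000001ULL;	/* does not fit in 32 bits */

		/* do_div()-style: divisor truncated to u32 (here 1; 0x100000000
		 * would truncate to 0 and fault). div64_ul()-style: full width. */
		uint64_t truncated = avg_atom / (uint32_t)nr_switches;
		uint64_t full      = avg_atom / nr_switches;

		printf("truncated=%llu full=%llu\n",
		       (unsigned long long)truncated, (unsigned long long)full);
		return 0;
	}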
diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c
index 88c9c65a430d..fe75444ae7ec 100644
--- a/kernel/time/alarmtimer.c
+++ b/kernel/time/alarmtimer.c
@@ -585,9 +585,14 @@ static int alarm_timer_set(struct k_itimer *timr, int flags,
585 struct itimerspec *new_setting, 585 struct itimerspec *new_setting,
586 struct itimerspec *old_setting) 586 struct itimerspec *old_setting)
587{ 587{
588 ktime_t exp;
589
588 if (!rtcdev) 590 if (!rtcdev)
589 return -ENOTSUPP; 591 return -ENOTSUPP;
590 592
593 if (flags & ~TIMER_ABSTIME)
594 return -EINVAL;
595
591 if (old_setting) 596 if (old_setting)
592 alarm_timer_get(timr, old_setting); 597 alarm_timer_get(timr, old_setting);
593 598
@@ -597,8 +602,16 @@ static int alarm_timer_set(struct k_itimer *timr, int flags,
597 602
598 /* start the timer */ 603 /* start the timer */
599 timr->it.alarm.interval = timespec_to_ktime(new_setting->it_interval); 604 timr->it.alarm.interval = timespec_to_ktime(new_setting->it_interval);
600 alarm_start(&timr->it.alarm.alarmtimer, 605 exp = timespec_to_ktime(new_setting->it_value);
601 timespec_to_ktime(new_setting->it_value)); 606 /* Convert (if necessary) to absolute time */
607 if (flags != TIMER_ABSTIME) {
608 ktime_t now;
609
610 now = alarm_bases[timr->it.alarm.alarmtimer.type].gettime();
611 exp = ktime_add(now, exp);
612 }
613
614 alarm_start(&timr->it.alarm.alarmtimer, exp);
602 return 0; 615 return 0;
603} 616}
604 617
@@ -730,6 +743,9 @@ static int alarm_timer_nsleep(const clockid_t which_clock, int flags,
730 if (!alarmtimer_get_rtcdev()) 743 if (!alarmtimer_get_rtcdev())
731 return -ENOTSUPP; 744 return -ENOTSUPP;
732 745
746 if (flags & ~TIMER_ABSTIME)
747 return -EINVAL;
748
733 if (!capable(CAP_WAKE_ALARM)) 749 if (!capable(CAP_WAKE_ALARM))
734 return -EPERM; 750 return -EPERM;
735 751
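[annotation] The fix above does two things: it rejects undefined flag bits up front, and it converts a relative it_value into an absolute expiry against the alarm base's current time. A userspace model of that decision, assuming a plain-integer stand-in for ktime_t (absolute_expiry() is illustrative, not the kernel function):

	#include <stdint.h>
	#include <stdio.h>

	#define TIMER_ABSTIME 0x01	/* as in the uapi headers */

	typedef int64_t ktime_t;	/* nanoseconds, modeled as a plain integer */

	static ktime_t absolute_expiry(int flags, ktime_t now, ktime_t value)
	{
		if (flags & ~TIMER_ABSTIME)
			return -1;		/* the new -EINVAL path */
		if (flags != TIMER_ABSTIME)
			return now + value;	/* relative request: convert */
		return value;			/* already absolute */
	}

	int main(void)
	{
		printf("%lld\n", (long long)absolute_expiry(0, 1000, 500));		/* 1500 */
		printf("%lld\n", (long long)absolute_expiry(TIMER_ABSTIME, 1000, 500));	/* 500 */
		return 0;
	}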
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 5b372e3ed675..ac9d1dad630b 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -265,12 +265,12 @@ static void update_ftrace_function(void)
265 func = ftrace_ops_list_func; 265 func = ftrace_ops_list_func;
266 } 266 }
267 267
268 update_function_graph_func();
269
268 /* If there's no change, then do nothing more here */ 270 /* If there's no change, then do nothing more here */
269 if (ftrace_trace_function == func) 271 if (ftrace_trace_function == func)
270 return; 272 return;
271 273
272 update_function_graph_func();
273
274 /* 274 /*
275 * If we are using the list function, it doesn't care 275 * If we are using the list function, it doesn't care
276 * about the function_trace_ops. 276 * about the function_trace_ops.
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 7c56c3d06943..ff7027199a9a 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -616,10 +616,6 @@ int ring_buffer_poll_wait(struct ring_buffer *buffer, int cpu,
616 struct ring_buffer_per_cpu *cpu_buffer; 616 struct ring_buffer_per_cpu *cpu_buffer;
617 struct rb_irq_work *work; 617 struct rb_irq_work *work;
618 618
619 if ((cpu == RING_BUFFER_ALL_CPUS && !ring_buffer_empty(buffer)) ||
620 (cpu != RING_BUFFER_ALL_CPUS && !ring_buffer_empty_cpu(buffer, cpu)))
621 return POLLIN | POLLRDNORM;
622
623 if (cpu == RING_BUFFER_ALL_CPUS) 619 if (cpu == RING_BUFFER_ALL_CPUS)
624 work = &buffer->irq_work; 620 work = &buffer->irq_work;
625 else { 621 else {
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index f243444a3772..291397e66669 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -466,6 +466,12 @@ int __trace_puts(unsigned long ip, const char *str, int size)
466 struct print_entry *entry; 466 struct print_entry *entry;
467 unsigned long irq_flags; 467 unsigned long irq_flags;
468 int alloc; 468 int alloc;
469 int pc;
470
471 if (!(trace_flags & TRACE_ITER_PRINTK))
472 return 0;
473
474 pc = preempt_count();
469 475
470 if (unlikely(tracing_selftest_running || tracing_disabled)) 476 if (unlikely(tracing_selftest_running || tracing_disabled))
471 return 0; 477 return 0;
@@ -475,7 +481,7 @@ int __trace_puts(unsigned long ip, const char *str, int size)
475 local_save_flags(irq_flags); 481 local_save_flags(irq_flags);
476 buffer = global_trace.trace_buffer.buffer; 482 buffer = global_trace.trace_buffer.buffer;
477 event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc, 483 event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc,
478 irq_flags, preempt_count()); 484 irq_flags, pc);
479 if (!event) 485 if (!event)
480 return 0; 486 return 0;
481 487
@@ -492,6 +498,7 @@ int __trace_puts(unsigned long ip, const char *str, int size)
492 entry->buf[size] = '\0'; 498 entry->buf[size] = '\0';
493 499
494 __buffer_unlock_commit(buffer, event); 500 __buffer_unlock_commit(buffer, event);
501 ftrace_trace_stack(buffer, irq_flags, 4, pc);
495 502
496 return size; 503 return size;
497} 504}
@@ -509,6 +516,12 @@ int __trace_bputs(unsigned long ip, const char *str)
509 struct bputs_entry *entry; 516 struct bputs_entry *entry;
510 unsigned long irq_flags; 517 unsigned long irq_flags;
511 int size = sizeof(struct bputs_entry); 518 int size = sizeof(struct bputs_entry);
519 int pc;
520
521 if (!(trace_flags & TRACE_ITER_PRINTK))
522 return 0;
523
524 pc = preempt_count();
512 525
513 if (unlikely(tracing_selftest_running || tracing_disabled)) 526 if (unlikely(tracing_selftest_running || tracing_disabled))
514 return 0; 527 return 0;
@@ -516,7 +529,7 @@ int __trace_bputs(unsigned long ip, const char *str)
516 local_save_flags(irq_flags); 529 local_save_flags(irq_flags);
517 buffer = global_trace.trace_buffer.buffer; 530 buffer = global_trace.trace_buffer.buffer;
518 event = trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size, 531 event = trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size,
519 irq_flags, preempt_count()); 532 irq_flags, pc);
520 if (!event) 533 if (!event)
521 return 0; 534 return 0;
522 535
@@ -525,6 +538,7 @@ int __trace_bputs(unsigned long ip, const char *str)
525 entry->str = str; 538 entry->str = str;
526 539
527 __buffer_unlock_commit(buffer, event); 540 __buffer_unlock_commit(buffer, event);
541 ftrace_trace_stack(buffer, irq_flags, 4, pc);
528 542
529 return 1; 543 return 1;
530} 544}
@@ -809,7 +823,7 @@ static struct {
809 { trace_clock_local, "local", 1 }, 823 { trace_clock_local, "local", 1 },
810 { trace_clock_global, "global", 1 }, 824 { trace_clock_global, "global", 1 },
811 { trace_clock_counter, "counter", 0 }, 825 { trace_clock_counter, "counter", 0 },
812 { trace_clock_jiffies, "uptime", 1 }, 826 { trace_clock_jiffies, "uptime", 0 },
813 { trace_clock, "perf", 1 }, 827 { trace_clock, "perf", 1 },
814 ARCH_TRACE_CLOCKS 828 ARCH_TRACE_CLOCKS
815}; 829};
diff --git a/kernel/trace/trace_clock.c b/kernel/trace/trace_clock.c
index 26dc348332b7..57b67b1f24d1 100644
--- a/kernel/trace/trace_clock.c
+++ b/kernel/trace/trace_clock.c
@@ -59,13 +59,14 @@ u64 notrace trace_clock(void)
59 59
60/* 60/*
61 * trace_jiffy_clock(): Simply use jiffies as a clock counter. 61 * trace_jiffy_clock(): Simply use jiffies as a clock counter.
62 * Note that this use of jiffies_64 is not completely safe on
63 * 32-bit systems. But the window is tiny, and the effect if
64 * we are affected is that we will have an obviously bogus
65 * timestamp on a trace event - i.e. not life threatening.
62 */ 66 */
63u64 notrace trace_clock_jiffies(void) 67u64 notrace trace_clock_jiffies(void)
64{ 68{
65 u64 jiffy = jiffies - INITIAL_JIFFIES; 69 return jiffies_64_to_clock_t(jiffies_64 - INITIAL_JIFFIES);
66
67 /* Return nsecs */
68 return (u64)jiffies_to_usecs(jiffy) * 1000ULL;
69} 70}
70 71
71/* 72/*
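[annotation] The switch to jiffies_64 addresses a wraparound: derived from 32-bit jiffies, the "uptime" clock overflows after roughly 49.7 days at HZ=1000, and jiffies_64_to_clock_t() also converts the result to clock_t ticks, matching the in_ns flag flipped to 0 in the clock table above. A userspace illustration of the wrap:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint64_t jiffies_64 = 0x100000010ULL;		/* > 2^32 ticks */
		uint32_t jiffies_32 = (uint32_t)jiffies_64;	/* wrapped to 0x10 */

		printf("32-bit view: %u ticks, 64-bit view: %llu ticks\n",
		       jiffies_32, (unsigned long long)jiffies_64);
		return 0;
	}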
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index f99e0b3bca8c..2de53628689f 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -470,6 +470,7 @@ static void remove_event_file_dir(struct ftrace_event_file *file)
470 470
471 list_del(&file->list); 471 list_del(&file->list);
472 remove_subsystem(file->system); 472 remove_subsystem(file->system);
473 free_event_filter(file->filter);
473 kmem_cache_free(file_cachep, file); 474 kmem_cache_free(file_cachep, file);
474} 475}
475 476
diff --git a/net/batman-adv/bridge_loop_avoidance.c b/net/batman-adv/bridge_loop_avoidance.c
index 6f0d9ec37950..a957c8140721 100644
--- a/net/batman-adv/bridge_loop_avoidance.c
+++ b/net/batman-adv/bridge_loop_avoidance.c
@@ -800,11 +800,6 @@ static int batadv_check_claim_group(struct batadv_priv *bat_priv,
800 bla_dst = (struct batadv_bla_claim_dst *)hw_dst; 800 bla_dst = (struct batadv_bla_claim_dst *)hw_dst;
801 bla_dst_own = &bat_priv->bla.claim_dest; 801 bla_dst_own = &bat_priv->bla.claim_dest;
802 802
803 /* check if it is a claim packet in general */
804 if (memcmp(bla_dst->magic, bla_dst_own->magic,
805 sizeof(bla_dst->magic)) != 0)
806 return 0;
807
808 /* if announcement packet, use the source, 803 /* if announcement packet, use the source,
809 * otherwise assume it is in the hw_src 804 * otherwise assume it is in the hw_src
810 */ 805 */
@@ -866,12 +861,13 @@ static int batadv_bla_process_claim(struct batadv_priv *bat_priv,
866 struct batadv_hard_iface *primary_if, 861 struct batadv_hard_iface *primary_if,
867 struct sk_buff *skb) 862 struct sk_buff *skb)
868{ 863{
869 struct batadv_bla_claim_dst *bla_dst; 864 struct batadv_bla_claim_dst *bla_dst, *bla_dst_own;
870 uint8_t *hw_src, *hw_dst; 865 uint8_t *hw_src, *hw_dst;
871 struct vlan_ethhdr *vhdr; 866 struct vlan_hdr *vhdr, vhdr_buf;
872 struct ethhdr *ethhdr; 867 struct ethhdr *ethhdr;
873 struct arphdr *arphdr; 868 struct arphdr *arphdr;
874 unsigned short vid; 869 unsigned short vid;
870 int vlan_depth = 0;
875 __be16 proto; 871 __be16 proto;
876 int headlen; 872 int headlen;
877 int ret; 873 int ret;
@@ -882,9 +878,24 @@ static int batadv_bla_process_claim(struct batadv_priv *bat_priv,
882 proto = ethhdr->h_proto; 878 proto = ethhdr->h_proto;
883 headlen = ETH_HLEN; 879 headlen = ETH_HLEN;
884 if (vid & BATADV_VLAN_HAS_TAG) { 880 if (vid & BATADV_VLAN_HAS_TAG) {
885 vhdr = vlan_eth_hdr(skb); 881 /* Traverse the VLAN/Ethertypes.
886 proto = vhdr->h_vlan_encapsulated_proto; 882 *
887 headlen += VLAN_HLEN; 883 * At this point it is known that the first protocol is a VLAN
884 * header, so start checking at the encapsulated protocol.
885 *
886 * The depth of the VLAN headers is recorded to drop BLA claim
887 * frames encapsulated into multiple VLAN headers (QinQ).
888 */
889 do {
890 vhdr = skb_header_pointer(skb, headlen, VLAN_HLEN,
891 &vhdr_buf);
892 if (!vhdr)
893 return 0;
894
895 proto = vhdr->h_vlan_encapsulated_proto;
896 headlen += VLAN_HLEN;
897 vlan_depth++;
898 } while (proto == htons(ETH_P_8021Q));
888 } 899 }
889 900
890 if (proto != htons(ETH_P_ARP)) 901 if (proto != htons(ETH_P_ARP))
@@ -914,6 +925,19 @@ static int batadv_bla_process_claim(struct batadv_priv *bat_priv,
914 hw_src = (uint8_t *)arphdr + sizeof(struct arphdr); 925 hw_src = (uint8_t *)arphdr + sizeof(struct arphdr);
915 hw_dst = hw_src + ETH_ALEN + 4; 926 hw_dst = hw_src + ETH_ALEN + 4;
916 bla_dst = (struct batadv_bla_claim_dst *)hw_dst; 927 bla_dst = (struct batadv_bla_claim_dst *)hw_dst;
928 bla_dst_own = &bat_priv->bla.claim_dest;
929
930 /* check if it is a claim frame in general */
931 if (memcmp(bla_dst->magic, bla_dst_own->magic,
932 sizeof(bla_dst->magic)) != 0)
933 return 0;
934
935 /* check if there is a claim frame encapsulated deeper in (QinQ) and
936 * drop that, as this is not supported by BLA but should also not be
937 * sent via the mesh.
938 */
939 if (vlan_depth > 1)
940 return 1;
917 941
918 /* check if it is a claim frame. */ 942 /* check if it is a claim frame. */
919 ret = batadv_check_claim_group(bat_priv, primary_if, hw_src, hw_dst, 943 ret = batadv_check_claim_group(bat_priv, primary_if, hw_src, hw_dst,
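[annotation] The loop above generalizes to a reusable pattern: walk stacked 802.1Q headers with skb_header_pointer(), which safely copies each header into a stack buffer when the skb is not linear, while counting nesting depth so QinQ frames can be rejected. A simplified kernel-context sketch (skip_vlan_headers() is a hypothetical helper, not batman-adv code):

	#include <linux/if_vlan.h>

	static __be16 skip_vlan_headers(struct sk_buff *skb, __be16 proto,
					int *headlen, int *vlan_depth)
	{
		struct vlan_hdr *vhdr, vhdr_buf;

		while (proto == htons(ETH_P_8021Q)) {
			/* Copies into vhdr_buf if the header is not linear. */
			vhdr = skb_header_pointer(skb, *headlen, VLAN_HLEN, &vhdr_buf);
			if (!vhdr)
				return 0;	/* truncated frame: give up */
			proto = vhdr->h_vlan_encapsulated_proto;
			*headlen += VLAN_HLEN;
			(*vlan_depth)++;
		}
		return proto;
	}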
diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
index d551e6302cf3..e0a723991c54 100644
--- a/net/batman-adv/soft-interface.c
+++ b/net/batman-adv/soft-interface.c
@@ -448,10 +448,15 @@ out:
  * possibly free it
  * @softif_vlan: the vlan object to release
  */
-void batadv_softif_vlan_free_ref(struct batadv_softif_vlan *softif_vlan)
+void batadv_softif_vlan_free_ref(struct batadv_softif_vlan *vlan)
 {
-	if (atomic_dec_and_test(&softif_vlan->refcount))
-		kfree_rcu(softif_vlan, rcu);
+	if (atomic_dec_and_test(&vlan->refcount)) {
+		spin_lock_bh(&vlan->bat_priv->softif_vlan_list_lock);
+		hlist_del_rcu(&vlan->list);
+		spin_unlock_bh(&vlan->bat_priv->softif_vlan_list_lock);
+
+		kfree_rcu(vlan, rcu);
+	}
 }
 
 /**
@@ -505,6 +510,7 @@ int batadv_softif_create_vlan(struct batadv_priv *bat_priv, unsigned short vid)
 	if (!vlan)
 		return -ENOMEM;
 
+	vlan->bat_priv = bat_priv;
 	vlan->vid = vid;
 	atomic_set(&vlan->refcount, 1);
 
@@ -516,6 +522,10 @@ int batadv_softif_create_vlan(struct batadv_priv *bat_priv, unsigned short vid)
 		return err;
 	}
 
+	spin_lock_bh(&bat_priv->softif_vlan_list_lock);
+	hlist_add_head_rcu(&vlan->list, &bat_priv->softif_vlan_list);
+	spin_unlock_bh(&bat_priv->softif_vlan_list_lock);
+
 	/* add a new TT local entry. This one will be marked with the NOPURGE
 	 * flag
 	 */
@@ -523,10 +533,6 @@ int batadv_softif_create_vlan(struct batadv_priv *bat_priv, unsigned short vid)
 			    bat_priv->soft_iface->dev_addr, vid,
 			    BATADV_NULL_IFINDEX, BATADV_NO_MARK);
 
-	spin_lock_bh(&bat_priv->softif_vlan_list_lock);
-	hlist_add_head_rcu(&vlan->list, &bat_priv->softif_vlan_list);
-	spin_unlock_bh(&bat_priv->softif_vlan_list_lock);
-
 	return 0;
 }
 
@@ -538,18 +544,13 @@ int batadv_softif_create_vlan(struct batadv_priv *bat_priv, unsigned short vid)
 static void batadv_softif_destroy_vlan(struct batadv_priv *bat_priv,
 				       struct batadv_softif_vlan *vlan)
 {
-	spin_lock_bh(&bat_priv->softif_vlan_list_lock);
-	hlist_del_rcu(&vlan->list);
-	spin_unlock_bh(&bat_priv->softif_vlan_list_lock);
-
-	batadv_sysfs_del_vlan(bat_priv, vlan);
-
 	/* explicitly remove the associated TT local entry because it is marked
 	 * with the NOPURGE flag
 	 */
 	batadv_tt_local_remove(bat_priv, bat_priv->soft_iface->dev_addr,
 			       vlan->vid, "vlan interface destroyed", false);
 
+	batadv_sysfs_del_vlan(bat_priv, vlan);
 	batadv_softif_vlan_free_ref(vlan);
 }
 
@@ -567,6 +568,8 @@ static int batadv_interface_add_vid(struct net_device *dev, __be16 proto,
 				    unsigned short vid)
 {
 	struct batadv_priv *bat_priv = netdev_priv(dev);
+	struct batadv_softif_vlan *vlan;
+	int ret;
 
 	/* only 802.1Q vlans are supported.
 	 * batman-adv does not know how to handle other types
@@ -576,7 +579,36 @@ static int batadv_interface_add_vid(struct net_device *dev, __be16 proto,
 
 	vid |= BATADV_VLAN_HAS_TAG;
 
-	return batadv_softif_create_vlan(bat_priv, vid);
+	/* if a new vlan is getting created and it already exists, it means that
+	 * it was not deleted yet. batadv_softif_vlan_get() increases the
+	 * refcount in order to revive the object.
+	 *
+	 * if it does not exist then create it.
+	 */
+	vlan = batadv_softif_vlan_get(bat_priv, vid);
+	if (!vlan)
+		return batadv_softif_create_vlan(bat_priv, vid);
+
+	/* recreate the sysfs object if it was already destroyed (and it should
+	 * be, since we received a kill_vid() for this vlan)
+	 */
+	if (!vlan->kobj) {
+		ret = batadv_sysfs_add_vlan(bat_priv->soft_iface, vlan);
+		if (ret) {
+			batadv_softif_vlan_free_ref(vlan);
+			return ret;
+		}
+	}
+
+	/* add a new TT local entry. This one will be marked with the NOPURGE
+	 * flag. This must be added again, even if the vlan object already
+	 * exists, because the entry was deleted by kill_vid()
+	 */
+	batadv_tt_local_add(bat_priv->soft_iface,
+			    bat_priv->soft_iface->dev_addr, vid,
+			    BATADV_NULL_IFINDEX, BATADV_NO_MARK);
+
+	return 0;
 }
 
 /**
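
[Editor's note: the soft-interface.c hunks above move the list unlink out of batadv_softif_destroy_vlan() and into the release function, so a vlan object stays on the list until its last reference is dropped and a later add_vid() can revive it instead of racing against a separate removal step. A minimal sketch of that pattern, with illustrative names rather than the batman-adv API (and assuming the lookup side takes its reference with atomic_inc_not_zero(), as revive-style lookups must):]

	struct obj {
		struct hlist_node list;
		atomic_t refcount;
		struct rcu_head rcu;
	};

	/* last put unlinks and frees; until then, lookups can revive it */
	static void obj_put(struct obj *o, spinlock_t *list_lock)
	{
		if (atomic_dec_and_test(&o->refcount)) {
			spin_lock_bh(list_lock);
			hlist_del_rcu(&o->list);
			spin_unlock_bh(list_lock);
			kfree_rcu(o, rcu);
		}
	}
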
diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c
index d636bde72c9a..5f59e7f899a0 100644
--- a/net/batman-adv/translation-table.c
+++ b/net/batman-adv/translation-table.c
@@ -511,6 +511,7 @@ bool batadv_tt_local_add(struct net_device *soft_iface, const uint8_t *addr,
 	struct batadv_priv *bat_priv = netdev_priv(soft_iface);
 	struct batadv_tt_local_entry *tt_local;
 	struct batadv_tt_global_entry *tt_global = NULL;
+	struct batadv_softif_vlan *vlan;
 	struct net_device *in_dev = NULL;
 	struct hlist_head *head;
 	struct batadv_tt_orig_list_entry *orig_entry;
@@ -572,6 +573,9 @@ bool batadv_tt_local_add(struct net_device *soft_iface, const uint8_t *addr,
 	if (!tt_local)
 		goto out;
 
+	/* increase the refcounter of the related vlan */
+	vlan = batadv_softif_vlan_get(bat_priv, vid);
+
 	batadv_dbg(BATADV_DBG_TT, bat_priv,
 		   "Creating new local tt entry: %pM (vid: %d, ttvn: %d)\n",
 		   addr, BATADV_PRINT_VID(vid),
@@ -604,6 +608,7 @@ bool batadv_tt_local_add(struct net_device *soft_iface, const uint8_t *addr,
 	if (unlikely(hash_added != 0)) {
 		/* remove the reference for the hash */
 		batadv_tt_local_entry_free_ref(tt_local);
+		batadv_softif_vlan_free_ref(vlan);
 		goto out;
 	}
 
@@ -1009,6 +1014,7 @@ uint16_t batadv_tt_local_remove(struct batadv_priv *bat_priv,
 {
 	struct batadv_tt_local_entry *tt_local_entry;
 	uint16_t flags, curr_flags = BATADV_NO_FLAGS;
+	struct batadv_softif_vlan *vlan;
 
 	tt_local_entry = batadv_tt_local_hash_find(bat_priv, addr, vid);
 	if (!tt_local_entry)
@@ -1039,6 +1045,11 @@ uint16_t batadv_tt_local_remove(struct batadv_priv *bat_priv,
 	hlist_del_rcu(&tt_local_entry->common.hash_entry);
 	batadv_tt_local_entry_free_ref(tt_local_entry);
 
+	/* decrease the reference held for this vlan */
+	vlan = batadv_softif_vlan_get(bat_priv, vid);
+	batadv_softif_vlan_free_ref(vlan);
+	batadv_softif_vlan_free_ref(vlan);
+
 out:
 	if (tt_local_entry)
 		batadv_tt_local_entry_free_ref(tt_local_entry);
@@ -1111,6 +1122,7 @@ static void batadv_tt_local_table_free(struct batadv_priv *bat_priv)
 	spinlock_t *list_lock; /* protects write access to the hash lists */
 	struct batadv_tt_common_entry *tt_common_entry;
 	struct batadv_tt_local_entry *tt_local;
+	struct batadv_softif_vlan *vlan;
 	struct hlist_node *node_tmp;
 	struct hlist_head *head;
 	uint32_t i;
@@ -1131,6 +1143,13 @@ static void batadv_tt_local_table_free(struct batadv_priv *bat_priv)
 			tt_local = container_of(tt_common_entry,
 						struct batadv_tt_local_entry,
 						common);
+
+			/* decrease the reference held for this vlan */
+			vlan = batadv_softif_vlan_get(bat_priv,
+						      tt_common_entry->vid);
+			batadv_softif_vlan_free_ref(vlan);
+			batadv_softif_vlan_free_ref(vlan);
+
 			batadv_tt_local_entry_free_ref(tt_local);
 		}
 		spin_unlock_bh(list_lock);
@@ -3139,6 +3158,7 @@ static void batadv_tt_local_purge_pending_clients(struct batadv_priv *bat_priv)
 	struct batadv_hashtable *hash = bat_priv->tt.local_hash;
 	struct batadv_tt_common_entry *tt_common;
 	struct batadv_tt_local_entry *tt_local;
+	struct batadv_softif_vlan *vlan;
 	struct hlist_node *node_tmp;
 	struct hlist_head *head;
 	spinlock_t *list_lock; /* protects write access to the hash lists */
@@ -3167,6 +3187,12 @@ static void batadv_tt_local_purge_pending_clients(struct batadv_priv *bat_priv)
 			tt_local = container_of(tt_common,
 						struct batadv_tt_local_entry,
 						common);
+
+			/* decrease the reference held for this vlan */
+			vlan = batadv_softif_vlan_get(bat_priv, tt_common->vid);
+			batadv_softif_vlan_free_ref(vlan);
+			batadv_softif_vlan_free_ref(vlan);
+
 			batadv_tt_local_entry_free_ref(tt_local);
 		}
 		spin_unlock_bh(list_lock);
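
[Editor's note: the doubled batadv_softif_vlan_free_ref() calls in the removal paths above are intentional, not a stray duplicate: batadv_softif_vlan_get() takes one reference for the lookup itself, and the second drop releases the reference that batadv_tt_local_add() acquired on behalf of the TT entry. As a ledger (illustrative comment only):]

	/* add path:     batadv_softif_vlan_get()      +1  (kept by the TT entry)
	 * remove path:  batadv_softif_vlan_get()      +1  (lookup)
	 *               batadv_softif_vlan_free_ref() -1  (drops the lookup ref)
	 *               batadv_softif_vlan_free_ref() -1  (drops the TT entry's ref)
	 */
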
diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h
index 34891a56773f..8854c05622a9 100644
--- a/net/batman-adv/types.h
+++ b/net/batman-adv/types.h
@@ -687,6 +687,7 @@ struct batadv_priv_nc {
 
 /**
  * struct batadv_softif_vlan - per VLAN attributes set
+ * @bat_priv: pointer to the mesh object
  * @vid: VLAN identifier
  * @kobj: kobject for sysfs vlan subdirectory
  * @ap_isolation: AP isolation state
@@ -696,6 +697,7 @@ struct batadv_priv_nc {
  * @rcu: struct used for freeing in a RCU-safe manner
  */
 struct batadv_softif_vlan {
+	struct batadv_priv *bat_priv;
 	unsigned short vid;
 	struct kobject *kobj;
 	atomic_t ap_isolation; /* boolean */
diff --git a/net/core/dev.c b/net/core/dev.c
index e52a3788d18d..e1b7cfaccd65 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -4106,6 +4106,8 @@ static void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
 	skb->vlan_tci = 0;
 	skb->dev = napi->dev;
 	skb->skb_iif = 0;
+	skb->encapsulation = 0;
+	skb_shinfo(skb)->gso_type = 0;
 	skb->truesize = SKB_TRUESIZE(skb_end_offset(skb));
 
 	napi->skb = skb;
diff --git a/net/dns_resolver/dns_query.c b/net/dns_resolver/dns_query.c
index 9acec61f5433..dd8696a3dbec 100644
--- a/net/dns_resolver/dns_query.c
+++ b/net/dns_resolver/dns_query.c
@@ -150,7 +150,7 @@ int dns_query(const char *type, const char *name, size_t namelen,
 		goto put;
 
 	memcpy(*_result, upayload->data, len);
-	*_result[len] = '\0';
+	(*_result)[len] = '\0';
 
 	if (_expiry)
 		*_expiry = rkey->expiry;
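
[Editor's note: the dns_query() one-liner is a C precedence fix: [] binds tighter than unary *, so the old code indexed the char ** before dereferencing it. Spelled out (a sketch; _result is the caller's char ** out-parameter):]

	*_result[len] = '\0';    /* parsed as *(_result[len]): indexes the
				  * pointer itself, then dereferences --
				  * writes through a garbage address */
	(*_result)[len] = '\0';  /* dereferences first, then indexes the
				  * allocated buffer -- terminates the copy */
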
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index d5e6836cf772..d156b3c5f363 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -1429,6 +1429,9 @@ static int inet_gro_complete(struct sk_buff *skb, int nhoff)
 	int proto = iph->protocol;
 	int err = -ENOSYS;
 
+	if (skb->encapsulation)
+		skb_set_inner_network_header(skb, nhoff);
+
 	csum_replace2(&iph->check, iph->tot_len, newlen);
 	iph->tot_len = newlen;
 
diff --git a/net/ipv4/gre_offload.c b/net/ipv4/gre_offload.c
index eb92deb12666..f0bdd47bbbcb 100644
--- a/net/ipv4/gre_offload.c
+++ b/net/ipv4/gre_offload.c
@@ -263,6 +263,9 @@ static int gre_gro_complete(struct sk_buff *skb, int nhoff)
 	int err = -ENOENT;
 	__be16 type;
 
+	skb->encapsulation = 1;
+	skb_shinfo(skb)->gso_type = SKB_GSO_GRE;
+
 	type = greh->protocol;
 	if (greh->flags & GRE_KEY)
 		grehlen += GRE_HEADER_SECTION;
diff --git a/net/ipv4/ip_options.c b/net/ipv4/ip_options.c
index 5e7aecea05cd..ad382499bace 100644
--- a/net/ipv4/ip_options.c
+++ b/net/ipv4/ip_options.c
@@ -288,6 +288,10 @@ int ip_options_compile(struct net *net,
 				optptr++;
 				continue;
 			}
+			if (unlikely(l < 2)) {
+				pp_ptr = optptr;
+				goto error;
+			}
 			optlen = optptr[1];
 			if (optlen < 2 || optlen > l) {
 				pp_ptr = optptr;
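
[Editor's note: the new l < 2 guard in ip_options_compile() matters because a multi-byte IP option is a type/length pair: when only one byte of option space remains, reading optptr[1] walks past the end of the header before the existing optlen validation ever runs. The layout being defended (informative sketch):]

	/* optptr[0]   option type
	 * optptr[1]   option length   <- unreadable when l < 2
	 * optptr[2..] option data (optlen - 2 bytes)
	 */
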
diff --git a/net/ipv4/tcp_offload.c b/net/ipv4/tcp_offload.c
index 4e86c59ec7f7..55046ecd083e 100644
--- a/net/ipv4/tcp_offload.c
+++ b/net/ipv4/tcp_offload.c
@@ -309,7 +309,7 @@ static int tcp4_gro_complete(struct sk_buff *skb, int thoff)
 
 	th->check = ~tcp_v4_check(skb->len - thoff, iph->saddr,
 				  iph->daddr, 0);
-	skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
+	skb_shinfo(skb)->gso_type |= SKB_GSO_TCPV4;
 
 	return tcp_gro_complete(skb);
 }
diff --git a/net/ipv6/tcpv6_offload.c b/net/ipv6/tcpv6_offload.c
index 8517d3cd1aed..01b0ff9a0c2c 100644
--- a/net/ipv6/tcpv6_offload.c
+++ b/net/ipv6/tcpv6_offload.c
@@ -73,7 +73,7 @@ static int tcp6_gro_complete(struct sk_buff *skb, int thoff)
 
 	th->check = ~tcp_v6_check(skb->len - thoff, &iph->saddr,
 				  &iph->daddr, 0);
-	skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
+	skb_shinfo(skb)->gso_type |= SKB_GSO_TCPV6;
 
 	return tcp_gro_complete(skb);
 }
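
[Editor's note: the switch from = to |= in both TCP GRO completion handlers preserves bits set earlier in the completion chain: for an encapsulated packet, gre_gro_complete() (above) has already stored SKB_GSO_GRE in gso_type before handing off to the inner-protocol handler, and the napi_reuse_skb() hunk clears the fields so recycled skbs do not leak stale state into the next flow. Roughly:]

	/* completion order for a GRO-merged GRE-over-IPv4 TCP flow:
	 *   gre_gro_complete():   gso_type  = SKB_GSO_GRE;
	 *   tcp4_gro_complete():  gso_type |= SKB_GSO_TCPV4;
	 * a plain '=' in the second step would discard the GRE bit and
	 * missegment the packet on the output path.
	 */
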
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index ab4566cfcbe4..8746ff9a8357 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -35,7 +35,7 @@ int nft_register_afinfo(struct net *net, struct nft_af_info *afi)
 {
 	INIT_LIST_HEAD(&afi->tables);
 	nfnl_lock(NFNL_SUBSYS_NFTABLES);
-	list_add_tail(&afi->list, &net->nft.af_info);
+	list_add_tail_rcu(&afi->list, &net->nft.af_info);
 	nfnl_unlock(NFNL_SUBSYS_NFTABLES);
 	return 0;
 }
@@ -51,7 +51,7 @@ EXPORT_SYMBOL_GPL(nft_register_afinfo);
 void nft_unregister_afinfo(struct nft_af_info *afi)
 {
 	nfnl_lock(NFNL_SUBSYS_NFTABLES);
-	list_del(&afi->list);
+	list_del_rcu(&afi->list);
 	nfnl_unlock(NFNL_SUBSYS_NFTABLES);
 }
 EXPORT_SYMBOL_GPL(nft_unregister_afinfo);
@@ -277,11 +277,14 @@ static int nf_tables_dump_tables(struct sk_buff *skb,
 	struct net *net = sock_net(skb->sk);
 	int family = nfmsg->nfgen_family;
 
-	list_for_each_entry(afi, &net->nft.af_info, list) {
+	rcu_read_lock();
+	cb->seq = net->nft.base_seq;
+
+	list_for_each_entry_rcu(afi, &net->nft.af_info, list) {
 		if (family != NFPROTO_UNSPEC && family != afi->family)
 			continue;
 
-		list_for_each_entry(table, &afi->tables, list) {
+		list_for_each_entry_rcu(table, &afi->tables, list) {
 			if (idx < s_idx)
 				goto cont;
 			if (idx > s_idx)
@@ -294,11 +297,14 @@ static int nf_tables_dump_tables(struct sk_buff *skb,
 						      NLM_F_MULTI,
 						      afi->family, table) < 0)
 				goto done;
+
+			nl_dump_check_consistent(cb, nlmsg_hdr(skb));
 cont:
 			idx++;
 		}
 	}
 done:
+	rcu_read_unlock();
 	cb->args[0] = idx;
 	return skb->len;
 }
@@ -407,6 +413,9 @@ static int nf_tables_updtable(struct nft_ctx *ctx)
 	if (flags & ~NFT_TABLE_F_DORMANT)
 		return -EINVAL;
 
+	if (flags == ctx->table->flags)
+		return 0;
+
 	trans = nft_trans_alloc(ctx, NFT_MSG_NEWTABLE,
 				sizeof(struct nft_trans_table));
 	if (trans == NULL)
@@ -514,7 +523,7 @@ static int nf_tables_newtable(struct sock *nlsk, struct sk_buff *skb,
 		module_put(afi->owner);
 		return err;
 	}
-	list_add_tail(&table->list, &afi->tables);
+	list_add_tail_rcu(&table->list, &afi->tables);
 	return 0;
 }
 
@@ -546,7 +555,7 @@ static int nf_tables_deltable(struct sock *nlsk, struct sk_buff *skb,
 	if (err < 0)
 		return err;
 
-	list_del(&table->list);
+	list_del_rcu(&table->list);
 	return 0;
 }
 
@@ -635,13 +644,20 @@ static int nft_dump_stats(struct sk_buff *skb, struct nft_stats __percpu *stats)
 {
 	struct nft_stats *cpu_stats, total;
 	struct nlattr *nest;
+	unsigned int seq;
+	u64 pkts, bytes;
 	int cpu;
 
 	memset(&total, 0, sizeof(total));
 	for_each_possible_cpu(cpu) {
 		cpu_stats = per_cpu_ptr(stats, cpu);
-		total.pkts += cpu_stats->pkts;
-		total.bytes += cpu_stats->bytes;
+		do {
+			seq = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
+			pkts = cpu_stats->pkts;
+			bytes = cpu_stats->bytes;
+		} while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, seq));
+		total.pkts += pkts;
+		total.bytes += bytes;
 	}
 	nest = nla_nest_start(skb, NFTA_CHAIN_COUNTERS);
 	if (nest == NULL)
@@ -761,12 +777,15 @@ static int nf_tables_dump_chains(struct sk_buff *skb,
 	struct net *net = sock_net(skb->sk);
 	int family = nfmsg->nfgen_family;
 
-	list_for_each_entry(afi, &net->nft.af_info, list) {
+	rcu_read_lock();
+	cb->seq = net->nft.base_seq;
+
+	list_for_each_entry_rcu(afi, &net->nft.af_info, list) {
 		if (family != NFPROTO_UNSPEC && family != afi->family)
 			continue;
 
-		list_for_each_entry(table, &afi->tables, list) {
-			list_for_each_entry(chain, &table->chains, list) {
+		list_for_each_entry_rcu(table, &afi->tables, list) {
+			list_for_each_entry_rcu(chain, &table->chains, list) {
 				if (idx < s_idx)
 					goto cont;
 				if (idx > s_idx)
@@ -778,17 +797,19 @@
 							      NLM_F_MULTI,
 							      afi->family, table, chain) < 0)
 					goto done;
+
+				nl_dump_check_consistent(cb, nlmsg_hdr(skb));
 cont:
 				idx++;
 			}
 		}
 	}
 done:
+	rcu_read_unlock();
 	cb->args[0] = idx;
 	return skb->len;
 }
 
-
 static int nf_tables_getchain(struct sock *nlsk, struct sk_buff *skb,
 			      const struct nlmsghdr *nlh,
 			      const struct nlattr * const nla[])
@@ -861,7 +882,7 @@ static struct nft_stats __percpu *nft_stats_alloc(const struct nlattr *attr)
 	if (!tb[NFTA_COUNTER_BYTES] || !tb[NFTA_COUNTER_PACKETS])
 		return ERR_PTR(-EINVAL);
 
-	newstats = alloc_percpu(struct nft_stats);
+	newstats = netdev_alloc_pcpu_stats(struct nft_stats);
 	if (newstats == NULL)
 		return ERR_PTR(-ENOMEM);
 
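
[Editor's note: this allocation and the one in nf_tables_newchain() below move from alloc_percpu() to netdev_alloc_pcpu_stats() because the u64_stats reader loop added to nft_dump_stats() depends on each per-cpu syncp being initialized, which a bare alloc_percpu() does not do. A rough expansion of what the helper provides, assuming the macro's usual shape in this era:]

	struct nft_stats __percpu *newstats = alloc_percpu(struct nft_stats);
	int cpu;

	if (newstats)
		for_each_possible_cpu(cpu)
			u64_stats_init(&per_cpu_ptr(newstats, cpu)->syncp);
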
@@ -1077,7 +1098,7 @@ static int nf_tables_newchain(struct sock *nlsk, struct sk_buff *skb,
 			}
 			basechain->stats = stats;
 		} else {
-			stats = alloc_percpu(struct nft_stats);
+			stats = netdev_alloc_pcpu_stats(struct nft_stats);
 			if (IS_ERR(stats)) {
 				module_put(type->owner);
 				kfree(basechain);
@@ -1130,7 +1151,7 @@
 		goto err2;
 
 	table->use++;
-	list_add_tail(&chain->list, &table->chains);
+	list_add_tail_rcu(&chain->list, &table->chains);
 	return 0;
 err2:
 	if (!(table->flags & NFT_TABLE_F_DORMANT) &&
@@ -1180,7 +1201,7 @@ static int nf_tables_delchain(struct sock *nlsk, struct sk_buff *skb,
 	return err;
 
 	table->use--;
-	list_del(&chain->list);
+	list_del_rcu(&chain->list);
 	return 0;
 }
 
@@ -1199,9 +1220,9 @@ int nft_register_expr(struct nft_expr_type *type)
 {
 	nfnl_lock(NFNL_SUBSYS_NFTABLES);
 	if (type->family == NFPROTO_UNSPEC)
-		list_add_tail(&type->list, &nf_tables_expressions);
+		list_add_tail_rcu(&type->list, &nf_tables_expressions);
 	else
-		list_add(&type->list, &nf_tables_expressions);
+		list_add_rcu(&type->list, &nf_tables_expressions);
 	nfnl_unlock(NFNL_SUBSYS_NFTABLES);
 	return 0;
 }
@@ -1216,7 +1237,7 @@ EXPORT_SYMBOL_GPL(nft_register_expr);
 void nft_unregister_expr(struct nft_expr_type *type)
 {
 	nfnl_lock(NFNL_SUBSYS_NFTABLES);
-	list_del(&type->list);
+	list_del_rcu(&type->list);
 	nfnl_unlock(NFNL_SUBSYS_NFTABLES);
 }
 EXPORT_SYMBOL_GPL(nft_unregister_expr);
@@ -1549,16 +1570,17 @@ static int nf_tables_dump_rules(struct sk_buff *skb,
 	unsigned int idx = 0, s_idx = cb->args[0];
 	struct net *net = sock_net(skb->sk);
 	int family = nfmsg->nfgen_family;
-	u8 genctr = ACCESS_ONCE(net->nft.genctr);
-	u8 gencursor = ACCESS_ONCE(net->nft.gencursor);
 
-	list_for_each_entry(afi, &net->nft.af_info, list) {
+	rcu_read_lock();
+	cb->seq = net->nft.base_seq;
+
+	list_for_each_entry_rcu(afi, &net->nft.af_info, list) {
 		if (family != NFPROTO_UNSPEC && family != afi->family)
 			continue;
 
-		list_for_each_entry(table, &afi->tables, list) {
-			list_for_each_entry(chain, &table->chains, list) {
-				list_for_each_entry(rule, &chain->rules, list) {
+		list_for_each_entry_rcu(table, &afi->tables, list) {
+			list_for_each_entry_rcu(chain, &table->chains, list) {
+				list_for_each_entry_rcu(rule, &chain->rules, list) {
 					if (!nft_rule_is_active(net, rule))
 						goto cont;
 					if (idx < s_idx)
@@ -1572,6 +1594,8 @@ static int nf_tables_dump_rules(struct sk_buff *skb,
 								      NLM_F_MULTI | NLM_F_APPEND,
 								      afi->family, table, chain, rule) < 0)
 						goto done;
+
+					nl_dump_check_consistent(cb, nlmsg_hdr(skb));
 cont:
 					idx++;
 				}
@@ -1579,9 +1603,7 @@ cont:
 		}
 	}
 done:
-	/* Invalidate this dump, a transition to the new generation happened */
-	if (gencursor != net->nft.gencursor || genctr != net->nft.genctr)
-		return -EBUSY;
+	rcu_read_unlock();
 
 	cb->args[0] = idx;
 	return skb->len;
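
[Editor's note: with the dump now running under rcu_read_lock(), the old snapshot-and-return -EBUSY scheme is replaced by netlink's standard consistency protocol: each dump pass records the generation it saw in cb->seq (here net->nft.base_seq), and nl_dump_check_consistent() flags individual messages instead of aborting the whole dump. Its behavior, paraphrased from include/net/netlink.h:]

	/* paraphrase of nl_dump_check_consistent() */
	if (cb->prev_seq && cb->seq != cb->prev_seq)
		nlh->nlmsg_flags |= NLM_F_DUMP_INTR;  /* tell userspace to restart */
	cb->prev_seq = cb->seq;
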
@@ -1932,7 +1954,7 @@ static LIST_HEAD(nf_tables_set_ops);
 int nft_register_set(struct nft_set_ops *ops)
 {
 	nfnl_lock(NFNL_SUBSYS_NFTABLES);
-	list_add_tail(&ops->list, &nf_tables_set_ops);
+	list_add_tail_rcu(&ops->list, &nf_tables_set_ops);
 	nfnl_unlock(NFNL_SUBSYS_NFTABLES);
 	return 0;
 }
@@ -1941,7 +1963,7 @@ EXPORT_SYMBOL_GPL(nft_register_set);
 void nft_unregister_set(struct nft_set_ops *ops)
 {
 	nfnl_lock(NFNL_SUBSYS_NFTABLES);
-	list_del(&ops->list);
+	list_del_rcu(&ops->list);
 	nfnl_unlock(NFNL_SUBSYS_NFTABLES);
 }
 EXPORT_SYMBOL_GPL(nft_unregister_set);
@@ -2234,7 +2256,10 @@ static int nf_tables_dump_sets_table(struct nft_ctx *ctx, struct sk_buff *skb,
 	if (cb->args[1])
 		return skb->len;
 
-	list_for_each_entry(set, &ctx->table->sets, list) {
+	rcu_read_lock();
+	cb->seq = ctx->net->nft.base_seq;
+
+	list_for_each_entry_rcu(set, &ctx->table->sets, list) {
 		if (idx < s_idx)
 			goto cont;
 		if (nf_tables_fill_set(skb, ctx, set, NFT_MSG_NEWSET,
@@ -2242,11 +2267,13 @@
 			cb->args[0] = idx;
 			goto done;
 		}
+		nl_dump_check_consistent(cb, nlmsg_hdr(skb));
cont:
 		idx++;
 	}
 	cb->args[1] = 1;
done:
+	rcu_read_unlock();
 	return skb->len;
 }
 
@@ -2260,7 +2287,10 @@ static int nf_tables_dump_sets_family(struct nft_ctx *ctx, struct sk_buff *skb,
 	if (cb->args[1])
 		return skb->len;
 
-	list_for_each_entry(table, &ctx->afi->tables, list) {
+	rcu_read_lock();
+	cb->seq = ctx->net->nft.base_seq;
+
+	list_for_each_entry_rcu(table, &ctx->afi->tables, list) {
 		if (cur_table) {
 			if (cur_table != table)
 				continue;
@@ -2269,7 +2299,7 @@
 		}
 		ctx->table = table;
 		idx = 0;
-		list_for_each_entry(set, &ctx->table->sets, list) {
+		list_for_each_entry_rcu(set, &ctx->table->sets, list) {
 			if (idx < s_idx)
 				goto cont;
 			if (nf_tables_fill_set(skb, ctx, set, NFT_MSG_NEWSET,
@@ -2278,12 +2308,14 @@
 				cb->args[2] = (unsigned long) table;
 				goto done;
 			}
+			nl_dump_check_consistent(cb, nlmsg_hdr(skb));
cont:
 			idx++;
 		}
 	}
 	cb->args[1] = 1;
done:
+	rcu_read_unlock();
 	return skb->len;
 }
 
@@ -2300,7 +2332,10 @@ static int nf_tables_dump_sets_all(struct nft_ctx *ctx, struct sk_buff *skb,
 	if (cb->args[1])
 		return skb->len;
 
-	list_for_each_entry(afi, &net->nft.af_info, list) {
+	rcu_read_lock();
+	cb->seq = net->nft.base_seq;
+
+	list_for_each_entry_rcu(afi, &net->nft.af_info, list) {
 		if (cur_family) {
 			if (afi->family != cur_family)
 				continue;
@@ -2308,7 +2343,7 @@ static int nf_tables_dump_sets_all(struct nft_ctx *ctx, struct sk_buff *skb,
 			cur_family = 0;
 		}
 
-		list_for_each_entry(table, &afi->tables, list) {
+		list_for_each_entry_rcu(table, &afi->tables, list) {
 			if (cur_table) {
 				if (cur_table != table)
 					continue;
@@ -2319,7 +2354,7 @@ static int nf_tables_dump_sets_all(struct nft_ctx *ctx, struct sk_buff *skb,
 			ctx->table = table;
 			ctx->afi = afi;
 			idx = 0;
-			list_for_each_entry(set, &ctx->table->sets, list) {
+			list_for_each_entry_rcu(set, &ctx->table->sets, list) {
 				if (idx < s_idx)
 					goto cont;
 				if (nf_tables_fill_set(skb, ctx, set,
@@ -2330,6 +2365,7 @@ static int nf_tables_dump_sets_all(struct nft_ctx *ctx, struct sk_buff *skb,
 					cb->args[3] = afi->family;
 					goto done;
 				}
+				nl_dump_check_consistent(cb, nlmsg_hdr(skb));
cont:
 				idx++;
 			}
@@ -2339,6 +2375,7 @@ cont:
 	}
 	cb->args[1] = 1;
done:
+	rcu_read_unlock();
 	return skb->len;
 }
 
@@ -2597,7 +2634,7 @@ static int nf_tables_newset(struct sock *nlsk, struct sk_buff *skb,
 	if (err < 0)
 		goto err2;
 
-	list_add_tail(&set->list, &table->sets);
+	list_add_tail_rcu(&set->list, &table->sets);
 	table->use++;
 	return 0;
 
@@ -2617,7 +2654,7 @@ static void nft_set_destroy(struct nft_set *set)
 
 static void nf_tables_set_destroy(const struct nft_ctx *ctx, struct nft_set *set)
 {
-	list_del(&set->list);
+	list_del_rcu(&set->list);
 	nf_tables_set_notify(ctx, set, NFT_MSG_DELSET, GFP_ATOMIC);
 	nft_set_destroy(set);
 }
@@ -2652,7 +2689,7 @@ static int nf_tables_delset(struct sock *nlsk, struct sk_buff *skb,
 	if (err < 0)
 		return err;
 
-	list_del(&set->list);
+	list_del_rcu(&set->list);
 	ctx.table->use--;
 	return 0;
 }
@@ -2704,14 +2741,14 @@ int nf_tables_bind_set(const struct nft_ctx *ctx, struct nft_set *set,
 	}
bind:
 	binding->chain = ctx->chain;
-	list_add_tail(&binding->list, &set->bindings);
+	list_add_tail_rcu(&binding->list, &set->bindings);
 	return 0;
 }
 
 void nf_tables_unbind_set(const struct nft_ctx *ctx, struct nft_set *set,
 			  struct nft_set_binding *binding)
 {
-	list_del(&binding->list);
+	list_del_rcu(&binding->list);
 
 	if (list_empty(&set->bindings) && set->flags & NFT_SET_ANONYMOUS &&
 	    !(set->flags & NFT_SET_INACTIVE))
@@ -3346,7 +3383,7 @@ static int nf_tables_commit(struct sk_buff *skb)
 	struct nft_set *set;
 
 	/* Bump generation counter, invalidate any dump in progress */
-	net->nft.genctr++;
+	while (++net->nft.base_seq == 0);
 
 	/* A new generation has just started */
 	net->nft.gencursor = gencursor_next(net);
@@ -3491,12 +3528,12 @@ static int nf_tables_abort(struct sk_buff *skb)
 				}
 				nft_trans_destroy(trans);
 			} else {
-				list_del(&trans->ctx.table->list);
+				list_del_rcu(&trans->ctx.table->list);
 			}
 			break;
 		case NFT_MSG_DELTABLE:
-			list_add_tail(&trans->ctx.table->list,
+			list_add_tail_rcu(&trans->ctx.table->list,
 				      &trans->ctx.afi->tables);
 			nft_trans_destroy(trans);
 			break;
 		case NFT_MSG_NEWCHAIN:
@@ -3507,7 +3544,7 @@ static int nf_tables_abort(struct sk_buff *skb)
 				nft_trans_destroy(trans);
 			} else {
 				trans->ctx.table->use--;
-				list_del(&trans->ctx.chain->list);
+				list_del_rcu(&trans->ctx.chain->list);
 				if (!(trans->ctx.table->flags & NFT_TABLE_F_DORMANT) &&
 				    trans->ctx.chain->flags & NFT_BASE_CHAIN) {
 					nf_unregister_hooks(nft_base_chain(trans->ctx.chain)->ops,
@@ -3517,8 +3554,8 @@ static int nf_tables_abort(struct sk_buff *skb)
 			break;
 		case NFT_MSG_DELCHAIN:
 			trans->ctx.table->use++;
-			list_add_tail(&trans->ctx.chain->list,
+			list_add_tail_rcu(&trans->ctx.chain->list,
 				      &trans->ctx.table->chains);
 			nft_trans_destroy(trans);
 			break;
 		case NFT_MSG_NEWRULE:
@@ -3532,12 +3569,12 @@ static int nf_tables_abort(struct sk_buff *skb)
 			break;
 		case NFT_MSG_NEWSET:
 			trans->ctx.table->use--;
-			list_del(&nft_trans_set(trans)->list);
+			list_del_rcu(&nft_trans_set(trans)->list);
 			break;
 		case NFT_MSG_DELSET:
 			trans->ctx.table->use++;
-			list_add_tail(&nft_trans_set(trans)->list,
+			list_add_tail_rcu(&nft_trans_set(trans)->list,
 				      &trans->ctx.table->sets);
 			nft_trans_destroy(trans);
 			break;
 		case NFT_MSG_NEWSETELEM:
@@ -3951,6 +3988,7 @@ static int nf_tables_init_net(struct net *net)
 {
 	INIT_LIST_HEAD(&net->nft.af_info);
 	INIT_LIST_HEAD(&net->nft.commit_list);
+	net->nft.base_seq = 1;
 	return 0;
 }
 
diff --git a/net/netfilter/nf_tables_core.c b/net/netfilter/nf_tables_core.c
index 345acfb1720b..3b90eb2b2c55 100644
--- a/net/netfilter/nf_tables_core.c
+++ b/net/netfilter/nf_tables_core.c
@@ -109,7 +109,7 @@ nft_do_chain(struct nft_pktinfo *pkt, const struct nf_hook_ops *ops)
 	struct nft_data data[NFT_REG_MAX + 1];
 	unsigned int stackptr = 0;
 	struct nft_jumpstack jumpstack[NFT_JUMP_STACK_SIZE];
-	struct nft_stats __percpu *stats;
+	struct nft_stats *stats;
 	int rulenum;
 	/*
 	 * Cache cursor to avoid problems in case that the cursor is updated
@@ -205,9 +205,11 @@ next_rule:
 		nft_trace_packet(pkt, basechain, -1, NFT_TRACE_POLICY);
 
 	rcu_read_lock_bh();
-	stats = rcu_dereference(nft_base_chain(basechain)->stats);
-	__this_cpu_inc(stats->pkts);
-	__this_cpu_add(stats->bytes, pkt->skb->len);
+	stats = this_cpu_ptr(rcu_dereference(nft_base_chain(basechain)->stats));
+	u64_stats_update_begin(&stats->syncp);
+	stats->pkts++;
+	stats->bytes += pkt->skb->len;
+	u64_stats_update_end(&stats->syncp);
 	rcu_read_unlock_bh();
 
 	return nft_base_chain(basechain)->policy;
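
[Editor's note: two things change together in nft_do_chain(): stats loses its __percpu annotation because this_cpu_ptr() already resolves the per-cpu address to a plain pointer, and the raw __this_cpu_inc()/__this_cpu_add() pair becomes a u64_stats critical section. On 64-bit kernels u64_stats_update_begin()/end() compile away; on 32-bit kernels they bump a seqcount so the reader loop in nft_dump_stats() never observes a torn 64-bit counter. The writer/reader pairing, reduced to its core:]

	/* writer (per-cpu, softirq context) */
	u64_stats_update_begin(&stats->syncp);
	stats->pkts++;
	stats->bytes += pkt->skb->len;
	u64_stats_update_end(&stats->syncp);

	/* reader: the fetch_begin/fetch_retry loop added to nft_dump_stats() */
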
diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c
index c39b583ace32..70c0be8d0121 100644
--- a/net/sched/cls_u32.c
+++ b/net/sched/cls_u32.c
@@ -38,6 +38,7 @@
 #include <linux/errno.h>
 #include <linux/rtnetlink.h>
 #include <linux/skbuff.h>
+#include <linux/bitmap.h>
 #include <net/netlink.h>
 #include <net/act_api.h>
 #include <net/pkt_cls.h>
@@ -460,17 +461,25 @@ static int u32_delete(struct tcf_proto *tp, unsigned long arg)
 	return 0;
 }
 
+#define NR_U32_NODE (1<<12)
 static u32 gen_new_kid(struct tc_u_hnode *ht, u32 handle)
 {
 	struct tc_u_knode *n;
-	unsigned int i = 0x7FF;
+	unsigned long i;
+	unsigned long *bitmap = kzalloc(BITS_TO_LONGS(NR_U32_NODE) * sizeof(unsigned long),
+					GFP_KERNEL);
+	if (!bitmap)
+		return handle | 0xFFF;
 
 	for (n = ht->ht[TC_U32_HASH(handle)]; n; n = n->next)
-		if (i < TC_U32_NODE(n->handle))
-			i = TC_U32_NODE(n->handle);
-	i++;
+		set_bit(TC_U32_NODE(n->handle), bitmap);
 
-	return handle | (i > 0xFFF ? 0xFFF : i);
+	i = find_next_zero_bit(bitmap, NR_U32_NODE, 0x800);
+	if (i >= NR_U32_NODE)
+		i = find_next_zero_bit(bitmap, NR_U32_NODE, 1);
+
+	kfree(bitmap);
+	return handle | (i >= NR_U32_NODE ? 0xFFF : i);
 }
 
 static const struct nla_policy u32_policy[TCA_U32_MAX + 1] = {
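
[Editor's note: the old gen_new_kid() only tracked the highest node id in the bucket and clamped at 0xFFF, so freed low ids were never reused and, once saturated, every new filter got the same handle. The bitmap version records exactly which of the 4096 node ids are in use and hands out the first free one, preferring the auto-assign range at 0x800 and above. The find_next_zero_bit() contract it leans on (small self-contained illustration, not from the patch):]

	DECLARE_BITMAP(used, 8);
	unsigned long n;

	bitmap_zero(used, 8);
	set_bit(3, used);
	n = find_next_zero_bit(used, 8, 3);  /* n == 4: first clear bit >= 3 */
	n = find_next_zero_bit(used, 8, 6);  /* n == 6 */
	n = find_next_zero_bit(used, 3, 3);  /* n >= size (3): none found */
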
diff --git a/tools/lib/lockdep/include/liblockdep/mutex.h b/tools/lib/lockdep/include/liblockdep/mutex.h
index c342f7087147..ee53a42818ca 100644
--- a/tools/lib/lockdep/include/liblockdep/mutex.h
+++ b/tools/lib/lockdep/include/liblockdep/mutex.h
@@ -35,7 +35,7 @@ static inline int __mutex_init(liblockdep_pthread_mutex_t *lock,
 
 static inline int liblockdep_pthread_mutex_lock(liblockdep_pthread_mutex_t *lock)
 {
-	lock_acquire(&lock->dep_map, 0, 0, 0, 2, NULL, (unsigned long)_RET_IP_);
+	lock_acquire(&lock->dep_map, 0, 0, 0, 1, NULL, (unsigned long)_RET_IP_);
 	return pthread_mutex_lock(&lock->mutex);
 }
 
@@ -47,7 +47,7 @@ static inline int liblockdep_pthread_mutex_unlock(liblockdep_pthread_mutex_t *lo
 
 static inline int liblockdep_pthread_mutex_trylock(liblockdep_pthread_mutex_t *lock)
 {
-	lock_acquire(&lock->dep_map, 0, 1, 0, 2, NULL, (unsigned long)_RET_IP_);
+	lock_acquire(&lock->dep_map, 0, 1, 0, 1, NULL, (unsigned long)_RET_IP_);
 	return pthread_mutex_trylock(&lock->mutex) == 0 ? 1 : 0;
 }
 
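
[Editor's note: all of the liblockdep 2 -> 1 edits in this series target the same argument: the fifth parameter of lock_acquire() is lockdep's 'check' flag, and the kernel side had recently reduced it from a three-level value (0/1/2, with 2 meaning full validation) to a boolean where 1 is the strongest setting. The prototype these userspace shims mirror, quoted from memory as a reference:]

	void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
			  int trylock, int read, int check,
			  struct lockdep_map *nest_lock, unsigned long ip);
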
diff --git a/tools/lib/lockdep/include/liblockdep/rwlock.h b/tools/lib/lockdep/include/liblockdep/rwlock.h
index a680ab8c2e36..4ec03f861551 100644
--- a/tools/lib/lockdep/include/liblockdep/rwlock.h
+++ b/tools/lib/lockdep/include/liblockdep/rwlock.h
@@ -36,7 +36,7 @@ static inline int __rwlock_init(liblockdep_pthread_rwlock_t *lock,
 
 static inline int liblockdep_pthread_rwlock_rdlock(liblockdep_pthread_rwlock_t *lock)
 {
-	lock_acquire(&lock->dep_map, 0, 0, 2, 2, NULL, (unsigned long)_RET_IP_);
+	lock_acquire(&lock->dep_map, 0, 0, 2, 1, NULL, (unsigned long)_RET_IP_);
 	return pthread_rwlock_rdlock(&lock->rwlock);
 
 }
@@ -49,19 +49,19 @@ static inline int liblockdep_pthread_rwlock_unlock(liblockdep_pthread_rwlock_t *
 
 static inline int liblockdep_pthread_rwlock_wrlock(liblockdep_pthread_rwlock_t *lock)
 {
-	lock_acquire(&lock->dep_map, 0, 0, 0, 2, NULL, (unsigned long)_RET_IP_);
+	lock_acquire(&lock->dep_map, 0, 0, 0, 1, NULL, (unsigned long)_RET_IP_);
 	return pthread_rwlock_wrlock(&lock->rwlock);
 }
 
 static inline int liblockdep_pthread_rwlock_tryrdlock(liblockdep_pthread_rwlock_t *lock)
 {
-	lock_acquire(&lock->dep_map, 0, 1, 2, 2, NULL, (unsigned long)_RET_IP_);
+	lock_acquire(&lock->dep_map, 0, 1, 2, 1, NULL, (unsigned long)_RET_IP_);
 	return pthread_rwlock_tryrdlock(&lock->rwlock) == 0 ? 1 : 0;
 }
 
 static inline int liblockdep_pthread_rwlock_trywlock(liblockdep_pthread_rwlock_t *lock)
 {
-	lock_acquire(&lock->dep_map, 0, 1, 0, 2, NULL, (unsigned long)_RET_IP_);
+	lock_acquire(&lock->dep_map, 0, 1, 0, 1, NULL, (unsigned long)_RET_IP_);
 	return pthread_rwlock_trywlock(&lock->rwlock) == 0 ? 1 : 0;
 }
 
diff --git a/tools/lib/lockdep/preload.c b/tools/lib/lockdep/preload.c
index 23bd69cb5ade..6f803609e498 100644
--- a/tools/lib/lockdep/preload.c
+++ b/tools/lib/lockdep/preload.c
@@ -92,7 +92,7 @@ enum { none, prepare, done, } __init_state;
 static void init_preload(void);
 static void try_init_preload(void)
 {
-	if (!__init_state != done)
+	if (__init_state != done)
 		init_preload();
 }
 
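
[Editor's note: the try_init_preload() change is another precedence bug: unary ! binds before !=, so the old condition compared (!__init_state), which is only ever 0 or 1, against done (value 2) and therefore held on every call, re-running init_preload() each time. With the enum values written out:]

	/* enum { none = 0, prepare = 1, done = 2 } __init_state;
	 *
	 * old: (!__init_state) != done  ->  0 != 2 or 1 != 2  ->  always true
	 * new:  __init_state  != done   ->  true only until setup completes
	 */
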
@@ -252,7 +252,7 @@ int pthread_mutex_lock(pthread_mutex_t *mutex)
 
 	try_init_preload();
 
-	lock_acquire(&__get_lock(mutex)->dep_map, 0, 0, 0, 2, NULL,
+	lock_acquire(&__get_lock(mutex)->dep_map, 0, 0, 0, 1, NULL,
 			(unsigned long)_RET_IP_);
 	/*
 	 * Here's the thing with pthread mutexes: unlike the kernel variant,
@@ -281,7 +281,7 @@ int pthread_mutex_trylock(pthread_mutex_t *mutex)
 
 	try_init_preload();
 
-	lock_acquire(&__get_lock(mutex)->dep_map, 0, 1, 0, 2, NULL, (unsigned long)_RET_IP_);
+	lock_acquire(&__get_lock(mutex)->dep_map, 0, 1, 0, 1, NULL, (unsigned long)_RET_IP_);
 	r = ll_pthread_mutex_trylock(mutex);
 	if (r)
 		lock_release(&__get_lock(mutex)->dep_map, 0, (unsigned long)_RET_IP_);
@@ -303,7 +303,7 @@ int pthread_mutex_unlock(pthread_mutex_t *mutex)
 	 */
 	r = ll_pthread_mutex_unlock(mutex);
 	if (r)
-		lock_acquire(&__get_lock(mutex)->dep_map, 0, 0, 0, 2, NULL, (unsigned long)_RET_IP_);
+		lock_acquire(&__get_lock(mutex)->dep_map, 0, 0, 0, 1, NULL, (unsigned long)_RET_IP_);
 
 	return r;
 }
@@ -352,7 +352,7 @@ int pthread_rwlock_rdlock(pthread_rwlock_t *rwlock)
 
 	init_preload();
 
-	lock_acquire(&__get_lock(rwlock)->dep_map, 0, 0, 2, 2, NULL, (unsigned long)_RET_IP_);
+	lock_acquire(&__get_lock(rwlock)->dep_map, 0, 0, 2, 1, NULL, (unsigned long)_RET_IP_);
 	r = ll_pthread_rwlock_rdlock(rwlock);
 	if (r)
 		lock_release(&__get_lock(rwlock)->dep_map, 0, (unsigned long)_RET_IP_);
@@ -366,7 +366,7 @@ int pthread_rwlock_tryrdlock(pthread_rwlock_t *rwlock)
 
 	init_preload();
 
-	lock_acquire(&__get_lock(rwlock)->dep_map, 0, 1, 2, 2, NULL, (unsigned long)_RET_IP_);
+	lock_acquire(&__get_lock(rwlock)->dep_map, 0, 1, 2, 1, NULL, (unsigned long)_RET_IP_);
 	r = ll_pthread_rwlock_tryrdlock(rwlock);
 	if (r)
 		lock_release(&__get_lock(rwlock)->dep_map, 0, (unsigned long)_RET_IP_);
@@ -380,7 +380,7 @@ int pthread_rwlock_trywrlock(pthread_rwlock_t *rwlock)
 
 	init_preload();
 
-	lock_acquire(&__get_lock(rwlock)->dep_map, 0, 1, 0, 2, NULL, (unsigned long)_RET_IP_);
+	lock_acquire(&__get_lock(rwlock)->dep_map, 0, 1, 0, 1, NULL, (unsigned long)_RET_IP_);
 	r = ll_pthread_rwlock_trywrlock(rwlock);
 	if (r)
 		lock_release(&__get_lock(rwlock)->dep_map, 0, (unsigned long)_RET_IP_);
@@ -394,7 +394,7 @@ int pthread_rwlock_wrlock(pthread_rwlock_t *rwlock)
 
 	init_preload();
 
-	lock_acquire(&__get_lock(rwlock)->dep_map, 0, 0, 0, 2, NULL, (unsigned long)_RET_IP_);
+	lock_acquire(&__get_lock(rwlock)->dep_map, 0, 0, 0, 1, NULL, (unsigned long)_RET_IP_);
 	r = ll_pthread_rwlock_wrlock(rwlock);
 	if (r)
 		lock_release(&__get_lock(rwlock)->dep_map, 0, (unsigned long)_RET_IP_);
@@ -411,7 +411,7 @@ int pthread_rwlock_unlock(pthread_rwlock_t *rwlock)
 	lock_release(&__get_lock(rwlock)->dep_map, 0, (unsigned long)_RET_IP_);
 	r = ll_pthread_rwlock_unlock(rwlock);
 	if (r)
-		lock_acquire(&__get_lock(rwlock)->dep_map, 0, 0, 0, 2, NULL, (unsigned long)_RET_IP_);
+		lock_acquire(&__get_lock(rwlock)->dep_map, 0, 0, 0, 1, NULL, (unsigned long)_RET_IP_);
 
 	return r;
 }
@@ -439,8 +439,6 @@ __attribute__((constructor)) static void init_preload(void)
 	ll_pthread_rwlock_unlock = dlsym(RTLD_NEXT, "pthread_rwlock_unlock");
 #endif
 
-	printf("%p\n", ll_pthread_mutex_trylock);fflush(stdout);
-
 	lockdep_init();
 
 	__init_state = done;