 Documentation/devicetree/bindings/ata/sata_rcar.txt | 6
 Documentation/networking/ip-sysctl.txt | 14
 MAINTAINERS | 1
 arch/arm64/boot/dts/apm-storm.dtsi | 10
 arch/x86/kvm/emulate.c | 8
 arch/xtensa/Kconfig | 4
 arch/xtensa/boot/dts/lx200mx.dts | 16
 arch/xtensa/configs/generic_kc705_defconfig | 131
 arch/xtensa/configs/smp_lx200_defconfig | 135
 arch/xtensa/include/asm/pgtable.h | 2
 arch/xtensa/include/uapi/asm/unistd.h | 12
 block/blk-merge.c | 19
 block/blk-mq.c | 41
 block/ioprio.c | 14
 block/scsi_ioctl.c | 8
 drivers/acpi/blacklist.c | 8
 drivers/ata/ahci.c | 28
 drivers/ata/libahci.c | 78
 drivers/ata/sata_rcar.c | 15
 drivers/base/power/domain.c | 42
 drivers/block/zram/zram_drv.c | 3
 drivers/char/hw_random/pseries-rng.c | 11
 drivers/char/virtio_console.c | 4
 drivers/cpufreq/cpufreq-dt.c | 4
 drivers/cpufreq/cpufreq.c | 3
 drivers/crypto/caam/key_gen.c | 29
 drivers/crypto/qat/qat_common/adf_accel_devices.h | 3
 drivers/crypto/qat/qat_common/adf_transport.c | 12
 drivers/crypto/qat/qat_common/qat_algs.c | 7
 drivers/crypto/qat/qat_common/qat_crypto.c | 8
 drivers/crypto/qat/qat_dh895xcc/adf_admin.c | 2
 drivers/crypto/qat/qat_dh895xcc/adf_drv.c | 32
 drivers/crypto/qat/qat_dh895xcc/adf_isr.c | 2
 drivers/firewire/core-cdev.c | 3
 drivers/gpu/drm/i915/i915_drv.c | 10
 drivers/gpu/drm/i915/i915_gem_gtt.c | 16
 drivers/gpu/drm/i915/intel_panel.c | 17
 drivers/gpu/drm/radeon/cik.c | 7
 drivers/gpu/drm/radeon/cik_sdma.c | 21
 drivers/gpu/drm/radeon/evergreen.c | 4
 drivers/gpu/drm/radeon/r100.c | 3
 drivers/gpu/drm/radeon/r600_dma.c | 20
 drivers/gpu/drm/radeon/rs600.c | 3
 drivers/gpu/drm/radeon/rs690.c | 3
 drivers/gpu/drm/radeon/rv515.c | 3
 drivers/gpu/drm/radeon/si.c | 3
 drivers/hid/hid-core.c | 1
 drivers/hid/hid-ids.h | 1
 drivers/hid/usbhid/hid-quirks.c | 1
 drivers/hwmon/fam15h_power.c | 2
 drivers/hwmon/ibmpowernv.c | 6
 drivers/hwmon/pwm-fan.c | 13
 drivers/md/dm-bufio.c | 12
 drivers/md/dm-raid.c | 17
 drivers/md/dm-stripe.c | 4
 drivers/md/dm-thin.c | 16
 drivers/md/persistent-data/dm-btree-internal.h | 6
 drivers/md/persistent-data/dm-btree-spine.c | 2
 drivers/md/persistent-data/dm-btree.c | 24
 drivers/mfd/max77693.c | 14
 drivers/mfd/rtsx_pcr.c | 2
 drivers/mfd/stmpe.h | 2
 drivers/mfd/twl4030-power.c | 52
 drivers/mfd/viperboard.c | 5
 drivers/net/ethernet/apm/xgene/xgene_enet_hw.c | 18
 drivers/net/ethernet/apm/xgene/xgene_enet_hw.h | 4
 drivers/net/ethernet/apm/xgene/xgene_enet_main.c | 11
 drivers/net/ethernet/apm/xgene/xgene_enet_main.h | 5
 drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c | 7
 drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c | 7
 drivers/net/ethernet/broadcom/bcmsysport.c | 13
 drivers/net/ethernet/broadcom/genet/bcmgenet.c | 11
 drivers/net/ethernet/broadcom/genet/bcmgenet.h | 3
 drivers/net/ethernet/broadcom/genet/bcmmii.c | 9
 drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c | 31
 drivers/net/ethernet/chelsio/cxgb4/sge.c | 30
 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c | 51
 drivers/net/ethernet/chelsio/cxgb4/t4_regs.h | 10
 drivers/net/ethernet/chelsio/cxgb4vf/adapter.h | 8
 drivers/net/ethernet/chelsio/cxgb4vf/sge.c | 136
 drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h | 2
 drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c | 28
 drivers/net/ethernet/cisco/enic/enic_main.c | 20
 drivers/net/ethernet/freescale/fec_main.c | 39
 drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c | 4
 drivers/net/ethernet/marvell/mv643xx_eth.c | 18
 drivers/net/ethernet/marvell/mvpp2.c | 27
 drivers/net/ethernet/mellanox/mlx4/en_netdev.c | 22
 drivers/net/ethernet/mellanox/mlx5/core/eq.c | 7
 drivers/net/ethernet/mellanox/mlx5/core/main.c | 4
 drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c | 3
 drivers/net/ethernet/qualcomm/Kconfig | 3
 drivers/net/ethernet/sfc/ef10.c | 3
 drivers/net/ethernet/smsc/smc91x.c | 20
 drivers/net/ethernet/smsc/smsc911x.c | 61
 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 52
 drivers/net/ethernet/sun/sunhme.c | 62
 drivers/net/ethernet/ti/cpsw_ale.c | 1
 drivers/net/ethernet/ti/cpts.c | 2
 drivers/net/macvtap.c | 2
 drivers/net/phy/dp83640.c | 4
 drivers/net/phy/phy.c | 36
 drivers/net/ppp/ppp_generic.c | 40
 drivers/net/tun.c | 28
 drivers/net/usb/asix_devices.c | 14
 drivers/net/vxlan.c | 31
 drivers/net/wireless/iwlwifi/mvm/fw.c | 10
 drivers/net/wireless/iwlwifi/mvm/mac80211.c | 1
 drivers/net/wireless/iwlwifi/mvm/mvm.h | 1
 drivers/net/wireless/iwlwifi/mvm/ops.c | 12
 drivers/net/wireless/iwlwifi/pcie/trans.c | 4
 drivers/net/wireless/mac80211_hwsim.c | 4
 drivers/platform/x86/Kconfig | 1
 drivers/platform/x86/hp_accel.c | 44
 drivers/s390/kvm/virtio_ccw.c | 1
 drivers/thermal/imx_thermal.c | 45
 drivers/thermal/int340x_thermal/int3403_thermal.c | 8
 drivers/thermal/samsung/exynos_tmu_data.c | 1
 drivers/thermal/samsung/exynos_tmu_data.h | 1
 fs/ceph/caps.c | 2
 fs/notify/fsnotify.c | 36
 fs/notify/fsnotify.h | 4
 fs/notify/inode_mark.c | 8
 fs/notify/mark.c | 36
 fs/notify/vfsmount_mark.c | 8
 include/linux/bootmem.h | 1
 include/linux/mfd/max77693-private.h | 7
 include/linux/mmzone.h | 9
 include/linux/page-isolation.h | 8
 include/linux/pm_domain.h | 8
 include/linux/ring_buffer.h | 2
 include/linux/socket.h | 2
 include/net/9p/transport.h | 1
 include/net/udp_tunnel.h | 9
 include/uapi/linux/Kbuild | 4
 include/uapi/linux/if_bridge.h | 1
 init/main.c | 2
 kernel/audit.c | 2
 kernel/audit_tree.c | 1
 kernel/panic.c | 1
 kernel/power/suspend.c | 4
 kernel/trace/ring_buffer.c | 81
 kernel/trace/trace.c | 33
 lib/rhashtable.c | 10
 mm/bootmem.c | 9
 mm/compaction.c | 18
 mm/internal.h | 25
 mm/iov_iter.c | 4
 mm/memory_hotplug.c | 26
 mm/nobootmem.c | 8
 mm/page_alloc.c | 68
 mm/page_isolation.c | 43
 mm/slab_common.c | 4
 net/bridge/netfilter/nft_reject_bridge.c | 1
 net/ceph/crypto.c | 169
 net/ceph/osd_client.c | 7
 net/dsa/slave.c | 7
 net/ipv4/fou.c | 2
 net/ipv4/geneve.c | 3
 net/ipv4/ip_sockglue.c | 2
 net/ipv4/tcp_input.c | 60
 net/ipv6/ip6_gre.c | 5
 net/ipv6/ip6_tunnel.c | 10
 net/ipv6/ip6_vti.c | 11
 net/ipv6/sit.c | 15
 net/mac80211/ibss.c | 2
 net/mac80211/ieee80211_i.h | 3
 net/mac80211/iface.c | 18
 net/mac80211/mesh.c | 2
 net/mac80211/mlme.c | 5
 net/mac80211/rx.c | 14
 net/mac80211/spectmgmt.c | 18
 net/netlink/af_netlink.c | 5
 net/sctp/auth.c | 2
 net/sctp/sm_make_chunk.c | 3
 security/selinux/hooks.c | 7
 sound/pci/hda/hda_intel.c | 4
 sound/pci/hda/patch_conexant.c | 31
 sound/pci/hda/patch_realtek.c | 105
 sound/usb/mixer_quirks.c | 6
 tools/testing/selftests/net/psock_fanout.c | 2
 181 files changed, 2196 insertions(+), 833 deletions(-)
diff --git a/Documentation/devicetree/bindings/ata/sata_rcar.txt b/Documentation/devicetree/bindings/ata/sata_rcar.txt
index 1e6111333fa8..80ae87a0784b 100644
--- a/Documentation/devicetree/bindings/ata/sata_rcar.txt
+++ b/Documentation/devicetree/bindings/ata/sata_rcar.txt
@@ -3,8 +3,10 @@
 Required properties:
 - compatible : should contain one of the following:
   - "renesas,sata-r8a7779" for R-Car H1
-  - "renesas,sata-r8a7790" for R-Car H2
-  - "renesas,sata-r8a7791" for R-Car M2
+  - "renesas,sata-r8a7790-es1" for R-Car H2 ES1
+  - "renesas,sata-r8a7790" for R-Car H2 other than ES1
+  - "renesas,sata-r8a7791" for R-Car M2-W
+  - "renesas,sata-r8a7793" for R-Car M2-N
 - reg : address and length of the SATA registers;
 - interrupts : must consist of one interrupt specifier.
 
diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt
index 0307e2875f21..a476b08a43e0 100644
--- a/Documentation/networking/ip-sysctl.txt
+++ b/Documentation/networking/ip-sysctl.txt
@@ -56,6 +56,13 @@ ip_forward_use_pmtu - BOOLEAN
 	0 - disabled
 	1 - enabled
 
+fwmark_reflect - BOOLEAN
+	Controls the fwmark of kernel-generated IPv4 reply packets that are not
+	associated with a socket (for example, TCP RSTs or ICMP echo replies).
+	If unset, these packets have a fwmark of zero. If set, they have the
+	fwmark of the packet they are replying to.
+	Default: 0
+
 route/max_size - INTEGER
 	Maximum number of routes allowed in the kernel. Increase
 	this when using large numbers of interfaces and/or routes.
@@ -1201,6 +1208,13 @@ conf/all/forwarding - BOOLEAN
 proxy_ndp - BOOLEAN
 	Do proxy ndp.
 
+fwmark_reflect - BOOLEAN
+	Controls the fwmark of kernel-generated IPv6 reply packets that are not
+	associated with a socket (for example, TCP RSTs or ICMPv6 echo replies).
+	If unset, these packets have a fwmark of zero. If set, they have the
+	fwmark of the packet they are replying to.
+	Default: 0
+
 conf/interface/*:
 	Change special settings per interface.
 
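The fwmark_reflect knobs documented above are plain boolean sysctls, so the usual "sysctl -w net.ipv4.fwmark_reflect=1" (or its ipv6 twin) flips them at runtime. A minimal C sketch of the same write through procfs; the helper name is illustrative, not part of the patch:

#include <stdio.h>

/* Enable fwmark reflection for IPv4 replies; the IPv6 knob lives at
 * /proc/sys/net/ipv6/fwmark_reflect.  Returns 0 on success. */
static int enable_fwmark_reflect(void)
{
	FILE *f = fopen("/proc/sys/net/ipv4/fwmark_reflect", "w");

	if (!f)
		return -1;
	fputs("1", f);
	return fclose(f);
}

int main(void)
{
	return enable_fwmark_reflect() ? 1 : 0;
}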
diff --git a/MAINTAINERS b/MAINTAINERS
index ea4d0058fd1b..60b1163dba28 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -4716,6 +4716,7 @@ L: linux-iio@vger.kernel.org
 S:	Maintained
 F:	drivers/iio/
 F:	drivers/staging/iio/
+F:	include/linux/iio/
 
 IKANOS/ADI EAGLE ADSL USB DRIVER
 M:	Matthieu Castet <castet.matthieu@free.fr>
diff --git a/arch/arm64/boot/dts/apm-storm.dtsi b/arch/arm64/boot/dts/apm-storm.dtsi
index 295c72d52a1f..f1ad9c2ab2e9 100644
--- a/arch/arm64/boot/dts/apm-storm.dtsi
+++ b/arch/arm64/boot/dts/apm-storm.dtsi
@@ -599,7 +599,7 @@
 			compatible = "apm,xgene-enet";
 			status = "disabled";
 			reg = <0x0 0x17020000 0x0 0xd100>,
-			      <0x0 0X17030000 0x0 0X400>,
+			      <0x0 0X17030000 0x0 0Xc300>,
 			      <0x0 0X10000000 0x0 0X200>;
 			reg-names = "enet_csr", "ring_csr", "ring_cmd";
 			interrupts = <0x0 0x3c 0x4>;
@@ -624,9 +624,9 @@
 		sgenet0: ethernet@1f210000 {
 			compatible = "apm,xgene-enet";
 			status = "disabled";
-			reg = <0x0 0x1f210000 0x0 0x10000>,
-			      <0x0 0x1f200000 0x0 0X10000>,
-			      <0x0 0x1B000000 0x0 0X20000>;
+			reg = <0x0 0x1f210000 0x0 0xd100>,
+			      <0x0 0x1f200000 0x0 0Xc300>,
+			      <0x0 0x1B000000 0x0 0X200>;
 			reg-names = "enet_csr", "ring_csr", "ring_cmd";
 			interrupts = <0x0 0xA0 0x4>;
 			dma-coherent;
@@ -639,7 +639,7 @@
 			compatible = "apm,xgene-enet";
 			status = "disabled";
 			reg = <0x0 0x1f610000 0x0 0xd100>,
-			      <0x0 0x1f600000 0x0 0X400>,
+			      <0x0 0x1f600000 0x0 0Xc300>,
 			      <0x0 0x18000000 0x0 0X200>;
 			reg-names = "enet_csr", "ring_csr", "ring_cmd";
 			interrupts = <0x0 0x60 0x4>;
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index 5edf088ca51e..9f8a2faf5040 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -4287,6 +4287,7 @@ static int decode_operand(struct x86_emulate_ctxt *ctxt, struct operand *op,
 		fetch_register_operand(op);
 		break;
 	case OpCL:
+		op->type = OP_IMM;
 		op->bytes = 1;
 		op->val = reg_read(ctxt, VCPU_REGS_RCX) & 0xff;
 		break;
@@ -4294,6 +4295,7 @@ static int decode_operand(struct x86_emulate_ctxt *ctxt, struct operand *op,
 		rc = decode_imm(ctxt, op, 1, true);
 		break;
 	case OpOne:
+		op->type = OP_IMM;
 		op->bytes = 1;
 		op->val = 1;
 		break;
@@ -4352,21 +4354,27 @@ static int decode_operand(struct x86_emulate_ctxt *ctxt, struct operand *op,
 		ctxt->memop.bytes = ctxt->op_bytes + 2;
 		goto mem_common;
 	case OpES:
+		op->type = OP_IMM;
 		op->val = VCPU_SREG_ES;
 		break;
 	case OpCS:
+		op->type = OP_IMM;
 		op->val = VCPU_SREG_CS;
 		break;
 	case OpSS:
+		op->type = OP_IMM;
 		op->val = VCPU_SREG_SS;
 		break;
 	case OpDS:
+		op->type = OP_IMM;
 		op->val = VCPU_SREG_DS;
 		break;
 	case OpFS:
+		op->type = OP_IMM;
 		op->val = VCPU_SREG_FS;
 		break;
 	case OpGS:
+		op->type = OP_IMM;
 		op->val = VCPU_SREG_GS;
 		break;
 	case OpImplicit:
diff --git a/arch/xtensa/Kconfig b/arch/xtensa/Kconfig
index 49c6c3d94449..81f57e8c8f1b 100644
--- a/arch/xtensa/Kconfig
+++ b/arch/xtensa/Kconfig
@@ -319,8 +319,8 @@ config XTENSA_PLATFORM_S6105
 
 config XTENSA_PLATFORM_XTFPGA
 	bool "XTFPGA"
+	select ETHOC if ETHERNET
 	select SERIAL_CONSOLE
-	select ETHOC
 	select XTENSA_CALIBRATE_CCOUNT
 	help
 	  XTFPGA is the name of Tensilica board family (LX60, LX110, LX200, ML605).
@@ -367,7 +367,7 @@ config BUILTIN_DTB
 config BLK_DEV_SIMDISK
 	tristate "Host file-based simulated block device support"
 	default n
-	depends on XTENSA_PLATFORM_ISS
+	depends on XTENSA_PLATFORM_ISS && BLOCK
 	help
 	  Create block devices that map to files in the host file system.
 	  Device binding to host file may be changed at runtime via proc
diff --git a/arch/xtensa/boot/dts/lx200mx.dts b/arch/xtensa/boot/dts/lx200mx.dts
new file mode 100644
index 000000000000..249822b99bd6
--- /dev/null
+++ b/arch/xtensa/boot/dts/lx200mx.dts
@@ -0,0 +1,16 @@
+/dts-v1/;
+/include/ "xtfpga.dtsi"
+/include/ "xtfpga-flash-16m.dtsi"
+
+/ {
+	compatible = "cdns,xtensa-lx200";
+	memory@0 {
+		device_type = "memory";
+		reg = <0x00000000 0x06000000>;
+	};
+	pic: pic {
+		compatible = "cdns,xtensa-mx";
+		#interrupt-cells = <2>;
+		interrupt-controller;
+	};
+};
diff --git a/arch/xtensa/configs/generic_kc705_defconfig b/arch/xtensa/configs/generic_kc705_defconfig
new file mode 100644
index 000000000000..f4b7b3888da8
--- /dev/null
+++ b/arch/xtensa/configs/generic_kc705_defconfig
@@ -0,0 +1,131 @@
+CONFIG_SYSVIPC=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_FHANDLE=y
+CONFIG_IRQ_DOMAIN_DEBUG=y
+CONFIG_NO_HZ_IDLE=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_IRQ_TIME_ACCOUNTING=y
+CONFIG_BSD_PROCESS_ACCT=y
+CONFIG_CGROUP_DEBUG=y
+CONFIG_CGROUP_FREEZER=y
+CONFIG_CGROUP_DEVICE=y
+CONFIG_CPUSETS=y
+CONFIG_CGROUP_CPUACCT=y
+CONFIG_RESOURCE_COUNTERS=y
+CONFIG_MEMCG=y
+CONFIG_NAMESPACES=y
+CONFIG_SCHED_AUTOGROUP=y
+CONFIG_RELAY=y
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_EXPERT=y
+CONFIG_SYSCTL_SYSCALL=y
+CONFIG_KALLSYMS_ALL=y
+CONFIG_PROFILING=y
+CONFIG_OPROFILE=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+# CONFIG_IOSCHED_DEADLINE is not set
+# CONFIG_IOSCHED_CFQ is not set
+CONFIG_XTENSA_VARIANT_DC233C=y
+CONFIG_XTENSA_UNALIGNED_USER=y
+CONFIG_PREEMPT=y
+CONFIG_HIGHMEM=y
+# CONFIG_PCI is not set
+CONFIG_XTENSA_PLATFORM_XTFPGA=y
+CONFIG_CMDLINE_BOOL=y
+CONFIG_CMDLINE="earlycon=uart8250,mmio32,0xfd050020,115200n8 console=ttyS0,115200n8 ip=dhcp root=/dev/nfs rw debug"
+CONFIG_USE_OF=y
+CONFIG_BUILTIN_DTB="kc705"
+# CONFIG_COMPACTION is not set
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+CONFIG_IP_PNP_RARP=y
+# CONFIG_IPV6 is not set
+CONFIG_NETFILTER=y
+# CONFIG_WIRELESS is not set
+CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
+# CONFIG_STANDALONE is not set
+CONFIG_MTD=y
+CONFIG_MTD_CFI=y
+CONFIG_MTD_JEDECPROBE=y
+CONFIG_MTD_CFI_INTELEXT=y
+CONFIG_MTD_CFI_AMDSTD=y
+CONFIG_MTD_CFI_STAA=y
+CONFIG_MTD_PHYSMAP_OF=y
+CONFIG_MTD_UBI=y
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_RAM=y
+CONFIG_SCSI=y
+CONFIG_BLK_DEV_SD=y
+CONFIG_NETDEVICES=y
+# CONFIG_NET_VENDOR_ARC is not set
+# CONFIG_NET_VENDOR_BROADCOM is not set
+# CONFIG_NET_VENDOR_INTEL is not set
+# CONFIG_NET_VENDOR_MARVELL is not set
+# CONFIG_NET_VENDOR_MICREL is not set
+# CONFIG_NET_VENDOR_NATSEMI is not set
+# CONFIG_NET_VENDOR_SAMSUNG is not set
+# CONFIG_NET_VENDOR_SEEQ is not set
+# CONFIG_NET_VENDOR_SMSC is not set
+# CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_VIA is not set
+# CONFIG_NET_VENDOR_WIZNET is not set
+CONFIG_MARVELL_PHY=y
+# CONFIG_WLAN is not set
+# CONFIG_INPUT_MOUSEDEV is not set
+# CONFIG_INPUT_KEYBOARD is not set
+# CONFIG_INPUT_MOUSE is not set
+# CONFIG_SERIO is not set
+CONFIG_SERIAL_8250=y
+# CONFIG_SERIAL_8250_DEPRECATED_OPTIONS is not set
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_OF_PLATFORM=y
+CONFIG_HW_RANDOM=y
+# CONFIG_HWMON is not set
+CONFIG_WATCHDOG=y
+CONFIG_WATCHDOG_NOWAYOUT=y
+CONFIG_SOFT_WATCHDOG=y
+# CONFIG_VGA_CONSOLE is not set
+# CONFIG_USB_SUPPORT is not set
+# CONFIG_IOMMU_SUPPORT is not set
+CONFIG_EXT3_FS=y
+CONFIG_EXT4_FS=y
+CONFIG_FANOTIFY=y
+CONFIG_VFAT_FS=y
+CONFIG_PROC_KCORE=y
+CONFIG_TMPFS=y
+CONFIG_TMPFS_POSIX_ACL=y
+CONFIG_UBIFS_FS=y
+CONFIG_NFS_FS=y
+CONFIG_NFS_V4=y
+CONFIG_NFS_SWAP=y
+CONFIG_ROOT_NFS=y
+CONFIG_SUNRPC_DEBUG=y
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_ISO8859_1=y
+CONFIG_PRINTK_TIME=y
+CONFIG_DYNAMIC_DEBUG=y
+CONFIG_DEBUG_INFO=y
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_LOCKUP_DETECTOR=y
+# CONFIG_SCHED_DEBUG is not set
+CONFIG_SCHEDSTATS=y
+CONFIG_TIMER_STATS=y
+CONFIG_DEBUG_RT_MUTEXES=y
+CONFIG_DEBUG_SPINLOCK=y
+CONFIG_DEBUG_MUTEXES=y
+CONFIG_DEBUG_ATOMIC_SLEEP=y
+CONFIG_STACKTRACE=y
+CONFIG_RCU_TRACE=y
+# CONFIG_FTRACE is not set
+CONFIG_LD_NO_RELAX=y
+# CONFIG_S32C1I_SELFTEST is not set
+CONFIG_CRYPTO_ANSI_CPRNG=y
diff --git a/arch/xtensa/configs/smp_lx200_defconfig b/arch/xtensa/configs/smp_lx200_defconfig
new file mode 100644
index 000000000000..22eeacba37cc
--- /dev/null
+++ b/arch/xtensa/configs/smp_lx200_defconfig
@@ -0,0 +1,135 @@
+CONFIG_SYSVIPC=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_FHANDLE=y
+CONFIG_IRQ_DOMAIN_DEBUG=y
+CONFIG_NO_HZ_IDLE=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_IRQ_TIME_ACCOUNTING=y
+CONFIG_BSD_PROCESS_ACCT=y
+CONFIG_CGROUP_DEBUG=y
+CONFIG_CGROUP_FREEZER=y
+CONFIG_CGROUP_DEVICE=y
+CONFIG_CPUSETS=y
+CONFIG_CGROUP_CPUACCT=y
+CONFIG_RESOURCE_COUNTERS=y
+CONFIG_MEMCG=y
+CONFIG_NAMESPACES=y
+CONFIG_SCHED_AUTOGROUP=y
+CONFIG_RELAY=y
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_EXPERT=y
+CONFIG_SYSCTL_SYSCALL=y
+CONFIG_KALLSYMS_ALL=y
+CONFIG_PROFILING=y
+CONFIG_OPROFILE=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+# CONFIG_IOSCHED_DEADLINE is not set
+# CONFIG_IOSCHED_CFQ is not set
+CONFIG_XTENSA_VARIANT_CUSTOM=y
+CONFIG_XTENSA_VARIANT_CUSTOM_NAME="test_mmuhifi_c3"
+CONFIG_XTENSA_UNALIGNED_USER=y
+CONFIG_PREEMPT=y
+CONFIG_HAVE_SMP=y
+CONFIG_SMP=y
+CONFIG_HOTPLUG_CPU=y
+# CONFIG_INITIALIZE_XTENSA_MMU_INSIDE_VMLINUX is not set
+# CONFIG_PCI is not set
+CONFIG_XTENSA_PLATFORM_XTFPGA=y
+CONFIG_CMDLINE_BOOL=y
+CONFIG_CMDLINE="earlycon=uart8250,mmio32,0xfd050020,115200n8 console=ttyS0,115200n8 ip=dhcp root=/dev/nfs rw debug"
+CONFIG_USE_OF=y
+CONFIG_BUILTIN_DTB="lx200mx"
+# CONFIG_COMPACTION is not set
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+CONFIG_IP_PNP_RARP=y
+# CONFIG_IPV6 is not set
+CONFIG_NETFILTER=y
+# CONFIG_WIRELESS is not set
+CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
+# CONFIG_STANDALONE is not set
+CONFIG_MTD=y
+CONFIG_MTD_CFI=y
+CONFIG_MTD_JEDECPROBE=y
+CONFIG_MTD_CFI_INTELEXT=y
+CONFIG_MTD_CFI_AMDSTD=y
+CONFIG_MTD_CFI_STAA=y
+CONFIG_MTD_PHYSMAP_OF=y
+CONFIG_MTD_UBI=y
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_RAM=y
+CONFIG_SCSI=y
+CONFIG_BLK_DEV_SD=y
+CONFIG_NETDEVICES=y
+# CONFIG_NET_VENDOR_ARC is not set
+# CONFIG_NET_VENDOR_BROADCOM is not set
+# CONFIG_NET_VENDOR_INTEL is not set
+# CONFIG_NET_VENDOR_MARVELL is not set
+# CONFIG_NET_VENDOR_MICREL is not set
+# CONFIG_NET_VENDOR_NATSEMI is not set
+# CONFIG_NET_VENDOR_SAMSUNG is not set
+# CONFIG_NET_VENDOR_SEEQ is not set
+# CONFIG_NET_VENDOR_SMSC is not set
+# CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_VIA is not set
+# CONFIG_NET_VENDOR_WIZNET is not set
+CONFIG_MARVELL_PHY=y
+# CONFIG_WLAN is not set
+# CONFIG_INPUT_MOUSEDEV is not set
+# CONFIG_INPUT_KEYBOARD is not set
+# CONFIG_INPUT_MOUSE is not set
+# CONFIG_SERIO is not set
+CONFIG_SERIAL_8250=y
+# CONFIG_SERIAL_8250_DEPRECATED_OPTIONS is not set
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_OF_PLATFORM=y
+CONFIG_HW_RANDOM=y
+# CONFIG_HWMON is not set
+CONFIG_WATCHDOG=y
+CONFIG_WATCHDOG_NOWAYOUT=y
+CONFIG_SOFT_WATCHDOG=y
+# CONFIG_VGA_CONSOLE is not set
+# CONFIG_USB_SUPPORT is not set
+# CONFIG_IOMMU_SUPPORT is not set
+CONFIG_EXT3_FS=y
+CONFIG_EXT4_FS=y
+CONFIG_FANOTIFY=y
+CONFIG_VFAT_FS=y
+CONFIG_PROC_KCORE=y
+CONFIG_TMPFS=y
+CONFIG_TMPFS_POSIX_ACL=y
+CONFIG_UBIFS_FS=y
+CONFIG_NFS_FS=y
+CONFIG_NFS_V4=y
+CONFIG_NFS_SWAP=y
+CONFIG_ROOT_NFS=y
+CONFIG_SUNRPC_DEBUG=y
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_ISO8859_1=y
+CONFIG_PRINTK_TIME=y
+CONFIG_DYNAMIC_DEBUG=y
+CONFIG_DEBUG_INFO=y
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_DEBUG_VM=y
+CONFIG_LOCKUP_DETECTOR=y
+CONFIG_SCHEDSTATS=y
+CONFIG_TIMER_STATS=y
+CONFIG_DEBUG_RT_MUTEXES=y
+CONFIG_DEBUG_SPINLOCK=y
+CONFIG_DEBUG_MUTEXES=y
+CONFIG_DEBUG_ATOMIC_SLEEP=y
+CONFIG_STACKTRACE=y
+CONFIG_RCU_TRACE=y
+# CONFIG_FTRACE is not set
+CONFIG_LD_NO_RELAX=y
+# CONFIG_S32C1I_SELFTEST is not set
+CONFIG_CRYPTO_ANSI_CPRNG=y
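Both defconfigs follow the normal kbuild flow: selecting one with, for example, make ARCH=xtensa generic_kc705_defconfig expands it into a full .config, and the CONFIG_BUILTIN_DTB setting names the device tree to link into the kernel (the lx200mx.dts added above, in the SMP case).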
diff --git a/arch/xtensa/include/asm/pgtable.h b/arch/xtensa/include/asm/pgtable.h
index b2173e5da601..0383aed59121 100644
--- a/arch/xtensa/include/asm/pgtable.h
+++ b/arch/xtensa/include/asm/pgtable.h
@@ -277,6 +277,8 @@ static inline pte_t pte_mkwrite(pte_t pte)
 static inline pte_t pte_mkspecial(pte_t pte)
 	{ return pte; }
 
+#define pgprot_noncached(prot) (__pgprot(pgprot_val(prot) & ~_PAGE_CA_MASK))
+
 /*
  * Conversion functions: convert a page and protection to a page entry,
  * and a page entry and page directory to the page they refer to.
diff --git a/arch/xtensa/include/uapi/asm/unistd.h b/arch/xtensa/include/uapi/asm/unistd.h
index 8883fc877c5c..db5bb72e2f4e 100644
--- a/arch/xtensa/include/uapi/asm/unistd.h
+++ b/arch/xtensa/include/uapi/asm/unistd.h
@@ -384,7 +384,8 @@ __SYSCALL(174, sys_chroot, 1)
 #define __NR_pivot_root 175
 __SYSCALL(175, sys_pivot_root, 2)
 #define __NR_umount 176
-__SYSCALL(176, sys_umount, 2)
+__SYSCALL(176, sys_oldumount, 1)
+#define __ARCH_WANT_SYS_OLDUMOUNT
 #define __NR_swapoff 177
 __SYSCALL(177, sys_swapoff, 1)
 #define __NR_sync 178
@@ -742,7 +743,14 @@ __SYSCALL(335, sys_sched_getattr, 3)
 #define __NR_renameat2 336
 __SYSCALL(336, sys_renameat2, 5)
 
-#define __NR_syscall_count 337
+#define __NR_seccomp 337
+__SYSCALL(337, sys_seccomp, 3)
+#define __NR_getrandom 338
+__SYSCALL(338, sys_getrandom, 3)
+#define __NR_memfd_create 339
+__SYSCALL(339, sys_memfd_create, 2)
+
+#define __NR_syscall_count 340
 
 /*
  * sysxtensa syscall handler
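Until libc headers pick up the new numbers, userspace on xtensa can invoke the calls directly through syscall(2). A hedged sketch; the fallback define mirrors the number assigned above and the helper name is made up:

#include <unistd.h>
#include <sys/syscall.h>

#ifndef __NR_getrandom
#define __NR_getrandom 338	/* xtensa number assigned in the hunk above */
#endif

/* Fill buf with len random bytes via the raw syscall. */
static long get_entropy(void *buf, size_t len)
{
	return syscall(__NR_getrandom, buf, len, 0);
}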
diff --git a/block/blk-merge.c b/block/blk-merge.c
index b3ac40aef46b..89b97b5e0881 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -97,19 +97,22 @@ void blk_recalc_rq_segments(struct request *rq)
 
 void blk_recount_segments(struct request_queue *q, struct bio *bio)
 {
-	bool no_sg_merge = !!test_bit(QUEUE_FLAG_NO_SG_MERGE,
-			&q->queue_flags);
-	bool merge_not_need = bio->bi_vcnt < queue_max_segments(q);
+	unsigned short seg_cnt;
+
+	/* estimate segment number by bi_vcnt for non-cloned bio */
+	if (bio_flagged(bio, BIO_CLONED))
+		seg_cnt = bio_segments(bio);
+	else
+		seg_cnt = bio->bi_vcnt;
 
-	if (no_sg_merge && !bio_flagged(bio, BIO_CLONED) &&
-			merge_not_need)
-		bio->bi_phys_segments = bio->bi_vcnt;
+	if (test_bit(QUEUE_FLAG_NO_SG_MERGE, &q->queue_flags) &&
+			(seg_cnt < queue_max_segments(q)))
+		bio->bi_phys_segments = seg_cnt;
 	else {
 		struct bio *nxt = bio->bi_next;
 
 		bio->bi_next = NULL;
-		bio->bi_phys_segments = __blk_recalc_rq_segments(q, bio,
-			no_sg_merge && merge_not_need);
+		bio->bi_phys_segments = __blk_recalc_rq_segments(q, bio, false);
 		bio->bi_next = nxt;
 	}
 
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 68929bad9a6a..1d016fc9a8b6 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -107,11 +107,7 @@ static void blk_mq_usage_counter_release(struct percpu_ref *ref)
 	wake_up_all(&q->mq_freeze_wq);
 }
 
-/*
- * Guarantee no request is in use, so we can change any data structure of
- * the queue afterward.
- */
-void blk_mq_freeze_queue(struct request_queue *q)
+static void blk_mq_freeze_queue_start(struct request_queue *q)
 {
 	bool freeze;
 
@@ -123,9 +119,23 @@ void blk_mq_freeze_queue(struct request_queue *q)
 		percpu_ref_kill(&q->mq_usage_counter);
 		blk_mq_run_queues(q, false);
 	}
+}
+
+static void blk_mq_freeze_queue_wait(struct request_queue *q)
+{
 	wait_event(q->mq_freeze_wq, percpu_ref_is_zero(&q->mq_usage_counter));
 }
 
+/*
+ * Guarantee no request is in use, so we can change any data structure of
+ * the queue afterward.
+ */
+void blk_mq_freeze_queue(struct request_queue *q)
+{
+	blk_mq_freeze_queue_start(q);
+	blk_mq_freeze_queue_wait(q);
+}
+
 static void blk_mq_unfreeze_queue(struct request_queue *q)
 {
 	bool wake;
@@ -1921,7 +1931,7 @@
 /* Basically redo blk_mq_init_queue with queue frozen */
 static void blk_mq_queue_reinit(struct request_queue *q)
 {
-	blk_mq_freeze_queue(q);
+	WARN_ON_ONCE(!q->mq_freeze_depth);
 
 	blk_mq_sysfs_unregister(q);
 
@@ -1936,8 +1946,6 @@ static void blk_mq_queue_reinit(struct request_queue *q)
 	blk_mq_map_swqueue(q);
 
 	blk_mq_sysfs_register(q);
-
-	blk_mq_unfreeze_queue(q);
 }
 
 static int blk_mq_queue_reinit_notify(struct notifier_block *nb,
@@ -1956,8 +1964,25 @@ static int blk_mq_queue_reinit_notify(struct notifier_block *nb,
 		return NOTIFY_OK;
 
 	mutex_lock(&all_q_mutex);
+
+	/*
+	 * We need to freeze and reinit all existing queues. Freezing
+	 * involves synchronous wait for an RCU grace period and doing it
+	 * one by one may take a long time. Start freezing all queues in
+	 * one swoop and then wait for the completions so that freezing can
+	 * take place in parallel.
+	 */
+	list_for_each_entry(q, &all_q_list, all_q_node)
+		blk_mq_freeze_queue_start(q);
+	list_for_each_entry(q, &all_q_list, all_q_node)
+		blk_mq_freeze_queue_wait(q);
+
 	list_for_each_entry(q, &all_q_list, all_q_node)
 		blk_mq_queue_reinit(q);
+
+	list_for_each_entry(q, &all_q_list, all_q_node)
+		blk_mq_unfreeze_queue(q);
+
 	mutex_unlock(&all_q_mutex);
 	return NOTIFY_OK;
 }
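The point of splitting freeze into start and wait halves is that the per-queue RCU grace periods then elapse concurrently instead of back to back. A toy userspace illustration of the same two-phase pattern, with pthreads standing in for queues and a sleep standing in for the grace period (not kernel code):

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

#define NQUEUES 8

/* One thread per "queue"; the sleep models the synchronous grace
 * period that killing the queue's percpu_ref has to wait out. */
static void *freeze(void *arg)
{
	(void)arg;
	sleep(1);
	return NULL;
}

int main(void)
{
	pthread_t t[NQUEUES];
	int i;

	for (i = 0; i < NQUEUES; i++)	/* the "freeze_queue_start" pass */
		pthread_create(&t[i], NULL, freeze, NULL);
	for (i = 0; i < NQUEUES; i++)	/* the "freeze_queue_wait" pass */
		pthread_join(t[i], NULL);
	printf("all %d queues frozen in about 1s rather than %ds\n",
	       NQUEUES, NQUEUES);
	return 0;
}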
diff --git a/block/ioprio.c b/block/ioprio.c
index e50170ca7c33..31666c92b46a 100644
--- a/block/ioprio.c
+++ b/block/ioprio.c
@@ -157,14 +157,16 @@ out:
 
 int ioprio_best(unsigned short aprio, unsigned short bprio)
 {
-	unsigned short aclass = IOPRIO_PRIO_CLASS(aprio);
-	unsigned short bclass = IOPRIO_PRIO_CLASS(bprio);
+	unsigned short aclass;
+	unsigned short bclass;
 
-	if (aclass == IOPRIO_CLASS_NONE)
-		aclass = IOPRIO_CLASS_BE;
-	if (bclass == IOPRIO_CLASS_NONE)
-		bclass = IOPRIO_CLASS_BE;
+	if (!ioprio_valid(aprio))
+		aprio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, IOPRIO_NORM);
+	if (!ioprio_valid(bprio))
+		bprio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, IOPRIO_NORM);
 
+	aclass = IOPRIO_PRIO_CLASS(aprio);
+	bclass = IOPRIO_PRIO_CLASS(bprio);
 	if (aclass == bclass)
 		return min(aprio, bprio);
 	if (aclass > bclass)
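The reordering matters because an invalid priority used to have only its class coerced to best-effort while its stale priority data leaked into the comparison. A standalone sketch of the corrected semantics; the macro values mirror include/linux/ioprio.h of this era and the sample input is made up:

#include <stdio.h>

#define IOPRIO_CLASS_SHIFT	13
#define IOPRIO_PRIO_VALUE(class, data)	(((class) << IOPRIO_CLASS_SHIFT) | (data))
#define IOPRIO_PRIO_CLASS(mask)	((mask) >> IOPRIO_CLASS_SHIFT)
#define ioprio_valid(mask)	(IOPRIO_PRIO_CLASS(mask) != IOPRIO_CLASS_NONE)

enum { IOPRIO_CLASS_NONE, IOPRIO_CLASS_RT, IOPRIO_CLASS_BE, IOPRIO_CLASS_IDLE };
#define IOPRIO_NORM	4

int main(void)
{
	unsigned short aprio = 7;	/* class NONE with stale data 7 */

	if (!ioprio_valid(aprio))
		aprio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, IOPRIO_NORM);
	/* now (BE, 4); the old code would have compared using data 7 */
	printf("class=%d data=%d\n", IOPRIO_PRIO_CLASS(aprio),
	       aprio & ((1 << IOPRIO_CLASS_SHIFT) - 1));
	return 0;
}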
diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
index 1e053d911240..b0c2a616c8f9 100644
--- a/block/scsi_ioctl.c
+++ b/block/scsi_ioctl.c
@@ -458,7 +458,7 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
 	rq = blk_get_request(q, in_len ? WRITE : READ, __GFP_WAIT);
 	if (IS_ERR(rq)) {
 		err = PTR_ERR(rq);
-		goto error;
+		goto error_free_buffer;
 	}
 	blk_rq_set_block_pc(rq);
 
@@ -531,9 +531,11 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
 	}
 
 error:
+	blk_put_request(rq);
+
+error_free_buffer:
 	kfree(buffer);
-	if (rq)
-		blk_put_request(rq);
+
 	return err;
 }
 EXPORT_SYMBOL_GPL(sg_scsi_ioctl);
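The relabeled exit path is the standard kernel unwind idiom: each failure jumps to a label that releases exactly what has been acquired so far, in reverse order, so no branch carries its own cleanup copy. A minimal standalone sketch with hypothetical resources:

#include <stdlib.h>

static int setup_pair(char **a_out, char **b_out)
{
	char *a, *b;

	a = malloc(16);
	if (!a)
		goto error;
	b = malloc(16);
	if (!b)
		goto error_free_a;	/* unwind only what exists */

	*a_out = a;
	*b_out = b;
	return 0;

error_free_a:
	free(a);
error:
	return -1;
}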
diff --git a/drivers/acpi/blacklist.c b/drivers/acpi/blacklist.c
index ed122e17636e..7556e7c4a055 100644
--- a/drivers/acpi/blacklist.c
+++ b/drivers/acpi/blacklist.c
@@ -290,6 +290,14 @@ static struct dmi_system_id acpi_osi_dmi_table[] __initdata = {
 		DMI_MATCH(DMI_PRODUCT_NAME, "Vostro 3446"),
 		},
 	},
+	{
+	.callback = dmi_disable_osi_win8,
+	.ident = "Dell Vostro 3546",
+	.matches = {
+		DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+		DMI_MATCH(DMI_PRODUCT_NAME, "Vostro 3546"),
+		},
+	},
 
 	/*
 	 * BIOS invocation of _OSI(Linux) is almost always a BIOS bug.
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index 5f039f191067..e45f83789809 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -60,6 +60,7 @@ enum board_ids {
 	/* board IDs by feature in alphabetical order */
 	board_ahci,
 	board_ahci_ign_iferr,
+	board_ahci_nomsi,
 	board_ahci_noncq,
 	board_ahci_nosntf,
 	board_ahci_yes_fbs,
@@ -121,6 +122,13 @@ static const struct ata_port_info ahci_port_info[] = {
 		.udma_mask	= ATA_UDMA6,
 		.port_ops	= &ahci_ops,
 	},
+	[board_ahci_nomsi] = {
+		AHCI_HFLAGS	(AHCI_HFLAG_NO_MSI),
+		.flags		= AHCI_FLAG_COMMON,
+		.pio_mask	= ATA_PIO4,
+		.udma_mask	= ATA_UDMA6,
+		.port_ops	= &ahci_ops,
+	},
 	[board_ahci_noncq] = {
 		AHCI_HFLAGS	(AHCI_HFLAG_NO_NCQ),
 		.flags		= AHCI_FLAG_COMMON,
@@ -313,6 +321,11 @@ static const struct pci_device_id ahci_pci_tbl[] = {
 	{ PCI_VDEVICE(INTEL, 0x8c87), board_ahci }, /* 9 Series RAID */
 	{ PCI_VDEVICE(INTEL, 0x8c8e), board_ahci }, /* 9 Series RAID */
 	{ PCI_VDEVICE(INTEL, 0x8c8f), board_ahci }, /* 9 Series RAID */
+	{ PCI_VDEVICE(INTEL, 0xa103), board_ahci }, /* Sunrise Point-H AHCI */
+	{ PCI_VDEVICE(INTEL, 0xa103), board_ahci }, /* Sunrise Point-H RAID */
+	{ PCI_VDEVICE(INTEL, 0xa105), board_ahci }, /* Sunrise Point-H RAID */
+	{ PCI_VDEVICE(INTEL, 0xa107), board_ahci }, /* Sunrise Point-H RAID */
+	{ PCI_VDEVICE(INTEL, 0xa10f), board_ahci }, /* Sunrise Point-H RAID */
 
 	/* JMicron 360/1/3/5/6, match class to avoid IDE function */
 	{ PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
@@ -475,10 +488,10 @@ static const struct pci_device_id ahci_pci_tbl[] = {
 	{ PCI_VDEVICE(ASMEDIA, 0x0612), board_ahci },	/* ASM1062 */
 
 	/*
-	 * Samsung SSDs found on some macbooks.  NCQ times out.
-	 * https://bugzilla.kernel.org/show_bug.cgi?id=60731
+	 * Samsung SSDs found on some macbooks.  NCQ times out if MSI is
+	 * enabled.  https://bugzilla.kernel.org/show_bug.cgi?id=60731
 	 */
-	{ PCI_VDEVICE(SAMSUNG, 0x1600), board_ahci_noncq },
+	{ PCI_VDEVICE(SAMSUNG, 0x1600), board_ahci_nomsi },
 
 	/* Enmotus */
 	{ PCI_DEVICE(0x1c44, 0x8000), board_ahci },
@@ -514,12 +527,9 @@ MODULE_PARM_DESC(marvell_enable, "Marvell SATA via AHCI (1 = enabled)");
 static void ahci_pci_save_initial_config(struct pci_dev *pdev,
 					 struct ahci_host_priv *hpriv)
 {
-	unsigned int force_port_map = 0;
-	unsigned int mask_port_map = 0;
-
 	if (pdev->vendor == PCI_VENDOR_ID_JMICRON && pdev->device == 0x2361) {
 		dev_info(&pdev->dev, "JMB361 has only one port\n");
-		force_port_map = 1;
+		hpriv->force_port_map = 1;
 	}
 
 	/*
@@ -529,9 +539,9 @@ static void ahci_pci_save_initial_config(struct pci_dev *pdev,
 	 */
 	if (hpriv->flags & AHCI_HFLAG_MV_PATA) {
 		if (pdev->device == 0x6121)
-			mask_port_map = 0x3;
+			hpriv->mask_port_map = 0x3;
 		else
-			mask_port_map = 0xf;
+			hpriv->mask_port_map = 0xf;
 		dev_info(&pdev->dev,
 			 "Disabling your PATA port. Use the boot option 'ahci.marvell_enable=0' to avoid this.\n");
 	}
diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c
index 5eb61c9e63da..97683e45ab04 100644
--- a/drivers/ata/libahci.c
+++ b/drivers/ata/libahci.c
@@ -1778,16 +1778,15 @@ static void ahci_handle_port_interrupt(struct ata_port *ap,
 	}
 }
 
-static void ahci_update_intr_status(struct ata_port *ap)
+static void ahci_port_intr(struct ata_port *ap)
 {
 	void __iomem *port_mmio = ahci_port_base(ap);
-	struct ahci_port_priv *pp = ap->private_data;
 	u32 status;
 
 	status = readl(port_mmio + PORT_IRQ_STAT);
 	writel(status, port_mmio + PORT_IRQ_STAT);
 
-	atomic_or(status, &pp->intr_status);
+	ahci_handle_port_interrupt(ap, port_mmio, status);
 }
 
 static irqreturn_t ahci_port_thread_fn(int irq, void *dev_instance)
@@ -1808,34 +1807,6 @@ static irqreturn_t ahci_port_thread_fn(int irq, void *dev_instance)
 	return IRQ_HANDLED;
 }
 
-irqreturn_t ahci_thread_fn(int irq, void *dev_instance)
-{
-	struct ata_host *host = dev_instance;
-	struct ahci_host_priv *hpriv = host->private_data;
-	u32 irq_masked = hpriv->port_map;
-	unsigned int i;
-
-	for (i = 0; i < host->n_ports; i++) {
-		struct ata_port *ap;
-
-		if (!(irq_masked & (1 << i)))
-			continue;
-
-		ap = host->ports[i];
-		if (ap) {
-			ahci_port_thread_fn(irq, ap);
-			VPRINTK("port %u\n", i);
-		} else {
-			VPRINTK("port %u (no irq)\n", i);
-			if (ata_ratelimit())
-				dev_warn(host->dev,
-					 "interrupt on disabled port %u\n", i);
-		}
-	}
-
-	return IRQ_HANDLED;
-}
-
 static irqreturn_t ahci_multi_irqs_intr(int irq, void *dev_instance)
 {
 	struct ata_port *ap = dev_instance;
@@ -1875,6 +1846,8 @@ static irqreturn_t ahci_single_irq_intr(int irq, void *dev_instance)
 
 	irq_masked = irq_stat & hpriv->port_map;
 
+	spin_lock(&host->lock);
+
 	for (i = 0; i < host->n_ports; i++) {
 		struct ata_port *ap;
 
@@ -1883,7 +1856,7 @@ static irqreturn_t ahci_single_irq_intr(int irq, void *dev_instance)
 
 		ap = host->ports[i];
 		if (ap) {
-			ahci_update_intr_status(ap);
+			ahci_port_intr(ap);
 			VPRINTK("port %u\n", i);
 		} else {
 			VPRINTK("port %u (no irq)\n", i);
@@ -1906,9 +1879,11 @@ static irqreturn_t ahci_single_irq_intr(int irq, void *dev_instance)
 	 */
 	writel(irq_stat, mmio + HOST_IRQ_STAT);
 
+	spin_unlock(&host->lock);
+
 	VPRINTK("EXIT\n");
 
-	return handled ? IRQ_WAKE_THREAD : IRQ_NONE;
+	return IRQ_RETVAL(handled);
 }
 
 unsigned int ahci_qc_issue(struct ata_queued_cmd *qc)
@@ -2320,8 +2295,13 @@ static int ahci_port_start(struct ata_port *ap)
 	 */
 	pp->intr_mask = DEF_PORT_IRQ;
 
-	spin_lock_init(&pp->lock);
-	ap->lock = &pp->lock;
+	/*
+	 * Switch to per-port locking in case each port has its own MSI vector.
+	 */
+	if ((hpriv->flags & AHCI_HFLAG_MULTI_MSI)) {
+		spin_lock_init(&pp->lock);
+		ap->lock = &pp->lock;
+	}
 
 	ap->private_data = pp;
 
@@ -2482,31 +2462,6 @@ out_free_irqs:
 	return rc;
 }
 
-static int ahci_host_activate_single_irq(struct ata_host *host, int irq,
-					 struct scsi_host_template *sht)
-{
-	int i, rc;
-
-	rc = ata_host_start(host);
-	if (rc)
-		return rc;
-
-	rc = devm_request_threaded_irq(host->dev, irq, ahci_single_irq_intr,
-				       ahci_thread_fn, IRQF_SHARED,
-				       dev_driver_string(host->dev), host);
-	if (rc)
-		return rc;
-
-	for (i = 0; i < host->n_ports; i++)
-		ata_port_desc(host->ports[i], "irq %d", irq);
-
-	rc = ata_host_register(host, sht);
-	if (rc)
-		devm_free_irq(host->dev, irq, host);
-
-	return rc;
-}
-
 /**
  * ahci_host_activate - start AHCI host, request IRQs and register it
  * @host: target ATA host
@@ -2532,7 +2487,8 @@ int ahci_host_activate(struct ata_host *host, int irq,
 	if (hpriv->flags & AHCI_HFLAG_MULTI_MSI)
 		rc = ahci_host_activate_multi_irqs(host, irq, sht);
 	else
-		rc = ahci_host_activate_single_irq(host, irq, sht);
+		rc = ata_host_activate(host, irq, ahci_single_irq_intr,
+				       IRQF_SHARED, sht);
 	return rc;
 }
 EXPORT_SYMBOL_GPL(ahci_host_activate);
diff --git a/drivers/ata/sata_rcar.c b/drivers/ata/sata_rcar.c
index 61eb6d77dac7..ea1fbc1d4c5f 100644
--- a/drivers/ata/sata_rcar.c
+++ b/drivers/ata/sata_rcar.c
@@ -146,6 +146,7 @@
 enum sata_rcar_type {
 	RCAR_GEN1_SATA,
 	RCAR_GEN2_SATA,
+	RCAR_R8A7790_ES1_SATA,
 };
 
 struct sata_rcar_priv {
@@ -763,6 +764,9 @@ static void sata_rcar_setup_port(struct ata_host *host)
 	ap->udma_mask = ATA_UDMA6;
 	ap->flags |= ATA_FLAG_SATA;
 
+	if (priv->type == RCAR_R8A7790_ES1_SATA)
+		ap->flags |= ATA_FLAG_NO_DIPM;
+
 	ioaddr->cmd_addr = base + SDATA_REG;
 	ioaddr->ctl_addr = base + SSDEVCON_REG;
 	ioaddr->scr_addr = base + SCRSSTS_REG;
@@ -792,6 +796,7 @@ static void sata_rcar_init_controller(struct ata_host *host)
 		sata_rcar_gen1_phy_init(priv);
 		break;
 	case RCAR_GEN2_SATA:
+	case RCAR_R8A7790_ES1_SATA:
 		sata_rcar_gen2_phy_init(priv);
 		break;
 	default:
@@ -838,9 +843,17 @@ static struct of_device_id sata_rcar_match[] = {
 		.data = (void *)RCAR_GEN2_SATA
 	},
 	{
+		.compatible = "renesas,sata-r8a7790-es1",
+		.data = (void *)RCAR_R8A7790_ES1_SATA
+	},
+	{
 		.compatible = "renesas,sata-r8a7791",
 		.data = (void *)RCAR_GEN2_SATA
 	},
+	{
+		.compatible = "renesas,sata-r8a7793",
+		.data = (void *)RCAR_GEN2_SATA
+	},
 	{ },
 };
 MODULE_DEVICE_TABLE(of, sata_rcar_match);
@@ -849,7 +862,9 @@ static const struct platform_device_id sata_rcar_id_table[] = {
 	{ "sata_rcar", RCAR_GEN1_SATA }, /* Deprecated by "sata-r8a7779" */
 	{ "sata-r8a7779", RCAR_GEN1_SATA },
 	{ "sata-r8a7790", RCAR_GEN2_SATA },
+	{ "sata-r8a7790-es1", RCAR_R8A7790_ES1_SATA },
 	{ "sata-r8a7791", RCAR_GEN2_SATA },
+	{ "sata-r8a7793", RCAR_GEN2_SATA },
 	{ },
 };
 MODULE_DEVICE_TABLE(platform, sata_rcar_id_table);
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
index 40bc2f4072cc..fb83d4acd400 100644
--- a/drivers/base/power/domain.c
+++ b/drivers/base/power/domain.c
@@ -361,9 +361,19 @@ static int __pm_genpd_save_device(struct pm_domain_data *pdd,
 	struct device *dev = pdd->dev;
 	int ret = 0;
 
-	if (gpd_data->need_restore)
+	if (gpd_data->need_restore > 0)
 		return 0;
 
+	/*
+	 * If the value of the need_restore flag is still unknown at this point,
+	 * we trust that pm_genpd_poweroff() has verified that the device is
+	 * already runtime PM suspended.
+	 */
+	if (gpd_data->need_restore < 0) {
+		gpd_data->need_restore = 1;
+		return 0;
+	}
+
 	mutex_unlock(&genpd->lock);
 
 	genpd_start_dev(genpd, dev);
@@ -373,7 +383,7 @@ static int __pm_genpd_save_device(struct pm_domain_data *pdd,
 	mutex_lock(&genpd->lock);
 
 	if (!ret)
-		gpd_data->need_restore = true;
+		gpd_data->need_restore = 1;
 
 	return ret;
 }
@@ -389,12 +399,17 @@ static void __pm_genpd_restore_device(struct pm_domain_data *pdd,
 {
 	struct generic_pm_domain_data *gpd_data = to_gpd_data(pdd);
 	struct device *dev = pdd->dev;
-	bool need_restore = gpd_data->need_restore;
+	int need_restore = gpd_data->need_restore;
 
-	gpd_data->need_restore = false;
+	gpd_data->need_restore = 0;
 	mutex_unlock(&genpd->lock);
 
 	genpd_start_dev(genpd, dev);
+
+	/*
+	 * Call genpd_restore_dev() for recently added devices too (need_restore
+	 * is negative then).
+	 */
 	if (need_restore)
 		genpd_restore_dev(genpd, dev);
 
@@ -603,6 +618,7 @@ static void genpd_power_off_work_fn(struct work_struct *work)
 static int pm_genpd_runtime_suspend(struct device *dev)
 {
 	struct generic_pm_domain *genpd;
+	struct generic_pm_domain_data *gpd_data;
 	bool (*stop_ok)(struct device *__dev);
 	int ret;
 
@@ -628,6 +644,16 @@ static int pm_genpd_runtime_suspend(struct device *dev)
 		return 0;
 
 	mutex_lock(&genpd->lock);
+
+	/*
+	 * If we have an unknown state of the need_restore flag, it means none
+	 * of the runtime PM callbacks has been invoked yet. Let's update the
+	 * flag to reflect that the current state is active.
+	 */
+	gpd_data = to_gpd_data(dev->power.subsys_data->domain_data);
+	if (gpd_data->need_restore < 0)
+		gpd_data->need_restore = 0;
+
 	genpd->in_progress++;
 	pm_genpd_poweroff(genpd);
 	genpd->in_progress--;
@@ -1437,12 +1463,12 @@ int __pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev,
 	spin_unlock_irq(&dev->power.lock);
 
 	if (genpd->attach_dev)
-		genpd->attach_dev(dev);
+		genpd->attach_dev(genpd, dev);
 
 	mutex_lock(&gpd_data->lock);
 	gpd_data->base.dev = dev;
 	list_add_tail(&gpd_data->base.list_node, &genpd->dev_list);
-	gpd_data->need_restore = genpd->status == GPD_STATE_POWER_OFF;
+	gpd_data->need_restore = -1;
 	gpd_data->td.constraint_changed = true;
 	gpd_data->td.effective_constraint_ns = -1;
 	mutex_unlock(&gpd_data->lock);
@@ -1499,7 +1525,7 @@ int pm_genpd_remove_device(struct generic_pm_domain *genpd,
 	genpd->max_off_time_changed = true;
 
 	if (genpd->detach_dev)
-		genpd->detach_dev(dev);
+		genpd->detach_dev(genpd, dev);
 
 	spin_lock_irq(&dev->power.lock);
 
@@ -1546,7 +1572,7 @@ void pm_genpd_dev_need_restore(struct device *dev, bool val)
 
 	psd = dev_to_psd(dev);
 	if (psd && psd->domain_data)
-		to_gpd_data(psd->domain_data)->need_restore = val;
+		to_gpd_data(psd->domain_data)->need_restore = val ? 1 : 0;
 
 	spin_unlock_irqrestore(&dev->power.lock, flags);
 }
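The need_restore field is an int precisely so it can carry a tri-state; an equivalent self-documenting form (an illustration, not in the patch) would be:

enum need_restore_state {
	NEED_RESTORE_UNKNOWN	= -1,	/* no runtime PM callback seen yet */
	NEED_RESTORE_NO		= 0,	/* device state is current */
	NEED_RESTORE_YES	= 1,	/* saved state must be restored */
};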
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index 2ad0b5bce44b..3920ee45aa59 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -560,7 +560,8 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
 	}
 
 	if (page_zero_filled(uncmem)) {
-		kunmap_atomic(user_mem);
+		if (user_mem)
+			kunmap_atomic(user_mem);
 		/* Free memory associated with this sector now. */
 		bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
 		zram_free_page(zram, index);
diff --git a/drivers/char/hw_random/pseries-rng.c b/drivers/char/hw_random/pseries-rng.c
index 6226aa08c36a..bcf86f91800a 100644
--- a/drivers/char/hw_random/pseries-rng.c
+++ b/drivers/char/hw_random/pseries-rng.c
@@ -25,18 +25,21 @@
 #include <asm/vio.h>
 
 
-static int pseries_rng_data_read(struct hwrng *rng, u32 *data)
+static int pseries_rng_read(struct hwrng *rng, void *data, size_t max, bool wait)
 {
+	u64 buffer[PLPAR_HCALL_BUFSIZE];
+	size_t size = max < 8 ? max : 8;
 	int rc;
 
-	rc = plpar_hcall(H_RANDOM, (unsigned long *)data);
+	rc = plpar_hcall(H_RANDOM, (unsigned long *)buffer);
 	if (rc != H_SUCCESS) {
 		pr_err_ratelimited("H_RANDOM call failed %d\n", rc);
 		return -EIO;
 	}
+	memcpy(data, buffer, size);
 
 	/* The hypervisor interface returns 64 bits */
-	return 8;
+	return size;
 }
 
 /**
@@ -55,7 +58,7 @@ static unsigned long pseries_rng_get_desired_dma(struct vio_dev *vdev)
 
 static struct hwrng pseries_rng = {
 	.name		= KBUILD_MODNAME,
-	.data_read	= pseries_rng_data_read,
+	.read		= pseries_rng_read,
 };
 
 static int __init pseries_rng_probe(struct vio_dev *dev,
diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
index bfa640023e64..cf7a561fad7c 100644
--- a/drivers/char/virtio_console.c
+++ b/drivers/char/virtio_console.c
@@ -1449,8 +1449,6 @@ static int add_port(struct ports_device *portdev, u32 id)
 	spin_lock_init(&port->outvq_lock);
 	init_waitqueue_head(&port->waitqueue);
 
-	virtio_device_ready(portdev->vdev);
-
 	/* Fill the in_vq with buffers so the host can send us data. */
 	nr_added_bufs = fill_queue(port->in_vq, &port->inbuf_lock);
 	if (!nr_added_bufs) {
@@ -2026,6 +2024,8 @@ static int virtcons_probe(struct virtio_device *vdev)
 	spin_lock_init(&portdev->ports_lock);
 	INIT_LIST_HEAD(&portdev->ports);
 
+	virtio_device_ready(portdev->vdev);
+
 	if (multiport) {
 		unsigned int nr_added_bufs;
 
diff --git a/drivers/cpufreq/cpufreq-dt.c b/drivers/cpufreq/cpufreq-dt.c
index 23aaf40cf37f..f657c571b18e 100644
--- a/drivers/cpufreq/cpufreq-dt.c
+++ b/drivers/cpufreq/cpufreq-dt.c
@@ -166,8 +166,8 @@ try_again:
 		if (ret == -EPROBE_DEFER)
 			dev_dbg(cpu_dev, "cpu%d clock not ready, retry\n", cpu);
 		else
-			dev_err(cpu_dev, "failed to get cpu%d clock: %d\n", ret,
-				cpu);
+			dev_err(cpu_dev, "failed to get cpu%d clock: %d\n", cpu,
+				ret);
 	} else {
 		*cdev = cpu_dev;
 		*creg = cpu_reg;
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 644b54e1e7d1..4473eba1d6b0 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -1022,7 +1022,8 @@ static struct cpufreq_policy *cpufreq_policy_restore(unsigned int cpu)
 
 	read_unlock_irqrestore(&cpufreq_driver_lock, flags);
 
-	policy->governor = NULL;
+	if (policy)
+		policy->governor = NULL;
 
 	return policy;
 }
diff --git a/drivers/crypto/caam/key_gen.c b/drivers/crypto/caam/key_gen.c
index 871703c49d2c..e1eaf4ff9762 100644
--- a/drivers/crypto/caam/key_gen.c
+++ b/drivers/crypto/caam/key_gen.c
@@ -48,23 +48,29 @@ int gen_split_key(struct device *jrdev, u8 *key_out, int split_key_len,
48 u32 *desc; 48 u32 *desc;
49 struct split_key_result result; 49 struct split_key_result result;
50 dma_addr_t dma_addr_in, dma_addr_out; 50 dma_addr_t dma_addr_in, dma_addr_out;
51 int ret = 0; 51 int ret = -ENOMEM;
52 52
53 desc = kmalloc(CAAM_CMD_SZ * 6 + CAAM_PTR_SZ * 2, GFP_KERNEL | GFP_DMA); 53 desc = kmalloc(CAAM_CMD_SZ * 6 + CAAM_PTR_SZ * 2, GFP_KERNEL | GFP_DMA);
54 if (!desc) { 54 if (!desc) {
55 dev_err(jrdev, "unable to allocate key input memory\n"); 55 dev_err(jrdev, "unable to allocate key input memory\n");
56 return -ENOMEM; 56 return ret;
57 } 57 }
58 58
59 init_job_desc(desc, 0);
60
61 dma_addr_in = dma_map_single(jrdev, (void *)key_in, keylen, 59 dma_addr_in = dma_map_single(jrdev, (void *)key_in, keylen,
62 DMA_TO_DEVICE); 60 DMA_TO_DEVICE);
63 if (dma_mapping_error(jrdev, dma_addr_in)) { 61 if (dma_mapping_error(jrdev, dma_addr_in)) {
64 dev_err(jrdev, "unable to map key input memory\n"); 62 dev_err(jrdev, "unable to map key input memory\n");
65 kfree(desc); 63 goto out_free;
66 return -ENOMEM;
67 } 64 }
65
66 dma_addr_out = dma_map_single(jrdev, key_out, split_key_pad_len,
67 DMA_FROM_DEVICE);
68 if (dma_mapping_error(jrdev, dma_addr_out)) {
69 dev_err(jrdev, "unable to map key output memory\n");
70 goto out_unmap_in;
71 }
72
73 init_job_desc(desc, 0);
68 append_key(desc, dma_addr_in, keylen, CLASS_2 | KEY_DEST_CLASS_REG); 74 append_key(desc, dma_addr_in, keylen, CLASS_2 | KEY_DEST_CLASS_REG);
69 75
70 /* Sets MDHA up into an HMAC-INIT */ 76 /* Sets MDHA up into an HMAC-INIT */
@@ -81,13 +87,6 @@ int gen_split_key(struct device *jrdev, u8 *key_out, int split_key_len,
81 * FIFO_STORE with the explicit split-key content store 87 * FIFO_STORE with the explicit split-key content store
82 * (0x26 output type) 88 * (0x26 output type)
83 */ 89 */
84 dma_addr_out = dma_map_single(jrdev, key_out, split_key_pad_len,
85 DMA_FROM_DEVICE);
86 if (dma_mapping_error(jrdev, dma_addr_out)) {
87 dev_err(jrdev, "unable to map key output memory\n");
88 kfree(desc);
89 return -ENOMEM;
90 }
91 append_fifo_store(desc, dma_addr_out, split_key_len, 90 append_fifo_store(desc, dma_addr_out, split_key_len,
92 LDST_CLASS_2_CCB | FIFOST_TYPE_SPLIT_KEK); 91 LDST_CLASS_2_CCB | FIFOST_TYPE_SPLIT_KEK);
93 92
@@ -115,10 +114,10 @@ int gen_split_key(struct device *jrdev, u8 *key_out, int split_key_len,
115 114
116 dma_unmap_single(jrdev, dma_addr_out, split_key_pad_len, 115 dma_unmap_single(jrdev, dma_addr_out, split_key_pad_len,
117 DMA_FROM_DEVICE); 116 DMA_FROM_DEVICE);
117out_unmap_in:
118 dma_unmap_single(jrdev, dma_addr_in, keylen, DMA_TO_DEVICE); 118 dma_unmap_single(jrdev, dma_addr_in, keylen, DMA_TO_DEVICE);
119 119out_free:
120 kfree(desc); 120 kfree(desc);
121
122 return ret; 121 return ret;
123} 122}
124EXPORT_SYMBOL(gen_split_key); 123EXPORT_SYMBOL(gen_split_key);
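The gen_split_key() rework above replaces duplicated kfree()-and-return failure paths with a single unwind ladder and maps both DMA buffers before building the descriptor. A minimal sketch of the pattern, with hypothetical stand-ins for the CAAM-specific calls:

    /* ret defaults to -ENOMEM; each failure jumps to the label that
     * releases everything acquired so far, and success falls through
     * the same teardown since these resources are temporary. */
    static int unwind_example(struct device *dev, void *in, size_t in_len,
                              void *out, size_t out_len)
    {
            dma_addr_t map_in, map_out;
            void *desc;
            int ret = -ENOMEM;

            desc = kmalloc(64, GFP_KERNEL);
            if (!desc)
                    return ret;

            map_in = dma_map_single(dev, in, in_len, DMA_TO_DEVICE);
            if (dma_mapping_error(dev, map_in))
                    goto out_free;

            map_out = dma_map_single(dev, out, out_len, DMA_FROM_DEVICE);
            if (dma_mapping_error(dev, map_out))
                    goto out_unmap_in;

            ret = 0;        /* real code would run the job here */

            dma_unmap_single(dev, map_out, out_len, DMA_FROM_DEVICE);
    out_unmap_in:
            dma_unmap_single(dev, map_in, in_len, DMA_TO_DEVICE);
    out_free:
            kfree(desc);
            return ret;
    }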
diff --git a/drivers/crypto/qat/qat_common/adf_accel_devices.h b/drivers/crypto/qat/qat_common/adf_accel_devices.h
index 9282381b03ce..fe7b3f06f6e6 100644
--- a/drivers/crypto/qat/qat_common/adf_accel_devices.h
+++ b/drivers/crypto/qat/qat_common/adf_accel_devices.h
@@ -198,8 +198,7 @@ struct adf_accel_dev {
198 struct dentry *debugfs_dir; 198 struct dentry *debugfs_dir;
199 struct list_head list; 199 struct list_head list;
200 struct module *owner; 200 struct module *owner;
201 uint8_t accel_id;
202 uint8_t numa_node;
203 struct adf_accel_pci accel_pci_dev; 201 struct adf_accel_pci accel_pci_dev;
202 uint8_t accel_id;
204} __packed; 203} __packed;
205#endif 204#endif
diff --git a/drivers/crypto/qat/qat_common/adf_transport.c b/drivers/crypto/qat/qat_common/adf_transport.c
index 5f3fa45348b4..9dd2cb72a4e8 100644
--- a/drivers/crypto/qat/qat_common/adf_transport.c
+++ b/drivers/crypto/qat/qat_common/adf_transport.c
@@ -419,9 +419,10 @@ static int adf_init_bank(struct adf_accel_dev *accel_dev,
419 WRITE_CSR_RING_BASE(csr_addr, bank_num, i, 0); 419 WRITE_CSR_RING_BASE(csr_addr, bank_num, i, 0);
420 ring = &bank->rings[i]; 420 ring = &bank->rings[i];
421 if (hw_data->tx_rings_mask & (1 << i)) { 421 if (hw_data->tx_rings_mask & (1 << i)) {
422 ring->inflights = kzalloc_node(sizeof(atomic_t), 422 ring->inflights =
423 GFP_KERNEL, 423 kzalloc_node(sizeof(atomic_t),
424 accel_dev->numa_node); 424 GFP_KERNEL,
425 dev_to_node(&GET_DEV(accel_dev)));
425 if (!ring->inflights) 426 if (!ring->inflights)
426 goto err; 427 goto err;
427 } else { 428 } else {
@@ -469,13 +470,14 @@ int adf_init_etr_data(struct adf_accel_dev *accel_dev)
469 int i, ret; 470 int i, ret;
470 471
471 etr_data = kzalloc_node(sizeof(*etr_data), GFP_KERNEL, 472 etr_data = kzalloc_node(sizeof(*etr_data), GFP_KERNEL,
472 accel_dev->numa_node); 473 dev_to_node(&GET_DEV(accel_dev)));
473 if (!etr_data) 474 if (!etr_data)
474 return -ENOMEM; 475 return -ENOMEM;
475 476
476 num_banks = GET_MAX_BANKS(accel_dev); 477 num_banks = GET_MAX_BANKS(accel_dev);
477 size = num_banks * sizeof(struct adf_etr_bank_data); 478 size = num_banks * sizeof(struct adf_etr_bank_data);
478 etr_data->banks = kzalloc_node(size, GFP_KERNEL, accel_dev->numa_node); 479 etr_data->banks = kzalloc_node(size, GFP_KERNEL,
480 dev_to_node(&GET_DEV(accel_dev)));
479 if (!etr_data->banks) { 481 if (!etr_data->banks) {
480 ret = -ENOMEM; 482 ret = -ENOMEM;
481 goto err_bank; 483 goto err_bank;
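This and the following QAT hunks drop the driver's home-grown numa_node bookkeeping in favour of dev_to_node() on the underlying PCI device, so allocations follow the node the device actually reports. A minimal sketch, assuming only a valid struct device:

    #include <linux/device.h>
    #include <linux/slab.h>

    /* Allocate on the device's NUMA node; if the device reports no
     * node (dev_to_node() == NUMA_NO_NODE), kzalloc_node() falls
     * back to an ordinary allocation. */
    static void *alloc_near_device(struct device *dev, size_t size)
    {
            return kzalloc_node(size, GFP_KERNEL, dev_to_node(dev));
    }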
diff --git a/drivers/crypto/qat/qat_common/qat_algs.c b/drivers/crypto/qat/qat_common/qat_algs.c
index f2e2f158cfbe..9e9619cd4a79 100644
--- a/drivers/crypto/qat/qat_common/qat_algs.c
+++ b/drivers/crypto/qat/qat_common/qat_algs.c
@@ -596,7 +596,8 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
596 if (unlikely(!n)) 596 if (unlikely(!n))
597 return -EINVAL; 597 return -EINVAL;
598 598
599 bufl = kmalloc_node(sz, GFP_ATOMIC, inst->accel_dev->numa_node); 599 bufl = kmalloc_node(sz, GFP_ATOMIC,
600 dev_to_node(&GET_DEV(inst->accel_dev)));
600 if (unlikely(!bufl)) 601 if (unlikely(!bufl))
601 return -ENOMEM; 602 return -ENOMEM;
602 603
@@ -605,6 +606,8 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
605 goto err; 606 goto err;
606 607
607 for_each_sg(assoc, sg, assoc_n, i) { 608 for_each_sg(assoc, sg, assoc_n, i) {
609 if (!sg->length)
610 continue;
608 bufl->bufers[bufs].addr = dma_map_single(dev, 611 bufl->bufers[bufs].addr = dma_map_single(dev,
609 sg_virt(sg), 612 sg_virt(sg),
610 sg->length, 613 sg->length,
@@ -640,7 +643,7 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
640 struct qat_alg_buf *bufers; 643 struct qat_alg_buf *bufers;
641 644
642 buflout = kmalloc_node(sz, GFP_ATOMIC, 645 buflout = kmalloc_node(sz, GFP_ATOMIC,
643 inst->accel_dev->numa_node); 646 dev_to_node(&GET_DEV(inst->accel_dev)));
644 if (unlikely(!buflout)) 647 if (unlikely(!buflout))
645 goto err; 648 goto err;
646 bloutp = dma_map_single(dev, buflout, sz, DMA_TO_DEVICE); 649 bloutp = dma_map_single(dev, buflout, sz, DMA_TO_DEVICE);
diff --git a/drivers/crypto/qat/qat_common/qat_crypto.c b/drivers/crypto/qat/qat_common/qat_crypto.c
index 0d59bcb50de1..828f2a686aab 100644
--- a/drivers/crypto/qat/qat_common/qat_crypto.c
+++ b/drivers/crypto/qat/qat_common/qat_crypto.c
@@ -109,12 +109,14 @@ struct qat_crypto_instance *qat_crypto_get_instance_node(int node)
109 109
110 list_for_each(itr, adf_devmgr_get_head()) { 110 list_for_each(itr, adf_devmgr_get_head()) {
111 accel_dev = list_entry(itr, struct adf_accel_dev, list); 111 accel_dev = list_entry(itr, struct adf_accel_dev, list);
112 if (accel_dev->numa_node == node && adf_dev_started(accel_dev)) 112 if ((node == dev_to_node(&GET_DEV(accel_dev)) ||
113 dev_to_node(&GET_DEV(accel_dev)) < 0)
114 && adf_dev_started(accel_dev))
113 break; 115 break;
114 accel_dev = NULL; 116 accel_dev = NULL;
115 } 117 }
116 if (!accel_dev) { 118 if (!accel_dev) {
117 pr_err("QAT: Could not find device on give node\n"); 119 pr_err("QAT: Could not find device on node %d\n", node);
118 accel_dev = adf_devmgr_get_first(); 120 accel_dev = adf_devmgr_get_first();
119 } 121 }
120 if (!accel_dev || !adf_dev_started(accel_dev)) 122 if (!accel_dev || !adf_dev_started(accel_dev))
@@ -164,7 +166,7 @@ static int qat_crypto_create_instances(struct adf_accel_dev *accel_dev)
164 166
165 for (i = 0; i < num_inst; i++) { 167 for (i = 0; i < num_inst; i++) {
166 inst = kzalloc_node(sizeof(*inst), GFP_KERNEL, 168 inst = kzalloc_node(sizeof(*inst), GFP_KERNEL,
167 accel_dev->numa_node); 169 dev_to_node(&GET_DEV(accel_dev)));
168 if (!inst) 170 if (!inst)
169 goto err; 171 goto err;
170 172
diff --git a/drivers/crypto/qat/qat_dh895xcc/adf_admin.c b/drivers/crypto/qat/qat_dh895xcc/adf_admin.c
index 978d6c56639d..53c491b59f07 100644
--- a/drivers/crypto/qat/qat_dh895xcc/adf_admin.c
+++ b/drivers/crypto/qat/qat_dh895xcc/adf_admin.c
@@ -108,7 +108,7 @@ int adf_init_admin_comms(struct adf_accel_dev *accel_dev)
108 uint64_t reg_val; 108 uint64_t reg_val;
109 109
110 admin = kzalloc_node(sizeof(*accel_dev->admin), GFP_KERNEL, 110 admin = kzalloc_node(sizeof(*accel_dev->admin), GFP_KERNEL,
111 accel_dev->numa_node); 111 dev_to_node(&GET_DEV(accel_dev)));
112 if (!admin) 112 if (!admin)
113 return -ENOMEM; 113 return -ENOMEM;
114 admin->virt_addr = dma_zalloc_coherent(&GET_DEV(accel_dev), PAGE_SIZE, 114 admin->virt_addr = dma_zalloc_coherent(&GET_DEV(accel_dev), PAGE_SIZE,
diff --git a/drivers/crypto/qat/qat_dh895xcc/adf_drv.c b/drivers/crypto/qat/qat_dh895xcc/adf_drv.c
index 0d0435a41be9..948f66be262b 100644
--- a/drivers/crypto/qat/qat_dh895xcc/adf_drv.c
+++ b/drivers/crypto/qat/qat_dh895xcc/adf_drv.c
@@ -119,21 +119,6 @@ static void adf_cleanup_accel(struct adf_accel_dev *accel_dev)
119 kfree(accel_dev); 119 kfree(accel_dev);
120} 120}
121 121
122static uint8_t adf_get_dev_node_id(struct pci_dev *pdev)
123{
124 unsigned int bus_per_cpu = 0;
125 struct cpuinfo_x86 *c = &cpu_data(num_online_cpus() - 1);
126
127 if (!c->phys_proc_id)
128 return 0;
129
130 bus_per_cpu = 256 / (c->phys_proc_id + 1);
131
132 if (bus_per_cpu != 0)
133 return pdev->bus->number / bus_per_cpu;
134 return 0;
135}
136
137static int qat_dev_start(struct adf_accel_dev *accel_dev) 122static int qat_dev_start(struct adf_accel_dev *accel_dev)
138{ 123{
139 int cpus = num_online_cpus(); 124 int cpus = num_online_cpus();
@@ -235,7 +220,6 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
235 void __iomem *pmisc_bar_addr = NULL; 220 void __iomem *pmisc_bar_addr = NULL;
236 char name[ADF_DEVICE_NAME_LENGTH]; 221 char name[ADF_DEVICE_NAME_LENGTH];
237 unsigned int i, bar_nr; 222 unsigned int i, bar_nr;
238 uint8_t node;
239 int ret; 223 int ret;
240 224
241 switch (ent->device) { 225 switch (ent->device) {
@@ -246,12 +230,19 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
246 return -ENODEV; 230 return -ENODEV;
247 } 231 }
248 232
249 node = adf_get_dev_node_id(pdev); 233 if (num_possible_nodes() > 1 && dev_to_node(&pdev->dev) < 0) {
250 accel_dev = kzalloc_node(sizeof(*accel_dev), GFP_KERNEL, node); 234 /* If the accelerator is connected to a node with no memory
235 * there is no point in using the accelerator since the remote
236 * memory transaction will be very slow. */
237 dev_err(&pdev->dev, "Invalid NUMA configuration.\n");
238 return -EINVAL;
239 }
240
241 accel_dev = kzalloc_node(sizeof(*accel_dev), GFP_KERNEL,
242 dev_to_node(&pdev->dev));
251 if (!accel_dev) 243 if (!accel_dev)
252 return -ENOMEM; 244 return -ENOMEM;
253 245
254 accel_dev->numa_node = node;
255 INIT_LIST_HEAD(&accel_dev->crypto_list); 246 INIT_LIST_HEAD(&accel_dev->crypto_list);
256 247
257 /* Add accel device to accel table. 248 /* Add accel device to accel table.
@@ -264,7 +255,8 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
264 255
265 accel_dev->owner = THIS_MODULE; 256 accel_dev->owner = THIS_MODULE;
266 /* Allocate and configure device configuration structure */ 257 /* Allocate and configure device configuration structure */
267 hw_data = kzalloc_node(sizeof(*hw_data), GFP_KERNEL, node); 258 hw_data = kzalloc_node(sizeof(*hw_data), GFP_KERNEL,
259 dev_to_node(&pdev->dev));
268 if (!hw_data) { 260 if (!hw_data) {
269 ret = -ENOMEM; 261 ret = -ENOMEM;
270 goto out_err; 262 goto out_err;
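Besides switching allocations to dev_to_node(), the probe above now rejects devices that report no NUMA node on a multi-node system, since every DMA transaction would then be remote. A condensed sketch of that check:

    /* Condensed from the probe above: bail out early when NUMA
     * placement cannot be determined on a multi-node machine. */
    static int numa_sanity_check(struct pci_dev *pdev)
    {
            if (num_possible_nodes() > 1 && dev_to_node(&pdev->dev) < 0) {
                    dev_err(&pdev->dev, "Invalid NUMA configuration.\n");
                    return -EINVAL;
            }
            return 0;
    }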
diff --git a/drivers/crypto/qat/qat_dh895xcc/adf_isr.c b/drivers/crypto/qat/qat_dh895xcc/adf_isr.c
index 67ec61e51185..d96ee21b9b77 100644
--- a/drivers/crypto/qat/qat_dh895xcc/adf_isr.c
+++ b/drivers/crypto/qat/qat_dh895xcc/adf_isr.c
@@ -168,7 +168,7 @@ static int adf_isr_alloc_msix_entry_table(struct adf_accel_dev *accel_dev)
168 uint32_t msix_num_entries = hw_data->num_banks + 1; 168 uint32_t msix_num_entries = hw_data->num_banks + 1;
169 169
170 entries = kzalloc_node(msix_num_entries * sizeof(*entries), 170 entries = kzalloc_node(msix_num_entries * sizeof(*entries),
171 GFP_KERNEL, accel_dev->numa_node); 171 GFP_KERNEL, dev_to_node(&GET_DEV(accel_dev)));
172 if (!entries) 172 if (!entries)
173 return -ENOMEM; 173 return -ENOMEM;
174 174
diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c
index 5d997a33907e..2a3973a7c441 100644
--- a/drivers/firewire/core-cdev.c
+++ b/drivers/firewire/core-cdev.c
@@ -1637,8 +1637,7 @@ static int dispatch_ioctl(struct client *client,
1637 _IOC_SIZE(cmd) > sizeof(buffer)) 1637 _IOC_SIZE(cmd) > sizeof(buffer))
1638 return -ENOTTY; 1638 return -ENOTTY;
1639 1639
1640 if (_IOC_DIR(cmd) == _IOC_READ) 1640 memset(&buffer, 0, sizeof(buffer));
1641 memset(&buffer, 0, _IOC_SIZE(cmd));
1642 1641
1643 if (_IOC_DIR(cmd) & _IOC_WRITE) 1642 if (_IOC_DIR(cmd) & _IOC_WRITE)
1644 if (copy_from_user(&buffer, arg, _IOC_SIZE(cmd))) 1643 if (copy_from_user(&buffer, arg, _IOC_SIZE(cmd)))
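The dispatch_ioctl() change above zeroes the whole union unconditionally rather than only _IOC_SIZE(cmd) bytes on read-direction commands; a write-direction command that copies in fewer bytes than a later handler copies out could otherwise leak stale kernel stack. A sketch of the hardened shape, with a hypothetical argument union:

    /* Clear the entire buffer before partially filling it from user
     * space, so a later copy_to_user() can never expose stale stack
     * contents. */
    static int dispatch_example(unsigned int cmd, void __user *arg)
    {
            union { int a; long b; char raw[64]; } buffer;

            memset(&buffer, 0, sizeof(buffer));
            if (_IOC_DIR(cmd) & _IOC_WRITE)
                    if (copy_from_user(&buffer, arg, _IOC_SIZE(cmd)))
                            return -EFAULT;
            /* ... handle the ioctl, then copy the result back ... */
            return 0;
    }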
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 055d5e7fbf12..2318b4c7a8f8 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -986,6 +986,15 @@ static int i915_pm_freeze(struct device *dev)
986 return i915_drm_freeze(drm_dev); 986 return i915_drm_freeze(drm_dev);
987} 987}
988 988
989static int i915_pm_freeze_late(struct device *dev)
990{
991 struct pci_dev *pdev = to_pci_dev(dev);
992 struct drm_device *drm_dev = pci_get_drvdata(pdev);
993 struct drm_i915_private *dev_priv = drm_dev->dev_private;
994
995 return intel_suspend_complete(dev_priv);
996}
997
989static int i915_pm_thaw_early(struct device *dev) 998static int i915_pm_thaw_early(struct device *dev)
990{ 999{
991 struct pci_dev *pdev = to_pci_dev(dev); 1000 struct pci_dev *pdev = to_pci_dev(dev);
@@ -1570,6 +1579,7 @@ static const struct dev_pm_ops i915_pm_ops = {
1570 .resume_early = i915_pm_resume_early, 1579 .resume_early = i915_pm_resume_early,
1571 .resume = i915_pm_resume, 1580 .resume = i915_pm_resume,
1572 .freeze = i915_pm_freeze, 1581 .freeze = i915_pm_freeze,
1582 .freeze_late = i915_pm_freeze_late,
1573 .thaw_early = i915_pm_thaw_early, 1583 .thaw_early = i915_pm_thaw_early,
1574 .thaw = i915_pm_thaw, 1584 .thaw = i915_pm_thaw,
1575 .poweroff = i915_pm_poweroff, 1585 .poweroff = i915_pm_poweroff,
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index b672b843fd5e..728938f02341 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -1902,6 +1902,22 @@ static void bdw_setup_private_ppat(struct drm_i915_private *dev_priv)
1902 GEN8_PPAT(6, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(2)) | 1902 GEN8_PPAT(6, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(2)) |
1903 GEN8_PPAT(7, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3)); 1903 GEN8_PPAT(7, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3));
1904 1904
1905 if (!USES_PPGTT(dev_priv->dev))
1906 /* Spec: "For GGTT, there is NO pat_sel[2:0] from the entry,
1907 * so RTL will always use the value corresponding to
1908 * pat_sel = 000".
1909 * So let's disable cache for GGTT to avoid screen corruptions.
1910 * MOCS still can be used though.
1911 * - System agent ggtt writes (i.e. cpu gtt mmaps) already work
1912 * before this patch, i.e. the same uncached + snooping access
1913 * like on gen6/7 seems to be in effect.
1914 * - So this just fixes blitter/render access. Again it looks
1915 * like it's not just uncached access, but uncached + snooping.
1916 * So we can still hold onto all our assumptions wrt cpu
1917 * clflushing on LLC machines.
1918 */
1919 pat = GEN8_PPAT(0, GEN8_PPAT_UC);
1920
1905 /* XXX: spec defines this as 2 distinct registers. It's unclear if a 64b 1921 /* XXX: spec defines this as 2 distinct registers. It's unclear if a 64b
1906 * write would work. */ 1922 * write would work. */
1907 I915_WRITE(GEN8_PRIVATE_PAT, pat); 1923 I915_WRITE(GEN8_PRIVATE_PAT, pat);
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index 0e018cb49147..41b3be217493 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -1098,12 +1098,25 @@ static u32 get_backlight_min_vbt(struct intel_connector *connector)
1098 struct drm_device *dev = connector->base.dev; 1098 struct drm_device *dev = connector->base.dev;
1099 struct drm_i915_private *dev_priv = dev->dev_private; 1099 struct drm_i915_private *dev_priv = dev->dev_private;
1100 struct intel_panel *panel = &connector->panel; 1100 struct intel_panel *panel = &connector->panel;
1101 int min;
1101 1102
1102 WARN_ON(panel->backlight.max == 0); 1103 WARN_ON(panel->backlight.max == 0);
1103 1104
1105 /*
1106 * XXX: If the vbt value is 255, it makes min equal to max, which leads
1107 * to problems. There are such machines out there. Either our
1108 * interpretation is wrong or the vbt has bogus data. Or both. Safeguard
1109 * against this by letting the minimum be at most (arbitrarily chosen)
1110 * 25% of the max.
1111 */
1112 min = clamp_t(int, dev_priv->vbt.backlight.min_brightness, 0, 64);
1113 if (min != dev_priv->vbt.backlight.min_brightness) {
1114 DRM_DEBUG_KMS("clamping VBT min backlight %d/255 to %d/255\n",
1115 dev_priv->vbt.backlight.min_brightness, min);
1116 }
1117
1104 /* vbt value is a coefficient in range [0..255] */ 1118 /* vbt value is a coefficient in range [0..255] */
1105 return scale(dev_priv->vbt.backlight.min_brightness, 0, 255, 1119 return scale(min, 0, 255, 0, panel->backlight.max);
1106 0, panel->backlight.max);
1107} 1120}
1108 1121
1109static int bdw_setup_backlight(struct intel_connector *connector) 1122static int bdw_setup_backlight(struct intel_connector *connector)
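get_backlight_min_vbt() above now clamps the VBT minimum-brightness coefficient to at most 64 (25% of the 0..255 range) before scaling, guarding against firmware where min equals max. The core of the safeguard:

    #include <linux/kernel.h>       /* clamp_t() */

    /* Distrust the firmware value: a reported minimum of 255 would
     * make min == max, so cap it at 64/255 (~25%) before scaling
     * into the hardware range. */
    static int sane_vbt_min(int vbt_min)
    {
            return clamp_t(int, vbt_min, 0, 64);
    }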
diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c
index 377afa504d2b..89c01fa6dd8e 100644
--- a/drivers/gpu/drm/radeon/cik.c
+++ b/drivers/gpu/drm/radeon/cik.c
@@ -4313,8 +4313,8 @@ static int cik_cp_gfx_start(struct radeon_device *rdev)
4313 /* init the CE partitions. CE only used for gfx on CIK */ 4313 /* init the CE partitions. CE only used for gfx on CIK */
4314 radeon_ring_write(ring, PACKET3(PACKET3_SET_BASE, 2)); 4314 radeon_ring_write(ring, PACKET3(PACKET3_SET_BASE, 2));
4315 radeon_ring_write(ring, PACKET3_BASE_INDEX(CE_PARTITION_BASE)); 4315 radeon_ring_write(ring, PACKET3_BASE_INDEX(CE_PARTITION_BASE));
4316 radeon_ring_write(ring, 0xc000); 4316 radeon_ring_write(ring, 0x8000);
4317 radeon_ring_write(ring, 0xc000); 4317 radeon_ring_write(ring, 0x8000);
4318 4318
4319 /* setup clear context state */ 4319 /* setup clear context state */
4320 radeon_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0)); 4320 radeon_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
@@ -9447,6 +9447,9 @@ void dce8_bandwidth_update(struct radeon_device *rdev)
9447 u32 num_heads = 0, lb_size; 9447 u32 num_heads = 0, lb_size;
9448 int i; 9448 int i;
9449 9449
9450 if (!rdev->mode_info.mode_config_initialized)
9451 return;
9452
9450 radeon_update_display_priority(rdev); 9453 radeon_update_display_priority(rdev);
9451 9454
9452 for (i = 0; i < rdev->num_crtc; i++) { 9455 for (i = 0; i < rdev->num_crtc; i++) {
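The dce8_bandwidth_update() guard above, repeated below for the evergreen, r100, rs600, rs690, rv515 and si variants, returns early until modesetting has been initialized, because these functions dereference rdev->mode_info.crtcs[], which is not valid before that point. The shared shape:

    /* Shared shape of the guards in this series: bandwidth updates
     * inspect CRTC state, so bail out before modeset init. */
    void example_bandwidth_update(struct radeon_device *rdev)
    {
            if (!rdev->mode_info.mode_config_initialized)
                    return;
            /* ... safe to walk rdev->mode_info.crtcs[] from here ... */
    }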
diff --git a/drivers/gpu/drm/radeon/cik_sdma.c b/drivers/gpu/drm/radeon/cik_sdma.c
index 4e8432d07f15..d748963af08b 100644
--- a/drivers/gpu/drm/radeon/cik_sdma.c
+++ b/drivers/gpu/drm/radeon/cik_sdma.c
@@ -667,17 +667,20 @@ int cik_sdma_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
667{ 667{
668 struct radeon_ib ib; 668 struct radeon_ib ib;
669 unsigned i; 669 unsigned i;
670 unsigned index;
670 int r; 671 int r;
671 void __iomem *ptr = (void *)rdev->vram_scratch.ptr;
672 u32 tmp = 0; 672 u32 tmp = 0;
673 u64 gpu_addr;
673 674
674 if (!ptr) { 675 if (ring->idx == R600_RING_TYPE_DMA_INDEX)
675 DRM_ERROR("invalid vram scratch pointer\n"); 676 index = R600_WB_DMA_RING_TEST_OFFSET;
676 return -EINVAL; 677 else
677 } 678 index = CAYMAN_WB_DMA1_RING_TEST_OFFSET;
679
680 gpu_addr = rdev->wb.gpu_addr + index;
678 681
679 tmp = 0xCAFEDEAD; 682 tmp = 0xCAFEDEAD;
680 writel(tmp, ptr); 683 rdev->wb.wb[index/4] = cpu_to_le32(tmp);
681 684
682 r = radeon_ib_get(rdev, ring->idx, &ib, NULL, 256); 685 r = radeon_ib_get(rdev, ring->idx, &ib, NULL, 256);
683 if (r) { 686 if (r) {
@@ -686,8 +689,8 @@ int cik_sdma_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
686 } 689 }
687 690
688 ib.ptr[0] = SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0); 691 ib.ptr[0] = SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0);
689 ib.ptr[1] = rdev->vram_scratch.gpu_addr & 0xfffffffc; 692 ib.ptr[1] = lower_32_bits(gpu_addr);
690 ib.ptr[2] = upper_32_bits(rdev->vram_scratch.gpu_addr); 693 ib.ptr[2] = upper_32_bits(gpu_addr);
691 ib.ptr[3] = 1; 694 ib.ptr[3] = 1;
692 ib.ptr[4] = 0xDEADBEEF; 695 ib.ptr[4] = 0xDEADBEEF;
693 ib.length_dw = 5; 696 ib.length_dw = 5;
@@ -704,7 +707,7 @@ int cik_sdma_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
704 return r; 707 return r;
705 } 708 }
706 for (i = 0; i < rdev->usec_timeout; i++) { 709 for (i = 0; i < rdev->usec_timeout; i++) {
707 tmp = readl(ptr); 710 tmp = le32_to_cpu(rdev->wb.wb[index/4]);
708 if (tmp == 0xDEADBEEF) 711 if (tmp == 0xDEADBEEF)
709 break; 712 break;
710 DRM_UDELAY(1); 713 DRM_UDELAY(1);
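cik_sdma_ib_test() above (and r600_dma_ib_test() below) now routes the test write through the GPU into the driver's write-back page instead of a VRAM scratch pointer, picking the WB slot by ring and reading it back with le32_to_cpu(). The polling core, as a fragment:

    /* Seed the slot with a sentinel, let the submitted IB overwrite
     * it at rdev->wb.gpu_addr + index, then busy-poll with timeout. */
    rdev->wb.wb[index / 4] = cpu_to_le32(0xCAFEDEAD);
    /* ... build and schedule the IB that writes 0xDEADBEEF ... */
    for (i = 0; i < rdev->usec_timeout; i++) {
            if (le32_to_cpu(rdev->wb.wb[index / 4]) == 0xDEADBEEF)
                    break;
            DRM_UDELAY(1);
    }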
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index f37d39d2bbbc..85995b4e3338 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -2345,6 +2345,9 @@ void evergreen_bandwidth_update(struct radeon_device *rdev)
2345 u32 num_heads = 0, lb_size; 2345 u32 num_heads = 0, lb_size;
2346 int i; 2346 int i;
2347 2347
2348 if (!rdev->mode_info.mode_config_initialized)
2349 return;
2350
2348 radeon_update_display_priority(rdev); 2351 radeon_update_display_priority(rdev);
2349 2352
2350 for (i = 0; i < rdev->num_crtc; i++) { 2353 for (i = 0; i < rdev->num_crtc; i++) {
@@ -2552,6 +2555,7 @@ void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *sav
2552 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1); 2555 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1);
2553 tmp |= EVERGREEN_CRTC_BLANK_DATA_EN; 2556 tmp |= EVERGREEN_CRTC_BLANK_DATA_EN;
2554 WREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i], tmp); 2557 WREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i], tmp);
2558 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 0);
2555 } 2559 }
2556 } else { 2560 } else {
2557 tmp = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]); 2561 tmp = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]);
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index 10f8be0ee173..b53b31a7b76f 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -3207,6 +3207,9 @@ void r100_bandwidth_update(struct radeon_device *rdev)
3207 uint32_t pixel_bytes1 = 0; 3207 uint32_t pixel_bytes1 = 0;
3208 uint32_t pixel_bytes2 = 0; 3208 uint32_t pixel_bytes2 = 0;
3209 3209
3210 if (!rdev->mode_info.mode_config_initialized)
3211 return;
3212
3210 radeon_update_display_priority(rdev); 3213 radeon_update_display_priority(rdev);
3211 3214
3212 if (rdev->mode_info.crtcs[0]->base.enabled) { 3215 if (rdev->mode_info.crtcs[0]->base.enabled) {
diff --git a/drivers/gpu/drm/radeon/r600_dma.c b/drivers/gpu/drm/radeon/r600_dma.c
index aabc343b9a8f..cf0df45d455e 100644
--- a/drivers/gpu/drm/radeon/r600_dma.c
+++ b/drivers/gpu/drm/radeon/r600_dma.c
@@ -338,17 +338,17 @@ int r600_dma_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
338{ 338{
339 struct radeon_ib ib; 339 struct radeon_ib ib;
340 unsigned i; 340 unsigned i;
341 unsigned index;
341 int r; 342 int r;
342 void __iomem *ptr = (void *)rdev->vram_scratch.ptr;
343 u32 tmp = 0; 343 u32 tmp = 0;
344 u64 gpu_addr;
344 345
345 if (!ptr) { 346 if (ring->idx == R600_RING_TYPE_DMA_INDEX)
346 DRM_ERROR("invalid vram scratch pointer\n"); 347 index = R600_WB_DMA_RING_TEST_OFFSET;
347 return -EINVAL; 348 else
348 } 349 index = CAYMAN_WB_DMA1_RING_TEST_OFFSET;
349 350
350 tmp = 0xCAFEDEAD; 351 gpu_addr = rdev->wb.gpu_addr + index;
351 writel(tmp, ptr);
352 352
353 r = radeon_ib_get(rdev, ring->idx, &ib, NULL, 256); 353 r = radeon_ib_get(rdev, ring->idx, &ib, NULL, 256);
354 if (r) { 354 if (r) {
@@ -357,8 +357,8 @@ int r600_dma_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
357 } 357 }
358 358
359 ib.ptr[0] = DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1); 359 ib.ptr[0] = DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1);
360 ib.ptr[1] = rdev->vram_scratch.gpu_addr & 0xfffffffc; 360 ib.ptr[1] = lower_32_bits(gpu_addr);
361 ib.ptr[2] = upper_32_bits(rdev->vram_scratch.gpu_addr) & 0xff; 361 ib.ptr[2] = upper_32_bits(gpu_addr) & 0xff;
362 ib.ptr[3] = 0xDEADBEEF; 362 ib.ptr[3] = 0xDEADBEEF;
363 ib.length_dw = 4; 363 ib.length_dw = 4;
364 364
@@ -374,7 +374,7 @@ int r600_dma_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
374 return r; 374 return r;
375 } 375 }
376 for (i = 0; i < rdev->usec_timeout; i++) { 376 for (i = 0; i < rdev->usec_timeout; i++) {
377 tmp = readl(ptr); 377 tmp = le32_to_cpu(rdev->wb.wb[index/4]);
378 if (tmp == 0xDEADBEEF) 378 if (tmp == 0xDEADBEEF)
379 break; 379 break;
380 DRM_UDELAY(1); 380 DRM_UDELAY(1);
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c
index 5f6db4629aaa..9acb1c3c005b 100644
--- a/drivers/gpu/drm/radeon/rs600.c
+++ b/drivers/gpu/drm/radeon/rs600.c
@@ -879,6 +879,9 @@ void rs600_bandwidth_update(struct radeon_device *rdev)
879 u32 d1mode_priority_a_cnt, d2mode_priority_a_cnt; 879 u32 d1mode_priority_a_cnt, d2mode_priority_a_cnt;
880 /* FIXME: implement full support */ 880 /* FIXME: implement full support */
881 881
882 if (!rdev->mode_info.mode_config_initialized)
883 return;
884
882 radeon_update_display_priority(rdev); 885 radeon_update_display_priority(rdev);
883 886
884 if (rdev->mode_info.crtcs[0]->base.enabled) 887 if (rdev->mode_info.crtcs[0]->base.enabled)
diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c
index 3462b64369bf..0a2d36e81108 100644
--- a/drivers/gpu/drm/radeon/rs690.c
+++ b/drivers/gpu/drm/radeon/rs690.c
@@ -579,6 +579,9 @@ void rs690_bandwidth_update(struct radeon_device *rdev)
579 u32 d1mode_priority_a_cnt, d1mode_priority_b_cnt; 579 u32 d1mode_priority_a_cnt, d1mode_priority_b_cnt;
580 u32 d2mode_priority_a_cnt, d2mode_priority_b_cnt; 580 u32 d2mode_priority_a_cnt, d2mode_priority_b_cnt;
581 581
582 if (!rdev->mode_info.mode_config_initialized)
583 return;
584
582 radeon_update_display_priority(rdev); 585 radeon_update_display_priority(rdev);
583 586
584 if (rdev->mode_info.crtcs[0]->base.enabled) 587 if (rdev->mode_info.crtcs[0]->base.enabled)
diff --git a/drivers/gpu/drm/radeon/rv515.c b/drivers/gpu/drm/radeon/rv515.c
index 8a477bf1fdb3..c55d653aaf5f 100644
--- a/drivers/gpu/drm/radeon/rv515.c
+++ b/drivers/gpu/drm/radeon/rv515.c
@@ -1277,6 +1277,9 @@ void rv515_bandwidth_update(struct radeon_device *rdev)
1277 struct drm_display_mode *mode0 = NULL; 1277 struct drm_display_mode *mode0 = NULL;
1278 struct drm_display_mode *mode1 = NULL; 1278 struct drm_display_mode *mode1 = NULL;
1279 1279
1280 if (!rdev->mode_info.mode_config_initialized)
1281 return;
1282
1280 radeon_update_display_priority(rdev); 1283 radeon_update_display_priority(rdev);
1281 1284
1282 if (rdev->mode_info.crtcs[0]->base.enabled) 1285 if (rdev->mode_info.crtcs[0]->base.enabled)
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index eeea5b6a1775..7d5083dc4acb 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -2384,6 +2384,9 @@ void dce6_bandwidth_update(struct radeon_device *rdev)
2384 u32 num_heads = 0, lb_size; 2384 u32 num_heads = 0, lb_size;
2385 int i; 2385 int i;
2386 2386
2387 if (!rdev->mode_info.mode_config_initialized)
2388 return;
2389
2387 radeon_update_display_priority(rdev); 2390 radeon_update_display_priority(rdev);
2388 2391
2389 for (i = 0; i < rdev->num_crtc; i++) { 2392 for (i = 0; i < rdev->num_crtc; i++) {
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index 73bd9e2e42bc..3402033fa52a 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -1659,6 +1659,7 @@ void hid_disconnect(struct hid_device *hdev)
1659 hdev->hiddev_disconnect(hdev); 1659 hdev->hiddev_disconnect(hdev);
1660 if (hdev->claimed & HID_CLAIMED_HIDRAW) 1660 if (hdev->claimed & HID_CLAIMED_HIDRAW)
1661 hidraw_disconnect(hdev); 1661 hidraw_disconnect(hdev);
1662 hdev->claimed = 0;
1662} 1663}
1663EXPORT_SYMBOL_GPL(hid_disconnect); 1664EXPORT_SYMBOL_GPL(hid_disconnect);
1664 1665
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index e23ab8b30626..7c863738e419 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -299,6 +299,7 @@
299#define USB_VENDOR_ID_ELAN 0x04f3 299#define USB_VENDOR_ID_ELAN 0x04f3
300#define USB_DEVICE_ID_ELAN_TOUCHSCREEN 0x0089 300#define USB_DEVICE_ID_ELAN_TOUCHSCREEN 0x0089
301#define USB_DEVICE_ID_ELAN_TOUCHSCREEN_009B 0x009b 301#define USB_DEVICE_ID_ELAN_TOUCHSCREEN_009B 0x009b
302#define USB_DEVICE_ID_ELAN_TOUCHSCREEN_0103 0x0103
302#define USB_DEVICE_ID_ELAN_TOUCHSCREEN_016F 0x016f 303#define USB_DEVICE_ID_ELAN_TOUCHSCREEN_016F 0x016f
303 304
304#define USB_VENDOR_ID_ELECOM 0x056e 305#define USB_VENDOR_ID_ELECOM 0x056e
diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
index 5014bb567b29..552671ee7c5d 100644
--- a/drivers/hid/usbhid/hid-quirks.c
+++ b/drivers/hid/usbhid/hid-quirks.c
@@ -72,6 +72,7 @@ static const struct hid_blacklist {
72 { USB_VENDOR_ID_DMI, USB_DEVICE_ID_DMI_ENC, HID_QUIRK_NOGET }, 72 { USB_VENDOR_ID_DMI, USB_DEVICE_ID_DMI_ENC, HID_QUIRK_NOGET },
73 { USB_VENDOR_ID_ELAN, USB_DEVICE_ID_ELAN_TOUCHSCREEN, HID_QUIRK_ALWAYS_POLL }, 73 { USB_VENDOR_ID_ELAN, USB_DEVICE_ID_ELAN_TOUCHSCREEN, HID_QUIRK_ALWAYS_POLL },
74 { USB_VENDOR_ID_ELAN, USB_DEVICE_ID_ELAN_TOUCHSCREEN_009B, HID_QUIRK_ALWAYS_POLL }, 74 { USB_VENDOR_ID_ELAN, USB_DEVICE_ID_ELAN_TOUCHSCREEN_009B, HID_QUIRK_ALWAYS_POLL },
75 { USB_VENDOR_ID_ELAN, USB_DEVICE_ID_ELAN_TOUCHSCREEN_0103, HID_QUIRK_ALWAYS_POLL },
75 { USB_VENDOR_ID_ELAN, USB_DEVICE_ID_ELAN_TOUCHSCREEN_016F, HID_QUIRK_ALWAYS_POLL }, 76 { USB_VENDOR_ID_ELAN, USB_DEVICE_ID_ELAN_TOUCHSCREEN_016F, HID_QUIRK_ALWAYS_POLL },
76 { USB_VENDOR_ID_ELO, USB_DEVICE_ID_ELO_TS2700, HID_QUIRK_NOGET }, 77 { USB_VENDOR_ID_ELO, USB_DEVICE_ID_ELO_TS2700, HID_QUIRK_NOGET },
77 { USB_VENDOR_ID_FORMOSA, USB_DEVICE_ID_FORMOSA_IR_RECEIVER, HID_QUIRK_NO_INIT_REPORTS }, 78 { USB_VENDOR_ID_FORMOSA, USB_DEVICE_ID_FORMOSA_IR_RECEIVER, HID_QUIRK_NO_INIT_REPORTS },
diff --git a/drivers/hwmon/fam15h_power.c b/drivers/hwmon/fam15h_power.c
index fcdbde4ec692..3057dfc7e3bc 100644
--- a/drivers/hwmon/fam15h_power.c
+++ b/drivers/hwmon/fam15h_power.c
@@ -234,7 +234,7 @@ static const struct pci_device_id fam15h_power_id_table[] = {
234 { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_15H_NB_F4) }, 234 { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_15H_NB_F4) },
235 { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_15H_M30H_NB_F4) }, 235 { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_15H_M30H_NB_F4) },
236 { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_16H_NB_F4) }, 236 { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_16H_NB_F4) },
237 { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_16H_M30H_NB_F3) }, 237 { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_16H_M30H_NB_F4) },
238 {} 238 {}
239}; 239};
240MODULE_DEVICE_TABLE(pci, fam15h_power_id_table); 240MODULE_DEVICE_TABLE(pci, fam15h_power_id_table);
diff --git a/drivers/hwmon/ibmpowernv.c b/drivers/hwmon/ibmpowernv.c
index d2bf2c97ae70..6a30eeea94be 100644
--- a/drivers/hwmon/ibmpowernv.c
+++ b/drivers/hwmon/ibmpowernv.c
@@ -181,7 +181,7 @@ static int __init populate_attr_groups(struct platform_device *pdev)
181 181
182 opal = of_find_node_by_path("/ibm,opal/sensors"); 182 opal = of_find_node_by_path("/ibm,opal/sensors");
183 if (!opal) { 183 if (!opal) {
184 dev_err(&pdev->dev, "Opal node 'sensors' not found\n"); 184 dev_dbg(&pdev->dev, "Opal node 'sensors' not found\n");
185 return -ENODEV; 185 return -ENODEV;
186 } 186 }
187 187
@@ -335,7 +335,9 @@ static int __init ibmpowernv_init(void)
335 335
336 err = platform_driver_probe(&ibmpowernv_driver, ibmpowernv_probe); 336 err = platform_driver_probe(&ibmpowernv_driver, ibmpowernv_probe);
337 if (err) { 337 if (err) {
338 pr_err("Platfrom driver probe failed\n"); 338 if (err != -ENODEV)
339 pr_err("Platform driver probe failed (%d)\n", err);
340
339 goto exit_device_del; 341 goto exit_device_del;
340 } 342 }
341 343
diff --git a/drivers/hwmon/pwm-fan.c b/drivers/hwmon/pwm-fan.c
index 823c877a1ec0..1991d9032c38 100644
--- a/drivers/hwmon/pwm-fan.c
+++ b/drivers/hwmon/pwm-fan.c
@@ -161,10 +161,17 @@ static int pwm_fan_suspend(struct device *dev)
161static int pwm_fan_resume(struct device *dev) 161static int pwm_fan_resume(struct device *dev)
162{ 162{
163 struct pwm_fan_ctx *ctx = dev_get_drvdata(dev); 163 struct pwm_fan_ctx *ctx = dev_get_drvdata(dev);
164 unsigned long duty;
165 int ret;
164 166
165 if (ctx->pwm_value) 167 if (ctx->pwm_value == 0)
166 return pwm_enable(ctx->pwm); 168 return 0;
167 return 0; 169
170 duty = DIV_ROUND_UP(ctx->pwm_value * (ctx->pwm->period - 1), MAX_PWM);
171 ret = pwm_config(ctx->pwm, duty, ctx->pwm->period);
172 if (ret)
173 return ret;
174 return pwm_enable(ctx->pwm);
168} 175}
169#endif 176#endif
170 177
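The resume path above previously only re-enabled the PWM; it now also reprograms the duty cycle from the cached pwm_value, since the controller can lose its configuration across suspend. A worked example of the computation, assuming a 1 ms period and the 8-bit scale MAX_PWM == 255:

    /* pwm_value = 128, period = 1000000 ns, MAX_PWM = 255:
     * duty = DIV_ROUND_UP(128 * (1000000 - 1), 255) = 501961 ns,
     * i.e. roughly 50% duty, programmed before pwm_enable(). */
    duty = DIV_ROUND_UP(ctx->pwm_value * (ctx->pwm->period - 1), MAX_PWM);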
diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c
index 825ca1f87639..afe79719ea32 100644
--- a/drivers/md/dm-bufio.c
+++ b/drivers/md/dm-bufio.c
@@ -1434,9 +1434,9 @@ static void drop_buffers(struct dm_bufio_client *c)
1434 1434
1435/* 1435/*
1436 * Test if the buffer is unused and too old, and commit it. 1436 * Test if the buffer is unused and too old, and commit it.
1437 * At if noio is set, we must not do any I/O because we hold 1437 * And if GFP_NOFS is used, we must not do any I/O because we hold
1438 * dm_bufio_clients_lock and we would risk deadlock if the I/O gets rerouted to 1438 * dm_bufio_clients_lock and we would risk deadlock if the I/O gets
1439 * different bufio client. 1439 * rerouted to different bufio client.
1440 */ 1440 */
1441static int __cleanup_old_buffer(struct dm_buffer *b, gfp_t gfp, 1441static int __cleanup_old_buffer(struct dm_buffer *b, gfp_t gfp,
1442 unsigned long max_jiffies) 1442 unsigned long max_jiffies)
@@ -1444,7 +1444,7 @@ static int __cleanup_old_buffer(struct dm_buffer *b, gfp_t gfp,
1444 if (jiffies - b->last_accessed < max_jiffies) 1444 if (jiffies - b->last_accessed < max_jiffies)
1445 return 0; 1445 return 0;
1446 1446
1447 if (!(gfp & __GFP_IO)) { 1447 if (!(gfp & __GFP_FS)) {
1448 if (test_bit(B_READING, &b->state) || 1448 if (test_bit(B_READING, &b->state) ||
1449 test_bit(B_WRITING, &b->state) || 1449 test_bit(B_WRITING, &b->state) ||
1450 test_bit(B_DIRTY, &b->state)) 1450 test_bit(B_DIRTY, &b->state))
@@ -1486,7 +1486,7 @@ dm_bufio_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
1486 unsigned long freed; 1486 unsigned long freed;
1487 1487
1488 c = container_of(shrink, struct dm_bufio_client, shrinker); 1488 c = container_of(shrink, struct dm_bufio_client, shrinker);
1489 if (sc->gfp_mask & __GFP_IO) 1489 if (sc->gfp_mask & __GFP_FS)
1490 dm_bufio_lock(c); 1490 dm_bufio_lock(c);
1491 else if (!dm_bufio_trylock(c)) 1491 else if (!dm_bufio_trylock(c))
1492 return SHRINK_STOP; 1492 return SHRINK_STOP;
@@ -1503,7 +1503,7 @@ dm_bufio_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
1503 unsigned long count; 1503 unsigned long count;
1504 1504
1505 c = container_of(shrink, struct dm_bufio_client, shrinker); 1505 c = container_of(shrink, struct dm_bufio_client, shrinker);
1506 if (sc->gfp_mask & __GFP_IO) 1506 if (sc->gfp_mask & __GFP_FS)
1507 dm_bufio_lock(c); 1507 dm_bufio_lock(c);
1508 else if (!dm_bufio_trylock(c)) 1508 else if (!dm_bufio_trylock(c))
1509 return 0; 1509 return 0;
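The dm-bufio hunks above relax the reclaim gate from __GFP_IO to __GFP_FS: buffer I/O issued here can recurse into a filesystem stacked on another bufio client, so only reclaim contexts that allow filesystem recursion may block on the client lock and do I/O. The gating, in context:

    /* Inside the shrinker callback: block on the lock only when the
     * reclaim mask permits FS recursion; otherwise trylock or give
     * up for this pass. */
    static unsigned long scan_example(struct dm_bufio_client *c,
                                      struct shrink_control *sc)
    {
            if (sc->gfp_mask & __GFP_FS)
                    dm_bufio_lock(c);
            else if (!dm_bufio_trylock(c))
                    return SHRINK_STOP;
            /* ... evict old buffers, then dm_bufio_unlock(c) ... */
            return 0;
    }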
diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
index 4857fa4a5484..07c0fa0fa284 100644
--- a/drivers/md/dm-raid.c
+++ b/drivers/md/dm-raid.c
@@ -789,8 +789,7 @@ struct dm_raid_superblock {
789 __le32 layout; 789 __le32 layout;
790 __le32 stripe_sectors; 790 __le32 stripe_sectors;
791 791
792 __u8 pad[452]; /* Round struct to 512 bytes. */ 792 /* Remainder of a logical block is zero-filled when writing (see super_sync()). */
793 /* Always set to 0 when writing. */
794} __packed; 793} __packed;
795 794
796static int read_disk_sb(struct md_rdev *rdev, int size) 795static int read_disk_sb(struct md_rdev *rdev, int size)
@@ -827,7 +826,7 @@ static void super_sync(struct mddev *mddev, struct md_rdev *rdev)
827 test_bit(Faulty, &(rs->dev[i].rdev.flags))) 826 test_bit(Faulty, &(rs->dev[i].rdev.flags)))
828 failed_devices |= (1ULL << i); 827 failed_devices |= (1ULL << i);
829 828
830 memset(sb, 0, sizeof(*sb)); 829 memset(sb + 1, 0, rdev->sb_size - sizeof(*sb));
831 830
832 sb->magic = cpu_to_le32(DM_RAID_MAGIC); 831 sb->magic = cpu_to_le32(DM_RAID_MAGIC);
833 sb->features = cpu_to_le32(0); /* No features yet */ 832 sb->features = cpu_to_le32(0); /* No features yet */
@@ -862,7 +861,11 @@ static int super_load(struct md_rdev *rdev, struct md_rdev *refdev)
862 uint64_t events_sb, events_refsb; 861 uint64_t events_sb, events_refsb;
863 862
864 rdev->sb_start = 0; 863 rdev->sb_start = 0;
865 rdev->sb_size = sizeof(*sb); 864 rdev->sb_size = bdev_logical_block_size(rdev->meta_bdev);
865 if (rdev->sb_size < sizeof(*sb) || rdev->sb_size > PAGE_SIZE) {
866 DMERR("superblock size of a logical block is no longer valid");
867 return -EINVAL;
868 }
866 869
867 ret = read_disk_sb(rdev, rdev->sb_size); 870 ret = read_disk_sb(rdev, rdev->sb_size);
868 if (ret) 871 if (ret)
@@ -1169,8 +1172,12 @@ static void configure_discard_support(struct dm_target *ti, struct raid_set *rs)
1169 raid456 = (rs->md.level == 4 || rs->md.level == 5 || rs->md.level == 6); 1172 raid456 = (rs->md.level == 4 || rs->md.level == 5 || rs->md.level == 6);
1170 1173
1171 for (i = 0; i < rs->md.raid_disks; i++) { 1174 for (i = 0; i < rs->md.raid_disks; i++) {
1172 struct request_queue *q = bdev_get_queue(rs->dev[i].rdev.bdev); 1175 struct request_queue *q;
1176
1177 if (!rs->dev[i].rdev.bdev)
1178 continue;
1173 1179
1180 q = bdev_get_queue(rs->dev[i].rdev.bdev);
1174 if (!q || !blk_queue_discard(q)) 1181 if (!q || !blk_queue_discard(q))
1175 return; 1182 return;
1176 1183
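The dm-raid changes above size the superblock to the metadata device's logical block instead of a fixed 512 bytes: super_load() validates the size, and super_sync() zero-fills only the slack after the structured fields. Note the pointer arithmetic there, sketched with the types from the hunk:

    struct dm_raid_superblock *sb = page_address(rdev->sb_page);

    /* 'sb + 1' advances by sizeof(*sb), so only the tail of the
     * logical block beyond the structured fields is cleared; the
     * fields themselves are assigned explicitly afterwards. */
    memset(sb + 1, 0, rdev->sb_size - sizeof(*sb));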
diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
index d1600d2aa2e2..f8b37d4c05d8 100644
--- a/drivers/md/dm-stripe.c
+++ b/drivers/md/dm-stripe.c
@@ -159,8 +159,10 @@ static int stripe_ctr(struct dm_target *ti, unsigned int argc, char **argv)
159 sc->stripes_shift = __ffs(stripes); 159 sc->stripes_shift = __ffs(stripes);
160 160
161 r = dm_set_target_max_io_len(ti, chunk_size); 161 r = dm_set_target_max_io_len(ti, chunk_size);
162 if (r) 162 if (r) {
163 kfree(sc);
163 return r; 164 return r;
165 }
164 166
165 ti->num_flush_bios = stripes; 167 ti->num_flush_bios = stripes;
166 ti->num_discard_bios = stripes; 168 ti->num_discard_bios = stripes;
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index 4843801173fe..0f86d802b533 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -1936,6 +1936,14 @@ static int thin_bio_map(struct dm_target *ti, struct bio *bio)
1936 return DM_MAPIO_SUBMITTED; 1936 return DM_MAPIO_SUBMITTED;
1937 } 1937 }
1938 1938
1939 /*
1940 * We must hold the virtual cell before doing the lookup, otherwise
1941 * there's a race with discard.
1942 */
1943 build_virtual_key(tc->td, block, &key);
1944 if (dm_bio_detain(tc->pool->prison, &key, bio, &cell1, &cell_result))
1945 return DM_MAPIO_SUBMITTED;
1946
1939 r = dm_thin_find_block(td, block, 0, &result); 1947 r = dm_thin_find_block(td, block, 0, &result);
1940 1948
1941 /* 1949 /*
@@ -1959,13 +1967,10 @@ static int thin_bio_map(struct dm_target *ti, struct bio *bio)
1959 * shared flag will be set in their case. 1967 * shared flag will be set in their case.
1960 */ 1968 */
1961 thin_defer_bio(tc, bio); 1969 thin_defer_bio(tc, bio);
1970 cell_defer_no_holder_no_free(tc, &cell1);
1962 return DM_MAPIO_SUBMITTED; 1971 return DM_MAPIO_SUBMITTED;
1963 } 1972 }
1964 1973
1965 build_virtual_key(tc->td, block, &key);
1966 if (dm_bio_detain(tc->pool->prison, &key, bio, &cell1, &cell_result))
1967 return DM_MAPIO_SUBMITTED;
1968
1969 build_data_key(tc->td, result.block, &key); 1974 build_data_key(tc->td, result.block, &key);
1970 if (dm_bio_detain(tc->pool->prison, &key, bio, &cell2, &cell_result)) { 1975 if (dm_bio_detain(tc->pool->prison, &key, bio, &cell2, &cell_result)) {
1971 cell_defer_no_holder_no_free(tc, &cell1); 1976 cell_defer_no_holder_no_free(tc, &cell1);
@@ -1986,6 +1991,7 @@ static int thin_bio_map(struct dm_target *ti, struct bio *bio)
1986 * of doing so. 1991 * of doing so.
1987 */ 1992 */
1988 handle_unserviceable_bio(tc->pool, bio); 1993 handle_unserviceable_bio(tc->pool, bio);
1994 cell_defer_no_holder_no_free(tc, &cell1);
1989 return DM_MAPIO_SUBMITTED; 1995 return DM_MAPIO_SUBMITTED;
1990 } 1996 }
1991 /* fall through */ 1997 /* fall through */
@@ -1996,6 +2002,7 @@ static int thin_bio_map(struct dm_target *ti, struct bio *bio)
1996 * provide the hint to load the metadata into cache. 2002 * provide the hint to load the metadata into cache.
1997 */ 2003 */
1998 thin_defer_bio(tc, bio); 2004 thin_defer_bio(tc, bio);
2005 cell_defer_no_holder_no_free(tc, &cell1);
1999 return DM_MAPIO_SUBMITTED; 2006 return DM_MAPIO_SUBMITTED;
2000 2007
2001 default: 2008 default:
@@ -2005,6 +2012,7 @@ static int thin_bio_map(struct dm_target *ti, struct bio *bio)
2005 * pool is switched to fail-io mode. 2012 * pool is switched to fail-io mode.
2006 */ 2013 */
2007 bio_io_error(bio); 2014 bio_io_error(bio);
2015 cell_defer_no_holder_no_free(tc, &cell1);
2008 return DM_MAPIO_SUBMITTED; 2016 return DM_MAPIO_SUBMITTED;
2009 } 2017 }
2010} 2018}
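thin_bio_map() above now takes the virtual cell before calling dm_thin_find_block(), closing a race with discard; as a consequence, every early return after the lookup must release the cell, which is what the added cell_defer_no_holder_no_free() calls do. The reordered core:

    /* Detain the per-block cell first, then look up the mapping;
     * each exit path after this point releases cell1. */
    build_virtual_key(tc->td, block, &key);
    if (dm_bio_detain(tc->pool->prison, &key, bio, &cell1, &cell_result))
            return DM_MAPIO_SUBMITTED;      /* another holder won */

    r = dm_thin_find_block(td, block, 0, &result);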
diff --git a/drivers/md/persistent-data/dm-btree-internal.h b/drivers/md/persistent-data/dm-btree-internal.h
index 37d367bb9aa8..bf2b80d5c470 100644
--- a/drivers/md/persistent-data/dm-btree-internal.h
+++ b/drivers/md/persistent-data/dm-btree-internal.h
@@ -42,6 +42,12 @@ struct btree_node {
42} __packed; 42} __packed;
43 43
44 44
45/*
46 * Locks a block using the btree node validator.
47 */
48int bn_read_lock(struct dm_btree_info *info, dm_block_t b,
49 struct dm_block **result);
50
45void inc_children(struct dm_transaction_manager *tm, struct btree_node *n, 51void inc_children(struct dm_transaction_manager *tm, struct btree_node *n,
46 struct dm_btree_value_type *vt); 52 struct dm_btree_value_type *vt);
47 53
diff --git a/drivers/md/persistent-data/dm-btree-spine.c b/drivers/md/persistent-data/dm-btree-spine.c
index cf9fd676ae44..1b5e13ec7f96 100644
--- a/drivers/md/persistent-data/dm-btree-spine.c
+++ b/drivers/md/persistent-data/dm-btree-spine.c
@@ -92,7 +92,7 @@ struct dm_block_validator btree_node_validator = {
92 92
93/*----------------------------------------------------------------*/ 93/*----------------------------------------------------------------*/
94 94
95static int bn_read_lock(struct dm_btree_info *info, dm_block_t b, 95int bn_read_lock(struct dm_btree_info *info, dm_block_t b,
96 struct dm_block **result) 96 struct dm_block **result)
97{ 97{
98 return dm_tm_read_lock(info->tm, b, &btree_node_validator, result); 98 return dm_tm_read_lock(info->tm, b, &btree_node_validator, result);
diff --git a/drivers/md/persistent-data/dm-btree.c b/drivers/md/persistent-data/dm-btree.c
index 416060c25709..200ac12a1d40 100644
--- a/drivers/md/persistent-data/dm-btree.c
+++ b/drivers/md/persistent-data/dm-btree.c
@@ -847,22 +847,26 @@ EXPORT_SYMBOL_GPL(dm_btree_find_lowest_key);
847 * FIXME: We shouldn't use a recursive algorithm when we have limited stack 847 * FIXME: We shouldn't use a recursive algorithm when we have limited stack
848 * space. Also this only works for single level trees. 848 * space. Also this only works for single level trees.
849 */ 849 */
850static int walk_node(struct ro_spine *s, dm_block_t block, 850static int walk_node(struct dm_btree_info *info, dm_block_t block,
851 int (*fn)(void *context, uint64_t *keys, void *leaf), 851 int (*fn)(void *context, uint64_t *keys, void *leaf),
852 void *context) 852 void *context)
853{ 853{
854 int r; 854 int r;
855 unsigned i, nr; 855 unsigned i, nr;
856 struct dm_block *node;
856 struct btree_node *n; 857 struct btree_node *n;
857 uint64_t keys; 858 uint64_t keys;
858 859
859 r = ro_step(s, block); 860 r = bn_read_lock(info, block, &node);
860 n = ro_node(s); 861 if (r)
862 return r;
863
864 n = dm_block_data(node);
861 865
862 nr = le32_to_cpu(n->header.nr_entries); 866 nr = le32_to_cpu(n->header.nr_entries);
863 for (i = 0; i < nr; i++) { 867 for (i = 0; i < nr; i++) {
864 if (le32_to_cpu(n->header.flags) & INTERNAL_NODE) { 868 if (le32_to_cpu(n->header.flags) & INTERNAL_NODE) {
865 r = walk_node(s, value64(n, i), fn, context); 869 r = walk_node(info, value64(n, i), fn, context);
866 if (r) 870 if (r)
867 goto out; 871 goto out;
868 } else { 872 } else {
@@ -874,7 +878,7 @@ static int walk_node(struct ro_spine *s, dm_block_t block,
874 } 878 }
875 879
876out: 880out:
877 ro_pop(s); 881 dm_tm_unlock(info->tm, node);
878 return r; 882 return r;
879} 883}
880 884
@@ -882,15 +886,7 @@ int dm_btree_walk(struct dm_btree_info *info, dm_block_t root,
882 int (*fn)(void *context, uint64_t *keys, void *leaf), 886 int (*fn)(void *context, uint64_t *keys, void *leaf),
883 void *context) 887 void *context)
884{ 888{
885 int r;
886 struct ro_spine spine;
887
888 BUG_ON(info->levels > 1); 889 BUG_ON(info->levels > 1);
889 890 return walk_node(info, root, fn, context);
890 init_ro_spine(&spine, info);
891 r = walk_node(&spine, root, fn, context);
892 exit_ro_spine(&spine);
893
894 return r;
895} 891}
896EXPORT_SYMBOL_GPL(dm_btree_walk); 892EXPORT_SYMBOL_GPL(dm_btree_walk);
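dm_btree_walk() above stops threading a ro_spine through the recursion and instead has walk_node() take its own read lock per level via the newly exported bn_read_lock(); a spine caches only a couple of locked nodes, which is presumably what limited the safe recursion depth. The per-level locking shape:

    /* Each recursion level holds exactly one read-locked block and
     * releases it on the way out. */
    static int visit(struct dm_btree_info *info, dm_block_t b)
    {
            struct dm_block *node;
            int r = bn_read_lock(info, b, &node);

            if (r)
                    return r;
            /* ... inspect dm_block_data(node); recurse on children ... */
            dm_tm_unlock(info->tm, node);
            return 0;
    }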
diff --git a/drivers/mfd/max77693.c b/drivers/mfd/max77693.c
index cf008f45968c..711773e8e64b 100644
--- a/drivers/mfd/max77693.c
+++ b/drivers/mfd/max77693.c
@@ -240,7 +240,7 @@ static int max77693_i2c_probe(struct i2c_client *i2c,
240 goto err_irq_charger; 240 goto err_irq_charger;
241 } 241 }
242 242
243 ret = regmap_add_irq_chip(max77693->regmap, max77693->irq, 243 ret = regmap_add_irq_chip(max77693->regmap_muic, max77693->irq,
244 IRQF_ONESHOT | IRQF_SHARED | 244 IRQF_ONESHOT | IRQF_SHARED |
245 IRQF_TRIGGER_FALLING, 0, 245 IRQF_TRIGGER_FALLING, 0,
246 &max77693_muic_irq_chip, 246 &max77693_muic_irq_chip,
@@ -250,6 +250,17 @@ static int max77693_i2c_probe(struct i2c_client *i2c,
250 goto err_irq_muic; 250 goto err_irq_muic;
251 } 251 }
252 252
253 /* Unmask interrupts from all blocks in interrupt source register */
254 ret = regmap_update_bits(max77693->regmap,
255 MAX77693_PMIC_REG_INTSRC_MASK,
256 SRC_IRQ_ALL, (unsigned int)~SRC_IRQ_ALL);
257 if (ret < 0) {
258 dev_err(max77693->dev,
259 "Could not unmask interrupts in INTSRC: %d\n",
260 ret);
261 goto err_intsrc;
262 }
263
253 pm_runtime_set_active(max77693->dev); 264 pm_runtime_set_active(max77693->dev);
254 265
255 ret = mfd_add_devices(max77693->dev, -1, max77693_devs, 266 ret = mfd_add_devices(max77693->dev, -1, max77693_devs,
@@ -261,6 +272,7 @@ static int max77693_i2c_probe(struct i2c_client *i2c,
261 272
262err_mfd: 273err_mfd:
263 mfd_remove_devices(max77693->dev); 274 mfd_remove_devices(max77693->dev);
275err_intsrc:
264 regmap_del_irq_chip(max77693->irq, max77693->irq_data_muic); 276 regmap_del_irq_chip(max77693->irq, max77693->irq_data_muic);
265err_irq_muic: 277err_irq_muic:
266 regmap_del_irq_chip(max77693->irq, max77693->irq_data_charger); 278 regmap_del_irq_chip(max77693->irq, max77693->irq_data_charger);
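The probe addition above unmasks all blocks in the interrupt source register, without which the sub-block IRQs never reach their handlers. regmap_update_bits(map, reg, mask, val) read-modify-writes only the bits in mask, so the rest of the register is preserved:

    /* Clear only the SRC_IRQ_ALL bits (0 == unmasked in this
     * register); other bits are left untouched. */
    ret = regmap_update_bits(max77693->regmap,
                             MAX77693_PMIC_REG_INTSRC_MASK,
                             SRC_IRQ_ALL, (unsigned int)~SRC_IRQ_ALL);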
diff --git a/drivers/mfd/rtsx_pcr.c b/drivers/mfd/rtsx_pcr.c
index f2643c221d34..30f7ca89a0e6 100644
--- a/drivers/mfd/rtsx_pcr.c
+++ b/drivers/mfd/rtsx_pcr.c
@@ -947,6 +947,7 @@ static void rtsx_pci_idle_work(struct work_struct *work)
947 mutex_unlock(&pcr->pcr_mutex); 947 mutex_unlock(&pcr->pcr_mutex);
948} 948}
949 949
950#ifdef CONFIG_PM
950static void rtsx_pci_power_off(struct rtsx_pcr *pcr, u8 pm_state) 951static void rtsx_pci_power_off(struct rtsx_pcr *pcr, u8 pm_state)
951{ 952{
952 if (pcr->ops->turn_off_led) 953 if (pcr->ops->turn_off_led)
@@ -961,6 +962,7 @@ static void rtsx_pci_power_off(struct rtsx_pcr *pcr, u8 pm_state)
961 if (pcr->ops->force_power_down) 962 if (pcr->ops->force_power_down)
962 pcr->ops->force_power_down(pcr, pm_state); 963 pcr->ops->force_power_down(pcr, pm_state);
963} 964}
965#endif
964 966
965static int rtsx_pci_init_hw(struct rtsx_pcr *pcr) 967static int rtsx_pci_init_hw(struct rtsx_pcr *pcr)
966{ 968{
diff --git a/drivers/mfd/stmpe.h b/drivers/mfd/stmpe.h
index 2d045f26f193..bee0abf82040 100644
--- a/drivers/mfd/stmpe.h
+++ b/drivers/mfd/stmpe.h
@@ -269,7 +269,7 @@ int stmpe_remove(struct stmpe *stmpe);
269#define STMPE24XX_REG_CHIP_ID 0x80 269#define STMPE24XX_REG_CHIP_ID 0x80
270#define STMPE24XX_REG_IEGPIOR_LSB 0x18 270#define STMPE24XX_REG_IEGPIOR_LSB 0x18
271#define STMPE24XX_REG_ISGPIOR_MSB 0x19 271#define STMPE24XX_REG_ISGPIOR_MSB 0x19
272#define STMPE24XX_REG_GPMR_LSB 0xA5 272#define STMPE24XX_REG_GPMR_LSB 0xA4
273#define STMPE24XX_REG_GPSR_LSB 0x85 273#define STMPE24XX_REG_GPSR_LSB 0x85
274#define STMPE24XX_REG_GPCR_LSB 0x88 274#define STMPE24XX_REG_GPCR_LSB 0x88
275#define STMPE24XX_REG_GPDR_LSB 0x8B 275#define STMPE24XX_REG_GPDR_LSB 0x8B
diff --git a/drivers/mfd/twl4030-power.c b/drivers/mfd/twl4030-power.c
index cf92a6d1c532..50f9091bcd38 100644
--- a/drivers/mfd/twl4030-power.c
+++ b/drivers/mfd/twl4030-power.c
@@ -44,6 +44,15 @@ static u8 twl4030_start_script_address = 0x2b;
44#define PWR_DEVSLP BIT(1) 44#define PWR_DEVSLP BIT(1)
45#define PWR_DEVOFF BIT(0) 45#define PWR_DEVOFF BIT(0)
46 46
47/* Register bits for CFG_P1_TRANSITION (also for P2 and P3) */
48#define STARTON_SWBUG BIT(7) /* Start on watchdog */
49#define STARTON_VBUS BIT(5) /* Start on VBUS */
50#define STARTON_VBAT BIT(4) /* Start on battery insert */
51#define STARTON_RTC BIT(3) /* Start on RTC */
52#define STARTON_USB BIT(2) /* Start on USB host */
53#define STARTON_CHG BIT(1) /* Start on charger */
54#define STARTON_PWON BIT(0) /* Start on PWRON button */
55
47#define SEQ_OFFSYNC (1 << 0) 56#define SEQ_OFFSYNC (1 << 0)
48 57
49#define PHY_TO_OFF_PM_MASTER(p) (p - 0x36) 58#define PHY_TO_OFF_PM_MASTER(p) (p - 0x36)
@@ -606,6 +615,44 @@ twl4030_power_configure_resources(const struct twl4030_power_data *pdata)
606 return 0; 615 return 0;
607} 616}
608 617
618static int twl4030_starton_mask_and_set(u8 bitmask, u8 bitvalues)
619{
620 u8 regs[3] = { TWL4030_PM_MASTER_CFG_P1_TRANSITION,
621 TWL4030_PM_MASTER_CFG_P2_TRANSITION,
622 TWL4030_PM_MASTER_CFG_P3_TRANSITION, };
623 u8 val;
624 int i, err;
625
626 err = twl_i2c_write_u8(TWL_MODULE_PM_MASTER, TWL4030_PM_MASTER_KEY_CFG1,
627 TWL4030_PM_MASTER_PROTECT_KEY);
628 if (err)
629 goto relock;
630 err = twl_i2c_write_u8(TWL_MODULE_PM_MASTER,
631 TWL4030_PM_MASTER_KEY_CFG2,
632 TWL4030_PM_MASTER_PROTECT_KEY);
633 if (err)
634 goto relock;
635
636 for (i = 0; i < sizeof(regs); i++) {
637 err = twl_i2c_read_u8(TWL_MODULE_PM_MASTER,
638 &val, regs[i]);
639 if (err)
640 break;
641 val = (~bitmask & val) | (bitmask & bitvalues);
642 err = twl_i2c_write_u8(TWL_MODULE_PM_MASTER,
643 val, regs[i]);
644 if (err)
645 break;
646 }
647
648 if (err)
649 pr_err("TWL4030 Register access failed: %i\n", err);
650
651relock:
652 return twl_i2c_write_u8(TWL_MODULE_PM_MASTER, 0,
653 TWL4030_PM_MASTER_PROTECT_KEY);
654}
655
609/* 656/*
610 * In master mode, start the power off sequence. 657 * In master mode, start the power off sequence.
611 * After a successful execution, TWL shuts down the power to the SoC 658 * After a successful execution, TWL shuts down the power to the SoC
@@ -615,6 +662,11 @@ void twl4030_power_off(void)
615{ 662{
616 int err; 663 int err;
617 664
665 /* Disable start on charger or VBUS as it can break poweroff */
666 err = twl4030_starton_mask_and_set(STARTON_VBUS | STARTON_CHG, 0);
667 if (err)
668 pr_err("TWL4030 Unable to configure start-up\n");
669
618 err = twl_i2c_write_u8(TWL_MODULE_PM_MASTER, PWR_DEVOFF, 670 err = twl_i2c_write_u8(TWL_MODULE_PM_MASTER, PWR_DEVOFF,
619 TWL4030_PM_MASTER_P1_SW_EVENTS); 671 TWL4030_PM_MASTER_P1_SW_EVENTS);
620 if (err) 672 if (err)
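twl4030_power_off() above first disables the VBUS and charger start-on triggers; the helper has to unlock the write-protected PM master registers with the two-byte key sequence and relock them afterwards. A fragment of that sequence, constants as in the hunk:

    /* Unlock: both key bytes go to PROTECT_KEY, in order. */
    twl_i2c_write_u8(TWL_MODULE_PM_MASTER, TWL4030_PM_MASTER_KEY_CFG1,
                     TWL4030_PM_MASTER_PROTECT_KEY);
    twl_i2c_write_u8(TWL_MODULE_PM_MASTER, TWL4030_PM_MASTER_KEY_CFG2,
                     TWL4030_PM_MASTER_PROTECT_KEY);
    /* ... read-modify-write CFG_P1..P3_TRANSITION ... */
    /* Relock: writing 0 restores protection. */
    twl_i2c_write_u8(TWL_MODULE_PM_MASTER, 0,
                     TWL4030_PM_MASTER_PROTECT_KEY);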
diff --git a/drivers/mfd/viperboard.c b/drivers/mfd/viperboard.c
index e00f5340ed87..3c2b8f9e3c84 100644
--- a/drivers/mfd/viperboard.c
+++ b/drivers/mfd/viperboard.c
@@ -93,8 +93,9 @@ static int vprbrd_probe(struct usb_interface *interface,
93 version >> 8, version & 0xff, 93 version >> 8, version & 0xff,
94 vb->usb_dev->bus->busnum, vb->usb_dev->devnum); 94 vb->usb_dev->bus->busnum, vb->usb_dev->devnum);
95 95
96 ret = mfd_add_devices(&interface->dev, -1, vprbrd_devs, 96 ret = mfd_add_devices(&interface->dev, PLATFORM_DEVID_AUTO,
97 ARRAY_SIZE(vprbrd_devs), NULL, 0, NULL); 97 vprbrd_devs, ARRAY_SIZE(vprbrd_devs), NULL, 0,
98 NULL);
98 if (ret != 0) { 99 if (ret != 0) {
99 dev_err(&interface->dev, "Failed to add mfd devices to core."); 100 dev_err(&interface->dev, "Failed to add mfd devices to core.");
100 goto error; 101 goto error;
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
index 63ea1941e973..7ba83ffb08ac 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
@@ -575,10 +575,24 @@ static void xgene_gmac_tx_disable(struct xgene_enet_pdata *pdata)
575 xgene_enet_wr_mcx_mac(pdata, MAC_CONFIG_1_ADDR, data & ~TX_EN); 575 xgene_enet_wr_mcx_mac(pdata, MAC_CONFIG_1_ADDR, data & ~TX_EN);
576} 576}
577 577
578static void xgene_enet_reset(struct xgene_enet_pdata *pdata) 578bool xgene_ring_mgr_init(struct xgene_enet_pdata *p)
579{
580 if (!ioread32(p->ring_csr_addr + CLKEN_ADDR))
581 return false;
582
583 if (ioread32(p->ring_csr_addr + SRST_ADDR))
584 return false;
585
586 return true;
587}
588
589static int xgene_enet_reset(struct xgene_enet_pdata *pdata)
579{ 590{
580 u32 val; 591 u32 val;
581 592
593 if (!xgene_ring_mgr_init(pdata))
594 return -ENODEV;
595
582 clk_prepare_enable(pdata->clk); 596 clk_prepare_enable(pdata->clk);
583 clk_disable_unprepare(pdata->clk); 597 clk_disable_unprepare(pdata->clk);
584 clk_prepare_enable(pdata->clk); 598 clk_prepare_enable(pdata->clk);
@@ -590,6 +604,8 @@ static void xgene_enet_reset(struct xgene_enet_pdata *pdata)
590 val |= SCAN_AUTO_INCR; 604 val |= SCAN_AUTO_INCR;
591 MGMT_CLOCK_SEL_SET(&val, 1); 605 MGMT_CLOCK_SEL_SET(&val, 1);
592 xgene_enet_wr_mcx_mac(pdata, MII_MGMT_CONFIG_ADDR, val); 606 xgene_enet_wr_mcx_mac(pdata, MII_MGMT_CONFIG_ADDR, val);
607
608 return 0;
593} 609}
594 610
595static void xgene_gport_shutdown(struct xgene_enet_pdata *pdata) 611static void xgene_gport_shutdown(struct xgene_enet_pdata *pdata)
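The X-Gene hunks above and below make each port's reset() return an error when the ring manager is not usable, probing two CSRs first: the clock-enable register must be set and the soft-reset register clear. Condensed:

    /* Ring manager is ready only when its clock is enabled and it
     * has been taken out of reset. */
    static bool ring_mgr_ready(void __iomem *ring_csr)
    {
            return ioread32(ring_csr + CLKEN_ADDR) &&
                   !ioread32(ring_csr + SRST_ADDR);
    }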
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h
index 38558584080e..ec45f3256f0e 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h
@@ -104,6 +104,9 @@ enum xgene_enet_rm {
104#define BLOCK_ETH_MAC_OFFSET 0x0000 104#define BLOCK_ETH_MAC_OFFSET 0x0000
105#define BLOCK_ETH_MAC_CSR_OFFSET 0x2800 105#define BLOCK_ETH_MAC_CSR_OFFSET 0x2800
106 106
107#define CLKEN_ADDR 0xc208
108#define SRST_ADDR 0xc200
109
107#define MAC_ADDR_REG_OFFSET 0x00 110#define MAC_ADDR_REG_OFFSET 0x00
108#define MAC_COMMAND_REG_OFFSET 0x04 111#define MAC_COMMAND_REG_OFFSET 0x04
109#define MAC_WRITE_REG_OFFSET 0x08 112#define MAC_WRITE_REG_OFFSET 0x08
@@ -318,6 +321,7 @@ void xgene_enet_parse_error(struct xgene_enet_desc_ring *ring,
318 321
319int xgene_enet_mdio_config(struct xgene_enet_pdata *pdata); 322int xgene_enet_mdio_config(struct xgene_enet_pdata *pdata);
320void xgene_enet_mdio_remove(struct xgene_enet_pdata *pdata); 323void xgene_enet_mdio_remove(struct xgene_enet_pdata *pdata);
324bool xgene_ring_mgr_init(struct xgene_enet_pdata *p);
321 325
322extern struct xgene_mac_ops xgene_gmac_ops; 326extern struct xgene_mac_ops xgene_gmac_ops;
323extern struct xgene_port_ops xgene_gport_ops; 327extern struct xgene_port_ops xgene_gport_ops;
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
index 3c208cc6f6bb..123669696184 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
@@ -639,9 +639,9 @@ static int xgene_enet_create_desc_rings(struct net_device *ndev)
 	struct device *dev = ndev_to_dev(ndev);
 	struct xgene_enet_desc_ring *rx_ring, *tx_ring, *cp_ring;
 	struct xgene_enet_desc_ring *buf_pool = NULL;
-	u8 cpu_bufnum = 0, eth_bufnum = 0;
-	u8 bp_bufnum = 0x20;
-	u16 ring_id, ring_num = 0;
+	u8 cpu_bufnum = 0, eth_bufnum = START_ETH_BUFNUM;
+	u8 bp_bufnum = START_BP_BUFNUM;
+	u16 ring_id, ring_num = START_RING_NUM;
 	int ret;
 
 	/* allocate rx descriptor ring */
@@ -852,7 +852,9 @@ static int xgene_enet_init_hw(struct xgene_enet_pdata *pdata)
 	u16 dst_ring_num;
 	int ret;
 
-	pdata->port_ops->reset(pdata);
+	ret = pdata->port_ops->reset(pdata);
+	if (ret)
+		return ret;
 
 	ret = xgene_enet_create_desc_rings(ndev);
 	if (ret) {
@@ -954,6 +956,7 @@ static int xgene_enet_probe(struct platform_device *pdev)
 
 	return ret;
err:
+	unregister_netdev(ndev);
 	free_netdev(ndev);
 	return ret;
 }
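Since xgene_port_ops.reset() now returns int, xgene_enet_init_hw() can refuse to build descriptor rings on hardware whose ring manager never came up. A small user-space sketch of that propagation pattern; the types and names here are illustrative stand-ins, not the driver's:

#include <stdio.h>

struct port_ops {
	int (*reset)(void *pdata);
};

static int failing_reset(void *pdata)
{
	(void)pdata;
	return -19;	/* -ENODEV: ring manager not ready */
}

static int init_hw(const struct port_ops *ops, void *pdata)
{
	int ret = ops->reset(pdata);

	if (ret)
		return ret;	/* don't touch rings on dead hardware */
	/* ... create descriptor rings, program the MAC ... */
	return 0;
}

int main(void)
{
	struct port_ops ops = { .reset = failing_reset };

	printf("init_hw: %d\n", init_hw(&ops, NULL));	/* -19 */
	return 0;
}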
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.h b/drivers/net/ethernet/apm/xgene/xgene_enet_main.h
index 874e5a01161f..f9958fae6ffd 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.h
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.h
@@ -38,6 +38,9 @@
 #define SKB_BUFFER_SIZE		(XGENE_ENET_MAX_MTU - NET_IP_ALIGN)
 #define NUM_PKT_BUF	64
 #define NUM_BUFPOOL	32
+#define START_ETH_BUFNUM	2
+#define START_BP_BUFNUM	0x22
+#define START_RING_NUM	8
 
 #define PHY_POLL_LINK_ON	(10 * HZ)
 #define PHY_POLL_LINK_OFF	(PHY_POLL_LINK_ON / 5)
@@ -83,7 +86,7 @@ struct xgene_mac_ops {
 };
 
 struct xgene_port_ops {
-	void (*reset)(struct xgene_enet_pdata *pdata);
+	int (*reset)(struct xgene_enet_pdata *pdata);
 	void (*cle_bypass)(struct xgene_enet_pdata *pdata,
 			   u32 dst_ring_num, u16 bufpool_id);
 	void (*shutdown)(struct xgene_enet_pdata *pdata);
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c b/drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c
index c22f32622fa9..f5d4f68c288c 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c
@@ -311,14 +311,19 @@ static void xgene_sgmac_tx_disable(struct xgene_enet_pdata *p)
 	xgene_sgmac_rxtx(p, TX_EN, false);
 }
 
-static void xgene_enet_reset(struct xgene_enet_pdata *p)
+static int xgene_enet_reset(struct xgene_enet_pdata *p)
 {
+	if (!xgene_ring_mgr_init(p))
+		return -ENODEV;
+
 	clk_prepare_enable(p->clk);
 	clk_disable_unprepare(p->clk);
 	clk_prepare_enable(p->clk);
 
 	xgene_enet_ecc_init(p);
 	xgene_enet_config_ring_if_assoc(p);
+
+	return 0;
 }
 
 static void xgene_enet_cle_bypass(struct xgene_enet_pdata *p,
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c b/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c
index 67d07206b3c7..a18a9d1f1143 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c
@@ -252,14 +252,19 @@ static void xgene_xgmac_tx_disable(struct xgene_enet_pdata *pdata)
 	xgene_enet_wr_mac(pdata, AXGMAC_CONFIG_1, data & ~HSTTFEN);
 }
 
-static void xgene_enet_reset(struct xgene_enet_pdata *pdata)
+static int xgene_enet_reset(struct xgene_enet_pdata *pdata)
 {
+	if (!xgene_ring_mgr_init(pdata))
+		return -ENODEV;
+
 	clk_prepare_enable(pdata->clk);
 	clk_disable_unprepare(pdata->clk);
 	clk_prepare_enable(pdata->clk);
 
 	xgene_enet_ecc_init(pdata);
 	xgene_enet_config_ring_if_assoc(pdata);
+
+	return 0;
 }
 
 static void xgene_enet_xgcle_bypass(struct xgene_enet_pdata *pdata,
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
index 3a6778a667f4..531bb7c57531 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.c
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
@@ -1110,7 +1110,8 @@ static int bcm_sysport_init_tx_ring(struct bcm_sysport_priv *priv,
 	/* We just need one DMA descriptor which is DMA-able, since writing to
 	 * the port will allocate a new descriptor in its internal linked-list
 	 */
-	p = dma_zalloc_coherent(kdev, 1, &ring->desc_dma, GFP_KERNEL);
+	p = dma_zalloc_coherent(kdev, sizeof(struct dma_desc), &ring->desc_dma,
+				GFP_KERNEL);
 	if (!p) {
 		netif_err(priv, hw, priv->netdev, "DMA alloc failed\n");
 		return -ENOMEM;
@@ -1174,6 +1175,13 @@ static void bcm_sysport_fini_tx_ring(struct bcm_sysport_priv *priv,
 	if (!(reg & TDMA_DISABLED))
 		netdev_warn(priv->netdev, "TDMA not stopped!\n");
 
+	/* ring->cbs is the last part in bcm_sysport_init_tx_ring which could
+	 * fail, so by checking this pointer we know whether the TX ring was
+	 * fully initialized or not.
+	 */
+	if (!ring->cbs)
+		return;
+
 	napi_disable(&ring->napi);
 	netif_napi_del(&ring->napi);
 
@@ -1183,7 +1191,8 @@ static void bcm_sysport_fini_tx_ring(struct bcm_sysport_priv *priv,
 	ring->cbs = NULL;
 
 	if (ring->desc_dma) {
-		dma_free_coherent(kdev, 1, ring->desc_cpu, ring->desc_dma);
+		dma_free_coherent(kdev, sizeof(struct dma_desc),
+				  ring->desc_cpu, ring->desc_dma);
 		ring->desc_dma = 0;
 	}
 	ring->size = 0;
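The SYSTEMPORT change replaces a bogus one-byte coherent allocation with one sized by the descriptor type, and the matching free must pass the same size. A sketch of the sizing idea with calloc()/free() standing in for dma_zalloc_coherent()/dma_free_coherent(); the two-word layout below is a hypothetical stand-in for the driver's real struct dma_desc:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct dma_desc {	/* hypothetical stand-in */
	uint32_t addr_status_len;
	uint32_t addr_lo;
};

int main(void)
{
	/* Allocating "1" byte, as the old code did, under-sizes the
	 * buffer: the port still DMAs sizeof(struct dma_desc) bytes. */
	struct dma_desc *desc = calloc(1, sizeof(*desc));

	if (!desc)
		return 1;
	printf("descriptor size: %zu bytes\n", sizeof(*desc));
	free(desc);	/* alloc and free sizes must agree */
	return 0;
}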
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index fdc9ec09e453..da1a2500c91c 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -2140,6 +2140,12 @@ static int bcmgenet_open(struct net_device *dev)
 		goto err_irq0;
 	}
 
+	/* Re-configure the port multiplexer towards the PHY device */
+	bcmgenet_mii_config(priv->dev, false);
+
+	phy_connect_direct(dev, priv->phydev, bcmgenet_mii_setup,
+			   priv->phy_interface);
+
 	bcmgenet_netif_start(dev);
 
 	return 0;
@@ -2184,6 +2190,9 @@ static int bcmgenet_close(struct net_device *dev)
 
 	bcmgenet_netif_stop(dev);
 
+	/* Really kill the PHY state machine and disconnect from it */
+	phy_disconnect(priv->phydev);
+
 	/* Disable MAC receive */
 	umac_enable_set(priv, CMD_RX_EN, false);
 
@@ -2685,7 +2694,7 @@ static int bcmgenet_resume(struct device *d)
 
 	phy_init_hw(priv->phydev);
 	/* Speed settings must be restored */
-	bcmgenet_mii_config(priv->dev);
+	bcmgenet_mii_config(priv->dev, false);
 
 	/* disable ethernet MAC while updating its registers */
 	umac_enable_set(priv, CMD_TX_EN | CMD_RX_EN, false);
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.h b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
index dbf524ea3b19..31b2da5f9b82 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.h
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
@@ -617,9 +617,10 @@ GENET_IO_MACRO(rbuf, GENET_RBUF_OFF);
 
 /* MDIO routines */
 int bcmgenet_mii_init(struct net_device *dev);
-int bcmgenet_mii_config(struct net_device *dev);
+int bcmgenet_mii_config(struct net_device *dev, bool init);
 void bcmgenet_mii_exit(struct net_device *dev);
 void bcmgenet_mii_reset(struct net_device *dev);
+void bcmgenet_mii_setup(struct net_device *dev);
 
 /* Wake-on-LAN routines */
 void bcmgenet_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol);
diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c
index 9ff799a9f801..933cd7e7cd33 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmmii.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c
@@ -77,7 +77,7 @@ static int bcmgenet_mii_write(struct mii_bus *bus, int phy_id,
 /* setup netdev link state when PHY link status change and
  * update UMAC and RGMII block when link up
  */
-static void bcmgenet_mii_setup(struct net_device *dev)
+void bcmgenet_mii_setup(struct net_device *dev)
 {
 	struct bcmgenet_priv *priv = netdev_priv(dev);
 	struct phy_device *phydev = priv->phydev;
@@ -211,7 +211,7 @@ static void bcmgenet_moca_phy_setup(struct bcmgenet_priv *priv)
 	bcmgenet_sys_writel(priv, reg, SYS_PORT_CTRL);
 }
 
-int bcmgenet_mii_config(struct net_device *dev)
+int bcmgenet_mii_config(struct net_device *dev, bool init)
 {
 	struct bcmgenet_priv *priv = netdev_priv(dev);
 	struct phy_device *phydev = priv->phydev;
@@ -298,7 +298,8 @@ int bcmgenet_mii_config(struct net_device *dev)
 		return -EINVAL;
 	}
 
-	dev_info(kdev, "configuring instance for %s\n", phy_name);
+	if (init)
+		dev_info(kdev, "configuring instance for %s\n", phy_name);
 
 	return 0;
 }
@@ -350,7 +351,7 @@ static int bcmgenet_mii_probe(struct net_device *dev)
 	 * PHY speed which is needed for bcmgenet_mii_config() to configure
 	 * things appropriately.
 	 */
-	ret = bcmgenet_mii_config(dev);
+	ret = bcmgenet_mii_config(dev, true);
 	if (ret) {
 		phy_disconnect(priv->phydev);
 		return ret;
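The new bool argument to bcmgenet_mii_config() exists so the "configuring instance" message is logged once at probe time and suppressed on the open/resume reconfiguration paths, which now call the same function. A trivial sketch of that one-shot-logging split (names illustrative):

#include <stdbool.h>
#include <stdio.h>

static int mii_config(const char *phy_name, bool init)
{
	/* ... program the port multiplexer for phy_name here ... */
	if (init)
		printf("configuring instance for %s\n", phy_name);
	return 0;
}

int main(void)
{
	mii_config("external RGMII", true);	/* probe: logs once */
	mii_config("external RGMII", false);	/* open/resume: silent */
	return 0;
}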
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c
index 6fe300e316c3..cca604994003 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c
@@ -79,8 +79,9 @@ static void cxgb4_dcb_cleanup_apps(struct net_device *dev)
 		app.protocol = dcb->app_priority[i].protocolid;
 
 		if (dcb->dcb_version == FW_PORT_DCB_VER_IEEE) {
+			app.priority = dcb->app_priority[i].user_prio_map;
 			app.selector = dcb->app_priority[i].sel_field + 1;
-			err = dcb_ieee_setapp(dev, &app);
+			err = dcb_ieee_delapp(dev, &app);
 		} else {
 			app.selector = !!(dcb->app_priority[i].sel_field);
 			err = dcb_setapp(dev, &app);
@@ -122,7 +123,11 @@ void cxgb4_dcb_state_fsm(struct net_device *dev,
 	case CXGB4_DCB_INPUT_FW_ENABLED: {
 		/* we're going to use Firmware DCB */
 		dcb->state = CXGB4_DCB_STATE_FW_INCOMPLETE;
-		dcb->supported = CXGB4_DCBX_FW_SUPPORT;
+		dcb->supported = DCB_CAP_DCBX_LLD_MANAGED;
+		if (dcb->dcb_version == FW_PORT_DCB_VER_IEEE)
+			dcb->supported |= DCB_CAP_DCBX_VER_IEEE;
+		else
+			dcb->supported |= DCB_CAP_DCBX_VER_CEE;
 		break;
 	}
 
@@ -436,14 +441,17 @@ static void cxgb4_getpgtccfg(struct net_device *dev, int tc,
 	*up_tc_map = (1 << tc);
 
 	/* prio_type is link strict */
-	*prio_type = 0x2;
+	if (*pgid != 0xF)
+		*prio_type = 0x2;
 }
 
 static void cxgb4_getpgtccfg_tx(struct net_device *dev, int tc,
 				u8 *prio_type, u8 *pgid, u8 *bw_per,
 				u8 *up_tc_map)
 {
-	return cxgb4_getpgtccfg(dev, tc, prio_type, pgid, bw_per, up_tc_map, 1);
+	/* tc 0 is written at MSB position */
+	return cxgb4_getpgtccfg(dev, (7 - tc), prio_type, pgid, bw_per,
+				up_tc_map, 1);
 }
 
 
@@ -451,7 +459,9 @@ static void cxgb4_getpgtccfg_rx(struct net_device *dev, int tc,
 				u8 *prio_type, u8 *pgid, u8 *bw_per,
 				u8 *up_tc_map)
 {
-	return cxgb4_getpgtccfg(dev, tc, prio_type, pgid, bw_per, up_tc_map, 0);
+	/* tc 0 is written at MSB position */
+	return cxgb4_getpgtccfg(dev, (7 - tc), prio_type, pgid, bw_per,
+				up_tc_map, 0);
 }
 
 static void cxgb4_setpgtccfg_tx(struct net_device *dev, int tc,
@@ -461,6 +471,7 @@ static void cxgb4_setpgtccfg_tx(struct net_device *dev, int tc,
 	struct fw_port_cmd pcmd;
 	struct port_info *pi = netdev2pinfo(dev);
 	struct adapter *adap = pi->adapter;
+	int fw_tc = 7 - tc;
 	u32 _pgid;
 	int err;
 
@@ -479,8 +490,8 @@ static void cxgb4_setpgtccfg_tx(struct net_device *dev, int tc,
 	}
 
 	_pgid = be32_to_cpu(pcmd.u.dcb.pgid.pgid);
-	_pgid &= ~(0xF << (tc * 4));
-	_pgid |= pgid << (tc * 4);
+	_pgid &= ~(0xF << (fw_tc * 4));
+	_pgid |= pgid << (fw_tc * 4);
 	pcmd.u.dcb.pgid.pgid = cpu_to_be32(_pgid);
 
 	INIT_PORT_DCB_WRITE_CMD(pcmd, pi->port_id);
@@ -593,7 +604,7 @@ static void cxgb4_getpfccfg(struct net_device *dev, int priority, u8 *pfccfg)
 	    priority >= CXGB4_MAX_PRIORITY)
 		*pfccfg = 0;
 	else
-		*pfccfg = (pi->dcb.pfcen >> priority) & 1;
+		*pfccfg = (pi->dcb.pfcen >> (7 - priority)) & 1;
 }
 
 /* Enable/disable Priority Pause Frames for the specified Traffic Class
@@ -618,9 +629,9 @@ static void cxgb4_setpfccfg(struct net_device *dev, int priority, u8 pfccfg)
 	pcmd.u.dcb.pfc.pfcen = pi->dcb.pfcen;
 
 	if (pfccfg)
-		pcmd.u.dcb.pfc.pfcen |= (1 << priority);
+		pcmd.u.dcb.pfc.pfcen |= (1 << (7 - priority));
 	else
-		pcmd.u.dcb.pfc.pfcen &= (~(1 << priority));
+		pcmd.u.dcb.pfc.pfcen &= (~(1 << (7 - priority)));
 
 	err = t4_wr_mbox(adap, adap->mbox, &pcmd, sizeof(pcmd), &pcmd);
 	if (err != FW_PORT_DCB_CFG_SUCCESS) {
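Most of these DCB fixes share one quirk: the firmware stores traffic class 0 in the most significant nibble of the 32-bit Priority Group ID word, and priority 0 in bit 7 of the PFC bitmap, so the host index must be mirrored as 7 - tc before shifting. A worked standalone example of the nibble update:

#include <stdint.h>
#include <stdio.h>

static uint32_t set_pgid(uint32_t pgid_word, int tc, uint32_t pgid)
{
	int fw_tc = 7 - tc;	/* firmware keeps TC 0 at the MSB end */

	pgid_word &= ~(0xFu << (fw_tc * 4));
	pgid_word |= pgid << (fw_tc * 4);
	return pgid_word;
}

int main(void)
{
	/* Host TC 0 with Priority Group 5 lands in bits 31:28. */
	printf("0x%08x\n", set_pgid(0, 0, 5));	/* 0x50000000 */
	/* Host TC 7 lands in bits 3:0. */
	printf("0x%08x\n", set_pgid(0, 7, 5));	/* 0x00000005 */
	return 0;
}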
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
index 5e1b314e11af..39f2b13e66c7 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
@@ -2914,7 +2914,8 @@ static int t4_sge_init_hard(struct adapter *adap)
 int t4_sge_init(struct adapter *adap)
 {
 	struct sge *s = &adap->sge;
-	u32 sge_control, sge_conm_ctrl;
+	u32 sge_control, sge_control2, sge_conm_ctrl;
+	unsigned int ingpadboundary, ingpackboundary;
 	int ret, egress_threshold;
 
 	/*
@@ -2924,8 +2925,31 @@ int t4_sge_init(struct adapter *adap)
 	sge_control = t4_read_reg(adap, SGE_CONTROL);
 	s->pktshift = PKTSHIFT_GET(sge_control);
 	s->stat_len = (sge_control & EGRSTATUSPAGESIZE_MASK) ? 128 : 64;
-	s->fl_align = 1 << (INGPADBOUNDARY_GET(sge_control) +
-			    X_INGPADBOUNDARY_SHIFT);
+
+	/* T4 uses a single control field to specify both the PCIe Padding and
+	 * Packing Boundary. T5 introduced the ability to specify these
+	 * separately. The actual Ingress Packet Data alignment boundary
+	 * within Packed Buffer Mode is the maximum of these two
+	 * specifications.
+	 */
+	ingpadboundary = 1 << (INGPADBOUNDARY_GET(sge_control) +
+			       X_INGPADBOUNDARY_SHIFT);
+	if (is_t4(adap->params.chip)) {
+		s->fl_align = ingpadboundary;
+	} else {
+		/* T5 has a different interpretation of one of the PCIe Packing
+		 * Boundary values.
+		 */
+		sge_control2 = t4_read_reg(adap, SGE_CONTROL2_A);
+		ingpackboundary = INGPACKBOUNDARY_G(sge_control2);
+		if (ingpackboundary == INGPACKBOUNDARY_16B_X)
+			ingpackboundary = 16;
+		else
+			ingpackboundary = 1 << (ingpackboundary +
+						INGPACKBOUNDARY_SHIFT_X);
+
+		s->fl_align = max(ingpadboundary, ingpackboundary);
+	}
 
 	if (adap->flags & USING_SOFT_PARAMS)
 		ret = t4_sge_init_soft(adap);
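The fl_align logic above boils down to: T4 uses the padding boundary alone, while T5 takes the maximum of the padding and packing boundaries, where a packing field of 0 encodes 16 bytes rather than the expected 32. A standalone sketch over hypothetical, already-decoded field values:

#include <stdio.h>

#define X_INGPADBOUNDARY_SHIFT	5
#define INGPACKBOUNDARY_SHIFT_X	5
#define INGPACKBOUNDARY_16B_X	0

static unsigned int fl_align(int is_t4, unsigned int padfield,
			     unsigned int packfield)
{
	unsigned int pad = 1u << (padfield + X_INGPADBOUNDARY_SHIFT);
	unsigned int pack;

	if (is_t4)
		return pad;
	/* T5: the "0" encoding means 16 bytes, not 32. */
	pack = (packfield == INGPACKBOUNDARY_16B_X)
		? 16 : 1u << (packfield + INGPACKBOUNDARY_SHIFT_X);
	return pad > pack ? pad : pack;
}

int main(void)
{
	printf("T4 pad=0:        %u\n", fl_align(1, 0, 0));	/* 32 */
	printf("T5 pad=0 pack=0: %u\n", fl_align(0, 0, 0));	/* 32 */
	printf("T5 pad=0 pack=1: %u\n", fl_align(0, 0, 1));	/* 64 */
	return 0;
}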
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index a9d9d74e4f09..163a2a14948c 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -3129,12 +3129,51 @@ int t4_fixup_host_params(struct adapter *adap, unsigned int page_size,
 			 HOSTPAGESIZEPF6(sge_hps) |
 			 HOSTPAGESIZEPF7(sge_hps));
 
-	t4_set_reg_field(adap, SGE_CONTROL,
-			 INGPADBOUNDARY_MASK |
-			 EGRSTATUSPAGESIZE_MASK,
-			 INGPADBOUNDARY(fl_align_log - 5) |
-			 EGRSTATUSPAGESIZE(stat_len != 64));
-
+	if (is_t4(adap->params.chip)) {
+		t4_set_reg_field(adap, SGE_CONTROL,
+				 INGPADBOUNDARY_MASK |
+				 EGRSTATUSPAGESIZE_MASK,
+				 INGPADBOUNDARY(fl_align_log - 5) |
+				 EGRSTATUSPAGESIZE(stat_len != 64));
+	} else {
+		/* T5 introduced the separation of the Free List Padding and
+		 * Packing Boundaries. Thus, we can select a smaller Padding
+		 * Boundary to avoid uselessly chewing up PCIe Link and Memory
+		 * Bandwidth, and use a Packing Boundary which is large enough
+		 * to avoid false sharing between CPUs, etc.
+		 *
+		 * For the PCI Link, the smaller the Padding Boundary the
+		 * better. For the Memory Controller, a smaller Padding
+		 * Boundary is better until we cross under the Memory Line
+		 * Size (the minimum unit of transfer to/from Memory). If we
+		 * have a Padding Boundary which is smaller than the Memory
+		 * Line Size, that'll involve a Read-Modify-Write cycle on the
+		 * Memory Controller which is never good. For T5 the smallest
+		 * Padding Boundary which we can select is 32 bytes which is
+		 * larger than any known Memory Controller Line Size so we'll
+		 * use that.
+		 *
+		 * T5 has a different interpretation of the "0" value for the
+		 * Packing Boundary. This corresponds to 16 bytes instead of
+		 * the expected 32 bytes. We never have a Packing Boundary
+		 * less than 32 bytes so we can't use that special value but
+		 * on the other hand, if we wanted 32 bytes, the best we can
+		 * really do is 64 bytes.
+		 */
+		if (fl_align <= 32) {
+			fl_align = 64;
+			fl_align_log = 6;
+		}
+		t4_set_reg_field(adap, SGE_CONTROL,
+				 INGPADBOUNDARY_MASK |
+				 EGRSTATUSPAGESIZE_MASK,
+				 INGPADBOUNDARY(INGPCIEBOUNDARY_32B_X) |
+				 EGRSTATUSPAGESIZE(stat_len != 64));
+		t4_set_reg_field(adap, SGE_CONTROL2_A,
+				 INGPACKBOUNDARY_V(INGPACKBOUNDARY_M),
+				 INGPACKBOUNDARY_V(fl_align_log -
+						   INGPACKBOUNDARY_SHIFT_X));
+	}
 	/*
 	 * Adjust various SGE Free List Host Buffer Sizes.
 	 *
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
index a1024db5dc13..8d2de1006b08 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
@@ -95,6 +95,7 @@
 #define  X_INGPADBOUNDARY_SHIFT 5
 
 #define SGE_CONTROL 0x1008
+#define SGE_CONTROL2_A 0x1124
 #define  DCASYSTYPE 0x00080000U
 #define  RXPKTCPLMODE_MASK 0x00040000U
 #define  RXPKTCPLMODE_SHIFT 18
@@ -106,6 +107,7 @@
 #define  PKTSHIFT_SHIFT 10
 #define  PKTSHIFT(x) ((x) << PKTSHIFT_SHIFT)
 #define  PKTSHIFT_GET(x) (((x) & PKTSHIFT_MASK) >> PKTSHIFT_SHIFT)
+#define  INGPCIEBOUNDARY_32B_X 0
 #define  INGPCIEBOUNDARY_MASK 0x00000380U
 #define  INGPCIEBOUNDARY_SHIFT 7
 #define  INGPCIEBOUNDARY(x) ((x) << INGPCIEBOUNDARY_SHIFT)
@@ -114,6 +116,14 @@
 #define  INGPADBOUNDARY(x) ((x) << INGPADBOUNDARY_SHIFT)
 #define  INGPADBOUNDARY_GET(x) (((x) & INGPADBOUNDARY_MASK) \
 				 >> INGPADBOUNDARY_SHIFT)
+#define  INGPACKBOUNDARY_16B_X 0
+#define  INGPACKBOUNDARY_SHIFT_X 5
+
+#define  INGPACKBOUNDARY_S 16
+#define  INGPACKBOUNDARY_M 0x7U
+#define  INGPACKBOUNDARY_V(x) ((x) << INGPACKBOUNDARY_S)
+#define  INGPACKBOUNDARY_G(x) (((x) >> INGPACKBOUNDARY_S) \
+				 & INGPACKBOUNDARY_M)
 #define  EGRPCIEBOUNDARY_MASK 0x0000000eU
 #define  EGRPCIEBOUNDARY_SHIFT 1
 #define  EGRPCIEBOUNDARY(x) ((x) << EGRPCIEBOUNDARY_SHIFT)
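The new INGPACKBOUNDARY_S/_M/_V/_G macros follow the usual shift/mask idiom: _V() places a field value into the register word, _G() extracts it back out. A quick standalone check against a hypothetical SGE_CONTROL2 value:

#include <stdint.h>
#include <stdio.h>

#define INGPACKBOUNDARY_S	16
#define INGPACKBOUNDARY_M	0x7U
#define INGPACKBOUNDARY_V(x)	((x) << INGPACKBOUNDARY_S)
#define INGPACKBOUNDARY_G(x)	(((x) >> INGPACKBOUNDARY_S) \
				 & INGPACKBOUNDARY_M)

int main(void)
{
	/* Hypothetical register value with the 3-bit field set to 1. */
	uint32_t reg = INGPACKBOUNDARY_V(1u);

	printf("raw:     0x%08x\n", (unsigned int)reg);	/* 0x00010000 */
	printf("decoded: %u\n", INGPACKBOUNDARY_G(reg));	/* 1 */
	return 0;
}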
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h b/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h
index 68eaa9c88c7d..3d06e77d7121 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h
@@ -299,6 +299,14 @@ struct sge {
 	u16 timer_val[SGE_NTIMERS];	/* interrupt holdoff timer array */
 	u8 counter_val[SGE_NCOUNTERS];	/* interrupt RX threshold array */
 
+	/* Decoded Adapter Parameters.
+	 */
+	u32 fl_pg_order;	/* large page allocation size */
+	u32 stat_len;		/* length of status page at ring end */
+	u32 pktshift;		/* padding between CPL & packet data */
+	u32 fl_align;		/* response queue message alignment */
+	u32 fl_starve_thres;	/* Free List starvation threshold */
+
 	/*
 	 * Reverse maps from Absolute Queue IDs to associated queue pointers.
 	 * The absolute Queue IDs are in a compact range which start at a
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
index 85036e6b42c4..fdd078d7d82c 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
@@ -51,14 +51,6 @@
 #include "../cxgb4/t4_msg.h"
 
 /*
- * Decoded Adapter Parameters.
- */
-static u32 FL_PG_ORDER; /* large page allocation size */
-static u32 STAT_LEN;    /* length of status page at ring end */
-static u32 PKTSHIFT;    /* padding between CPL and packet data */
-static u32 FL_ALIGN;    /* response queue message alignment */
-
-/*
  * Constants ...
  */
 enum {
@@ -102,12 +94,6 @@ enum {
 	MAX_TIMER_TX_RECLAIM = 100,
 
 	/*
-	 * An FL with <= FL_STARVE_THRES buffers is starving and a periodic
-	 * timer will attempt to refill it.
-	 */
-	FL_STARVE_THRES = 4,
-
-	/*
 	 * Suspend an Ethernet TX queue with fewer available descriptors than
 	 * this. We always want to have room for a maximum sized packet:
 	 * inline immediate data + MAX_SKB_FRAGS. This is the same as
@@ -264,15 +250,19 @@ static inline unsigned int fl_cap(const struct sge_fl *fl)
 
 /**
  *	fl_starving - return whether a Free List is starving.
+ *	@adapter: pointer to the adapter
  *	@fl: the Free List
  *
  *	Tests specified Free List to see whether the number of buffers
  *	available to the hardware has falled below our "starvation"
  *	threshold.
  */
-static inline bool fl_starving(const struct sge_fl *fl)
+static inline bool fl_starving(const struct adapter *adapter,
+			       const struct sge_fl *fl)
 {
-	return fl->avail - fl->pend_cred <= FL_STARVE_THRES;
+	const struct sge *s = &adapter->sge;
+
+	return fl->avail - fl->pend_cred <= s->fl_starve_thres;
 }
 
 /**
@@ -457,13 +447,16 @@ static inline void reclaim_completed_tx(struct adapter *adapter,
 
 /**
  *	get_buf_size - return the size of an RX Free List buffer.
+ *	@adapter: pointer to the associated adapter
 *	@sdesc: pointer to the software buffer descriptor
 */
-static inline int get_buf_size(const struct rx_sw_desc *sdesc)
+static inline int get_buf_size(const struct adapter *adapter,
+			       const struct rx_sw_desc *sdesc)
 {
-	return FL_PG_ORDER > 0 && (sdesc->dma_addr & RX_LARGE_BUF)
-		? (PAGE_SIZE << FL_PG_ORDER)
-		: PAGE_SIZE;
+	const struct sge *s = &adapter->sge;
+
+	return (s->fl_pg_order > 0 && (sdesc->dma_addr & RX_LARGE_BUF)
+		? (PAGE_SIZE << s->fl_pg_order) : PAGE_SIZE);
 }
 
 /**
@@ -483,7 +476,8 @@ static void free_rx_bufs(struct adapter *adapter, struct sge_fl *fl, int n)
 
 		if (is_buf_mapped(sdesc))
 			dma_unmap_page(adapter->pdev_dev, get_buf_addr(sdesc),
-				       get_buf_size(sdesc), PCI_DMA_FROMDEVICE);
+				       get_buf_size(adapter, sdesc),
+				       PCI_DMA_FROMDEVICE);
 		put_page(sdesc->page);
 		sdesc->page = NULL;
 		if (++fl->cidx == fl->size)
@@ -511,7 +505,8 @@ static void unmap_rx_buf(struct adapter *adapter, struct sge_fl *fl)
 
 	if (is_buf_mapped(sdesc))
 		dma_unmap_page(adapter->pdev_dev, get_buf_addr(sdesc),
-			       get_buf_size(sdesc), PCI_DMA_FROMDEVICE);
+			       get_buf_size(adapter, sdesc),
+			       PCI_DMA_FROMDEVICE);
 	sdesc->page = NULL;
 	if (++fl->cidx == fl->size)
 		fl->cidx = 0;
@@ -589,6 +584,7 @@ static inline void poison_buf(struct page *page, size_t sz)
 static unsigned int refill_fl(struct adapter *adapter, struct sge_fl *fl,
 			      int n, gfp_t gfp)
 {
+	struct sge *s = &adapter->sge;
 	struct page *page;
 	dma_addr_t dma_addr;
 	unsigned int cred = fl->avail;
@@ -608,12 +604,12 @@ static unsigned int refill_fl(struct adapter *adapter, struct sge_fl *fl,
 	 * If we don't support large pages, drop directly into the small page
 	 * allocation code.
 	 */
-	if (FL_PG_ORDER == 0)
+	if (s->fl_pg_order == 0)
 		goto alloc_small_pages;
 
 	while (n) {
 		page = alloc_pages(gfp | __GFP_COMP | __GFP_NOWARN,
-				   FL_PG_ORDER);
+				   s->fl_pg_order);
 		if (unlikely(!page)) {
 			/*
 			 * We've failed inour attempt to allocate a "large
@@ -623,10 +619,10 @@ static unsigned int refill_fl(struct adapter *adapter, struct sge_fl *fl,
 			fl->large_alloc_failed++;
 			break;
 		}
-		poison_buf(page, PAGE_SIZE << FL_PG_ORDER);
+		poison_buf(page, PAGE_SIZE << s->fl_pg_order);
 
 		dma_addr = dma_map_page(adapter->pdev_dev, page, 0,
-					PAGE_SIZE << FL_PG_ORDER,
+					PAGE_SIZE << s->fl_pg_order,
 					PCI_DMA_FROMDEVICE);
 		if (unlikely(dma_mapping_error(adapter->pdev_dev, dma_addr))) {
@@ -637,7 +633,7 @@ static unsigned int refill_fl(struct adapter *adapter, struct sge_fl *fl,
 			 * because DMA mapping resources are typically
 			 * critical resources once they become scarse.
 			 */
-			__free_pages(page, FL_PG_ORDER);
+			__free_pages(page, s->fl_pg_order);
 			goto out;
 		}
 		dma_addr |= RX_LARGE_BUF;
@@ -693,7 +689,7 @@ out:
 	fl->pend_cred += cred;
 	ring_fl_db(adapter, fl);
 
-	if (unlikely(fl_starving(fl))) {
+	if (unlikely(fl_starving(adapter, fl))) {
 		smp_wmb();
 		set_bit(fl->cntxt_id, adapter->sge.starving_fl);
 	}
@@ -1468,6 +1464,8 @@ static void t4vf_pktgl_free(const struct pkt_gl *gl)
 static void do_gro(struct sge_eth_rxq *rxq, const struct pkt_gl *gl,
 		   const struct cpl_rx_pkt *pkt)
 {
+	struct adapter *adapter = rxq->rspq.adapter;
+	struct sge *s = &adapter->sge;
 	int ret;
 	struct sk_buff *skb;
 
@@ -1478,8 +1476,8 @@ static void do_gro(struct sge_eth_rxq *rxq, const struct pkt_gl *gl,
 		return;
 	}
 
-	copy_frags(skb, gl, PKTSHIFT);
-	skb->len = gl->tot_len - PKTSHIFT;
+	copy_frags(skb, gl, s->pktshift);
+	skb->len = gl->tot_len - s->pktshift;
 	skb->data_len = skb->len;
 	skb->truesize += skb->data_len;
 	skb->ip_summed = CHECKSUM_UNNECESSARY;
@@ -1516,6 +1514,8 @@ int t4vf_ethrx_handler(struct sge_rspq *rspq, const __be64 *rsp,
 	bool csum_ok = pkt->csum_calc && !pkt->err_vec &&
 		       (rspq->netdev->features & NETIF_F_RXCSUM);
 	struct sge_eth_rxq *rxq = container_of(rspq, struct sge_eth_rxq, rspq);
+	struct adapter *adapter = rspq->adapter;
+	struct sge *s = &adapter->sge;
 
 	/*
 	 * If this is a good TCP packet and we have Generic Receive Offload
@@ -1537,7 +1537,7 @@ int t4vf_ethrx_handler(struct sge_rspq *rspq, const __be64 *rsp,
 		rxq->stats.rx_drops++;
 		return 0;
 	}
-	__skb_pull(skb, PKTSHIFT);
+	__skb_pull(skb, s->pktshift);
 	skb->protocol = eth_type_trans(skb, rspq->netdev);
 	skb_record_rx_queue(skb, rspq->idx);
 	rxq->stats.pkts++;
@@ -1648,6 +1648,8 @@ static inline void rspq_next(struct sge_rspq *rspq)
 static int process_responses(struct sge_rspq *rspq, int budget)
 {
 	struct sge_eth_rxq *rxq = container_of(rspq, struct sge_eth_rxq, rspq);
+	struct adapter *adapter = rspq->adapter;
+	struct sge *s = &adapter->sge;
 	int budget_left = budget;
 
 	while (likely(budget_left)) {
@@ -1697,7 +1699,7 @@ static int process_responses(struct sge_rspq *rspq, int budget)
 			BUG_ON(frag >= MAX_SKB_FRAGS);
 			BUG_ON(rxq->fl.avail == 0);
 			sdesc = &rxq->fl.sdesc[rxq->fl.cidx];
-			bufsz = get_buf_size(sdesc);
+			bufsz = get_buf_size(adapter, sdesc);
 			fp->page = sdesc->page;
 			fp->offset = rspq->offset;
 			fp->size = min(bufsz, len);
@@ -1726,7 +1728,7 @@ static int process_responses(struct sge_rspq *rspq, int budget)
 			 */
 			ret = rspq->handler(rspq, rspq->cur_desc, &gl);
 			if (likely(ret == 0))
-				rspq->offset += ALIGN(fp->size, FL_ALIGN);
+				rspq->offset += ALIGN(fp->size, s->fl_align);
 			else
 				restore_rx_bufs(&gl, &rxq->fl, frag);
 		} else if (likely(rsp_type == RSP_TYPE_CPL)) {
@@ -1963,7 +1965,7 @@ static void sge_rx_timer_cb(unsigned long data)
 		 * schedule napi but the FL is no longer starving.
 		 * No biggie.
 		 */
-		if (fl_starving(fl)) {
+		if (fl_starving(adapter, fl)) {
 			struct sge_eth_rxq *rxq;
 
 			rxq = container_of(fl, struct sge_eth_rxq, fl);
@@ -2047,6 +2049,7 @@ int t4vf_sge_alloc_rxq(struct adapter *adapter, struct sge_rspq *rspq,
 		       int intr_dest,
 		       struct sge_fl *fl, rspq_handler_t hnd)
 {
+	struct sge *s = &adapter->sge;
 	struct port_info *pi = netdev_priv(dev);
 	struct fw_iq_cmd cmd, rpl;
 	int ret, iqandst, flsz = 0;
@@ -2117,7 +2120,7 @@ int t4vf_sge_alloc_rxq(struct adapter *adapter, struct sge_rspq *rspq,
 		fl->size = roundup(fl->size, FL_PER_EQ_UNIT);
 		fl->desc = alloc_ring(adapter->pdev_dev, fl->size,
 				      sizeof(__be64), sizeof(struct rx_sw_desc),
-				      &fl->addr, &fl->sdesc, STAT_LEN);
+				      &fl->addr, &fl->sdesc, s->stat_len);
 		if (!fl->desc) {
 			ret = -ENOMEM;
 			goto err;
@@ -2129,7 +2132,7 @@ int t4vf_sge_alloc_rxq(struct adapter *adapter, struct sge_rspq *rspq,
 		 * free list ring) in Egress Queue Units.
 		 */
 		flsz = (fl->size / FL_PER_EQ_UNIT +
-			STAT_LEN / EQ_UNIT);
+			s->stat_len / EQ_UNIT);
 
 		/*
 		 * Fill in all the relevant firmware Ingress Queue Command
@@ -2217,6 +2220,7 @@ int t4vf_sge_alloc_eth_txq(struct adapter *adapter, struct sge_eth_txq *txq,
 			   struct net_device *dev, struct netdev_queue *devq,
 			   unsigned int iqid)
 {
+	struct sge *s = &adapter->sge;
 	int ret, nentries;
 	struct fw_eq_eth_cmd cmd, rpl;
 	struct port_info *pi = netdev_priv(dev);
@@ -2225,7 +2229,7 @@ int t4vf_sge_alloc_eth_txq(struct adapter *adapter, struct sge_eth_txq *txq,
 	 * Calculate the size of the hardware TX Queue (including the Status
 	 * Page on the end of the TX Queue) in units of TX Descriptors.
 	 */
-	nentries = txq->q.size + STAT_LEN / sizeof(struct tx_desc);
+	nentries = txq->q.size + s->stat_len / sizeof(struct tx_desc);
 
 	/*
 	 * Allocate the hardware ring for the TX ring (with space for its
@@ -2234,7 +2238,7 @@ int t4vf_sge_alloc_eth_txq(struct adapter *adapter, struct sge_eth_txq *txq,
 	txq->q.desc = alloc_ring(adapter->pdev_dev, txq->q.size,
 				 sizeof(struct tx_desc),
 				 sizeof(struct tx_sw_desc),
-				 &txq->q.phys_addr, &txq->q.sdesc, STAT_LEN);
+				 &txq->q.phys_addr, &txq->q.sdesc, s->stat_len);
 	if (!txq->q.desc)
 		return -ENOMEM;
 
@@ -2307,8 +2311,10 @@ int t4vf_sge_alloc_eth_txq(struct adapter *adapter, struct sge_eth_txq *txq,
 */
 static void free_txq(struct adapter *adapter, struct sge_txq *tq)
 {
+	struct sge *s = &adapter->sge;
+
 	dma_free_coherent(adapter->pdev_dev,
-			  tq->size * sizeof(*tq->desc) + STAT_LEN,
+			  tq->size * sizeof(*tq->desc) + s->stat_len,
 			  tq->desc, tq->phys_addr);
 	tq->cntxt_id = 0;
 	tq->sdesc = NULL;
@@ -2322,6 +2328,7 @@ static void free_txq(struct adapter *adapter, struct sge_txq *tq)
 static void free_rspq_fl(struct adapter *adapter, struct sge_rspq *rspq,
 			 struct sge_fl *fl)
 {
+	struct sge *s = &adapter->sge;
 	unsigned int flid = fl ? fl->cntxt_id : 0xffff;
 
 	t4vf_iq_free(adapter, FW_IQ_TYPE_FL_INT_CAP,
@@ -2337,7 +2344,7 @@ static void free_rspq_fl(struct adapter *adapter, struct sge_rspq *rspq,
 	if (fl) {
 		free_rx_bufs(adapter, fl, fl->avail);
 		dma_free_coherent(adapter->pdev_dev,
-				  fl->size * sizeof(*fl->desc) + STAT_LEN,
+				  fl->size * sizeof(*fl->desc) + s->stat_len,
 				  fl->desc, fl->addr);
 		kfree(fl->sdesc);
 		fl->sdesc = NULL;
@@ -2423,6 +2430,7 @@ int t4vf_sge_init(struct adapter *adapter)
 	u32 fl0 = sge_params->sge_fl_buffer_size[0];
 	u32 fl1 = sge_params->sge_fl_buffer_size[1];
 	struct sge *s = &adapter->sge;
+	unsigned int ingpadboundary, ingpackboundary;
 
 	/*
 	 * Start by vetting the basic SGE parameters which have been set up by
@@ -2443,12 +2451,48 @@ int t4vf_sge_init(struct adapter *adapter)
 	 * Now translate the adapter parameters into our internal forms.
 	 */
 	if (fl1)
-		FL_PG_ORDER = ilog2(fl1) - PAGE_SHIFT;
-	STAT_LEN = ((sge_params->sge_control & EGRSTATUSPAGESIZE_MASK)
-		    ? 128 : 64);
-	PKTSHIFT = PKTSHIFT_GET(sge_params->sge_control);
-	FL_ALIGN = 1 << (INGPADBOUNDARY_GET(sge_params->sge_control) +
-			 SGE_INGPADBOUNDARY_SHIFT);
+		s->fl_pg_order = ilog2(fl1) - PAGE_SHIFT;
+	s->stat_len = ((sge_params->sge_control & EGRSTATUSPAGESIZE_MASK)
+		       ? 128 : 64);
+	s->pktshift = PKTSHIFT_GET(sge_params->sge_control);
+
+	/* T4 uses a single control field to specify both the PCIe Padding and
+	 * Packing Boundary. T5 introduced the ability to specify these
+	 * separately. The actual Ingress Packet Data alignment boundary
+	 * within Packed Buffer Mode is the maximum of these two
+	 * specifications. (Note that it makes no real practical sense to
+	 * have the Pading Boudary be larger than the Packing Boundary but you
+	 * could set the chip up that way and, in fact, legacy T4 code would
+	 * end doing this because it would initialize the Padding Boundary and
+	 * leave the Packing Boundary initialized to 0 (16 bytes).)
+	 */
+	ingpadboundary = 1 << (INGPADBOUNDARY_GET(sge_params->sge_control) +
+			       X_INGPADBOUNDARY_SHIFT);
+	if (is_t4(adapter->params.chip)) {
+		s->fl_align = ingpadboundary;
+	} else {
+		/* T5 has a different interpretation of one of the PCIe Packing
+		 * Boundary values.
+		 */
+		ingpackboundary = INGPACKBOUNDARY_G(sge_params->sge_control2);
+		if (ingpackboundary == INGPACKBOUNDARY_16B_X)
+			ingpackboundary = 16;
+		else
+			ingpackboundary = 1 << (ingpackboundary +
+						INGPACKBOUNDARY_SHIFT_X);
+
+		s->fl_align = max(ingpadboundary, ingpackboundary);
+	}
+
+	/* A FL with <= fl_starve_thres buffers is starving and a periodic
+	 * timer will attempt to refill it. This needs to be larger than the
+	 * SGE's Egress Congestion Threshold. If it isn't, then we can get
+	 * stuck waiting for new packets while the SGE is waiting for us to
+	 * give it more Free List entries. (Note that the SGE's Egress
+	 * Congestion Threshold is in units of 2 Free List pointers.)
+	 */
+	s->fl_starve_thres
+		= EGRTHRESHOLD_GET(sge_params->sge_congestion_control)*2 + 1;
 
 	/*
 	 * Set up tasklet timers.
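The new starvation threshold is simple arithmetic: the SGE's Egress Congestion Threshold is counted in units of 2 Free List pointers, so the driver must keep more than twice the decoded field value on hand. A one-function sketch with a hypothetical decoded threshold:

#include <stdio.h>

static unsigned int fl_starve_thres(unsigned int egrthreshold)
{
	/* threshold is in units of 2 FL pointers, hence "* 2 + 1" */
	return egrthreshold * 2 + 1;
}

int main(void)
{
	printf("fl_starve_thres = %u\n", fl_starve_thres(64));	/* 129 */
	return 0;
}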
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h
index 95df61dcb4ce..4b6a6d14d86d 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h
@@ -134,11 +134,13 @@ struct dev_params {
 */
 struct sge_params {
 	u32 sge_control;		/* padding, boundaries, lengths, etc. */
+	u32 sge_control2;		/* T5: more of the same */
 	u32 sge_host_page_size;		/* RDMA page sizes */
 	u32 sge_queues_per_page;	/* RDMA queues/page */
 	u32 sge_user_mode_limits;	/* limits for BAR2 user mode accesses */
 	u32 sge_fl_buffer_size[16];	/* free list buffer sizes */
 	u32 sge_ingress_rx_threshold;	/* RX counter interrupt threshold[4] */
+	u32 sge_congestion_control;	/* congestion thresholds, etc. */
 	u32 sge_timer_value_0_and_1;	/* interrupt coalescing timer values */
 	u32 sge_timer_value_2_and_3;
 	u32 sge_timer_value_4_and_5;
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
index e984fdc48ba2..1e896b923234 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
@@ -468,12 +468,38 @@ int t4vf_get_sge_params(struct adapter *adapter)
 	sge_params->sge_timer_value_2_and_3 = vals[5];
 	sge_params->sge_timer_value_4_and_5 = vals[6];
 
+	/* T4 uses a single control field to specify both the PCIe Padding and
+	 * Packing Boundary. T5 introduced the ability to specify these
+	 * separately with the Padding Boundary in SGE_CONTROL and and Packing
+	 * Boundary in SGE_CONTROL2. So for T5 and later we need to grab
+	 * SGE_CONTROL in order to determine how ingress packet data will be
+	 * laid out in Packed Buffer Mode. Unfortunately, older versions of
+	 * the firmware won't let us retrieve SGE_CONTROL2 so if we get a
+	 * failure grabbing it we throw an error since we can't figure out the
+	 * right value.
+	 */
+	if (!is_t4(adapter->params.chip)) {
+		params[0] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |
+			     FW_PARAMS_PARAM_XYZ(SGE_CONTROL2_A));
+		v = t4vf_query_params(adapter, 1, params, vals);
+		if (v != FW_SUCCESS) {
+			dev_err(adapter->pdev_dev,
+				"Unable to get SGE Control2; "
+				"probably old firmware.\n");
+			return v;
+		}
+		sge_params->sge_control2 = vals[0];
+	}
+
 	params[0] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |
 		     FW_PARAMS_PARAM_XYZ(SGE_INGRESS_RX_THRESHOLD));
-	v = t4vf_query_params(adapter, 1, params, vals);
+	params[1] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |
+		     FW_PARAMS_PARAM_XYZ(SGE_CONM_CTRL));
+	v = t4vf_query_params(adapter, 2, params, vals);
 	if (v)
 		return v;
 	sge_params->sge_ingress_rx_threshold = vals[0];
+	sge_params->sge_congestion_control = vals[1];
 
 	return 0;
 }
diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
index 180e53fa628f..73cf1653a4a3 100644
--- a/drivers/net/ethernet/cisco/enic/enic_main.c
+++ b/drivers/net/ethernet/cisco/enic/enic_main.c
@@ -940,18 +940,8 @@ static int enic_rq_alloc_buf(struct vnic_rq *rq)
 	struct vnic_rq_buf *buf = rq->to_use;
 
 	if (buf->os_buf) {
-		buf = buf->next;
-		rq->to_use = buf;
-		rq->ring.desc_avail--;
-		if ((buf->index & VNIC_RQ_RETURN_RATE) == 0) {
-			/* Adding write memory barrier prevents compiler and/or
-			 * CPU reordering, thus avoiding descriptor posting
-			 * before descriptor is initialized. Otherwise, hardware
-			 * can read stale descriptor fields.
-			 */
-			wmb();
-			iowrite32(buf->index, &rq->ctrl->posted_index);
-		}
+		enic_queue_rq_desc(rq, buf->os_buf, os_buf_index, buf->dma_addr,
+				   buf->len);
 
 		return 0;
 	}
@@ -1037,7 +1027,10 @@ static void enic_rq_indicate_buf(struct vnic_rq *rq,
 			enic->rq_truncated_pkts++;
 		}
 
+		pci_unmap_single(enic->pdev, buf->dma_addr, buf->len,
+				 PCI_DMA_FROMDEVICE);
 		dev_kfree_skb_any(skb);
+		buf->os_buf = NULL;
 
 		return;
 	}
@@ -1088,7 +1081,10 @@ static void enic_rq_indicate_buf(struct vnic_rq *rq,
 		/* Buffer overflow
 		 */
 
+		pci_unmap_single(enic->pdev, buf->dma_addr, buf->len,
+				 PCI_DMA_FROMDEVICE);
 		dev_kfree_skb_any(skb);
+		buf->os_buf = NULL;
 	}
 }
 
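The enic error paths now unmap the receive buffer before freeing the skb and clear os_buf so a stale pointer can never be reposted. A compilable sketch of that ordering; the DMA helper below is a hypothetical stand-in for pci_unmap_single():

#include <stdlib.h>

struct rx_buf {
	void *os_buf;
	unsigned long dma_addr;
	unsigned int len;
};

static void dma_unmap(unsigned long dma_addr, unsigned int len)
{
	(void)dma_addr;		/* real code releases the IOMMU mapping */
	(void)len;
}

static void drop_rx_buf(struct rx_buf *buf)
{
	dma_unmap(buf->dma_addr, buf->len);	/* unmap first ... */
	free(buf->os_buf);			/* ... then free ... */
	buf->os_buf = NULL;			/* ... and forget it */
}

int main(void)
{
	struct rx_buf buf = { malloc(64), 0x1000, 64 };

	drop_rx_buf(&buf);
	return 0;
}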
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 50a851db2852..3dca494797bd 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -298,6 +298,16 @@ static void *swap_buffer(void *bufaddr, int len)
 	return bufaddr;
 }
 
+static void swap_buffer2(void *dst_buf, void *src_buf, int len)
+{
+	int i;
+	unsigned int *src = src_buf;
+	unsigned int *dst = dst_buf;
+
+	for (i = 0; i < len; i += 4, src++, dst++)
+		*dst = swab32p(src);
+}
+
 static void fec_dump(struct net_device *ndev)
 {
 	struct fec_enet_private *fep = netdev_priv(ndev);
@@ -1307,7 +1317,7 @@ fec_enet_new_rxbdp(struct net_device *ndev, struct bufdesc *bdp, struct sk_buff
 }
 
 static bool fec_enet_copybreak(struct net_device *ndev, struct sk_buff **skb,
-			       struct bufdesc *bdp, u32 length)
+			       struct bufdesc *bdp, u32 length, bool swap)
 {
 	struct fec_enet_private *fep = netdev_priv(ndev);
 	struct sk_buff *new_skb;
@@ -1322,7 +1332,10 @@ static bool fec_enet_copybreak(struct net_device *ndev, struct sk_buff **skb,
 	dma_sync_single_for_cpu(&fep->pdev->dev, bdp->cbd_bufaddr,
 				FEC_ENET_RX_FRSIZE - fep->rx_align,
 				DMA_FROM_DEVICE);
-	memcpy(new_skb->data, (*skb)->data, length);
+	if (!swap)
+		memcpy(new_skb->data, (*skb)->data, length);
+	else
+		swap_buffer2(new_skb->data, (*skb)->data, length);
 	*skb = new_skb;
 
 	return true;
@@ -1352,6 +1365,7 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
 	u16 vlan_tag;
 	int index = 0;
 	bool is_copybreak;
+	bool need_swap = id_entry->driver_data & FEC_QUIRK_SWAP_FRAME;
 
 #ifdef CONFIG_M532x
 	flush_cache_all();
@@ -1415,7 +1429,8 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
 		 * include that when passing upstream as it messes up
 		 * bridging applications.
 		 */
-		is_copybreak = fec_enet_copybreak(ndev, &skb, bdp, pkt_len - 4);
+		is_copybreak = fec_enet_copybreak(ndev, &skb, bdp, pkt_len - 4,
+						  need_swap);
 		if (!is_copybreak) {
 			skb_new = netdev_alloc_skb(ndev, FEC_ENET_RX_FRSIZE);
 			if (unlikely(!skb_new)) {
@@ -1430,7 +1445,7 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
 			prefetch(skb->data - NET_IP_ALIGN);
 			skb_put(skb, pkt_len - 4);
 			data = skb->data;
-			if (id_entry->driver_data & FEC_QUIRK_SWAP_FRAME)
+			if (!is_copybreak && need_swap)
 				swap_buffer(data, pkt_len);
 
 		/* Extract the enhanced buffer descriptor */
@@ -3343,12 +3358,11 @@ static int __maybe_unused fec_suspend(struct device *dev)
 		netif_device_detach(ndev);
 		netif_tx_unlock_bh(ndev);
 		fec_stop(ndev);
+		fec_enet_clk_enable(ndev, false);
+		pinctrl_pm_select_sleep_state(&fep->pdev->dev);
 	}
 	rtnl_unlock();
 
-	fec_enet_clk_enable(ndev, false);
-	pinctrl_pm_select_sleep_state(&fep->pdev->dev);
-
 	if (fep->reg_phy)
 		regulator_disable(fep->reg_phy);
 
@@ -3367,13 +3381,14 @@ static int __maybe_unused fec_resume(struct device *dev)
 		return ret;
 	}
 
-	pinctrl_pm_select_default_state(&fep->pdev->dev);
-	ret = fec_enet_clk_enable(ndev, true);
-	if (ret)
-		goto failed_clk;
-
 	rtnl_lock();
 	if (netif_running(ndev)) {
+		pinctrl_pm_select_default_state(&fep->pdev->dev);
+		ret = fec_enet_clk_enable(ndev, true);
+		if (ret) {
+			rtnl_unlock();
+			goto failed_clk;
+		}
 		fec_restart(ndev);
 		netif_tx_lock_bh(ndev);
 		netif_device_attach(ndev);
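swap_buffer2() differs from the existing swap_buffer() only in writing the 32-bit-swapped words into a second buffer, which is exactly what the copybreak path needs on cores where the FEC delivers big-endian frames (FEC_QUIRK_SWAP_FRAME). A user-space model with swab32 open-coded (the kernel's swab32p() reads and swaps in one step):

#include <stdint.h>
#include <stdio.h>

static uint32_t swab32(uint32_t v)
{
	return (v >> 24) | ((v >> 8) & 0xff00) |
	       ((v << 8) & 0xff0000) | (v << 24);
}

static void swap_buffer2(void *dst_buf, void *src_buf, int len)
{
	uint32_t *src = src_buf;
	uint32_t *dst = dst_buf;
	int i;

	for (i = 0; i < len; i += 4, src++, dst++)
		*dst = swab32(*src);
}

int main(void)
{
	uint32_t src[2] = { 0x11223344, 0xaabbccdd };
	uint32_t dst[2];

	swap_buffer2(dst, src, sizeof(src));
	printf("0x%08x 0x%08x\n", (unsigned)dst[0], (unsigned)dst[1]);
	/* prints: 0x44332211 0xddccbbaa */
	return 0;
}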
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
index d47b19f27c35..28b81ae09b5a 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
@@ -635,7 +635,6 @@ s32 ixgbe_check_phy_link_tnx(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
 **/
s32 ixgbe_setup_phy_link_tnx(struct ixgbe_hw *hw)
{
-	s32 status;
 	u16 autoneg_reg = IXGBE_MII_AUTONEG_REG;
 	bool autoneg = false;
 	ixgbe_link_speed speed;
@@ -700,8 +699,7 @@ s32 ixgbe_setup_phy_link_tnx(struct ixgbe_hw *hw)
 
 	hw->phy.ops.write_reg(hw, MDIO_CTRL1,
 			      MDIO_MMD_AN, autoneg_reg);
-
-	return status;
+	return 0;
}
 
/**
diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c
index b151a949f352..d44560d1d268 100644
--- a/drivers/net/ethernet/marvell/mv643xx_eth.c
+++ b/drivers/net/ethernet/marvell/mv643xx_eth.c
@@ -1047,7 +1047,6 @@ static int txq_reclaim(struct tx_queue *txq, int budget, int force)
1047 int tx_index; 1047 int tx_index;
1048 struct tx_desc *desc; 1048 struct tx_desc *desc;
1049 u32 cmd_sts; 1049 u32 cmd_sts;
1050 struct sk_buff *skb;
1051 1050
1052 tx_index = txq->tx_used_desc; 1051 tx_index = txq->tx_used_desc;
1053 desc = &txq->tx_desc_area[tx_index]; 1052 desc = &txq->tx_desc_area[tx_index];
@@ -1066,19 +1065,22 @@ static int txq_reclaim(struct tx_queue *txq, int budget, int force)
1066 reclaimed++; 1065 reclaimed++;
1067 txq->tx_desc_count--; 1066 txq->tx_desc_count--;
1068 1067
1069 skb = NULL; 1068 if (!IS_TSO_HEADER(txq, desc->buf_ptr))
1070 if (cmd_sts & TX_LAST_DESC) 1069 dma_unmap_single(mp->dev->dev.parent, desc->buf_ptr,
1071 skb = __skb_dequeue(&txq->tx_skb); 1070 desc->byte_cnt, DMA_TO_DEVICE);
1071
1072 if (cmd_sts & TX_ENABLE_INTERRUPT) {
1073 struct sk_buff *skb = __skb_dequeue(&txq->tx_skb);
1074
1075 if (!WARN_ON(!skb))
1076 dev_kfree_skb(skb);
1077 }
1072 1078
1073 if (cmd_sts & ERROR_SUMMARY) { 1079 if (cmd_sts & ERROR_SUMMARY) {
1074 netdev_info(mp->dev, "tx error\n"); 1080 netdev_info(mp->dev, "tx error\n");
1075 mp->dev->stats.tx_errors++; 1081 mp->dev->stats.tx_errors++;
1076 } 1082 }
1077 1083
1078 if (!IS_TSO_HEADER(txq, desc->buf_ptr))
1079 dma_unmap_single(mp->dev->dev.parent, desc->buf_ptr,
1080 desc->byte_cnt, DMA_TO_DEVICE);
1081 dev_kfree_skb(skb);
1082 } 1084 }
1083 1085
1084 __netif_tx_unlock_bh(nq); 1086 __netif_tx_unlock_bh(nq);
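The reordered reclaim above separates the two resources a TX descriptor can own: every data descriptor's DMA mapping is released as it completes, while the skb, which TSO spreads across several descriptors, is dequeued and freed only on the descriptor that was flagged to raise an interrupt. A minimal sketch of that split (helper and names are illustrative, not the driver's):

#include <linux/dma-mapping.h>
#include <linux/skbuff.h>

/* Sketch: unmap per descriptor, free the skb once per packet. */
static void example_reclaim_desc(struct device *dma_dev,
				 struct sk_buff_head *pending,
				 dma_addr_t buf, u32 len, bool last_desc)
{
	/* Every data-bearing descriptor owns a mapping... */
	dma_unmap_single(dma_dev, buf, len, DMA_TO_DEVICE);

	/* ...but only the last descriptor of a packet owns the skb. */
	if (last_desc) {
		struct sk_buff *skb = __skb_dequeue(pending);

		if (!WARN_ON(!skb))
			dev_kfree_skb(skb);
	}
}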
diff --git a/drivers/net/ethernet/marvell/mvpp2.c b/drivers/net/ethernet/marvell/mvpp2.c
index ece83f101526..fdf3e382e464 100644
--- a/drivers/net/ethernet/marvell/mvpp2.c
+++ b/drivers/net/ethernet/marvell/mvpp2.c
@@ -1692,6 +1692,7 @@ static int mvpp2_prs_vlan_add(struct mvpp2 *priv, unsigned short tpid, int ai,
1692{ 1692{
1693 struct mvpp2_prs_entry *pe; 1693 struct mvpp2_prs_entry *pe;
1694 int tid_aux, tid; 1694 int tid_aux, tid;
1695 int ret = 0;
1695 1696
1696 pe = mvpp2_prs_vlan_find(priv, tpid, ai); 1697 pe = mvpp2_prs_vlan_find(priv, tpid, ai);
1697 1698
@@ -1723,8 +1724,10 @@ static int mvpp2_prs_vlan_add(struct mvpp2 *priv, unsigned short tpid, int ai,
1723 break; 1724 break;
1724 } 1725 }
1725 1726
1726 if (tid <= tid_aux) 1727 if (tid <= tid_aux) {
1727 return -EINVAL; 1728 ret = -EINVAL;
1729 goto error;
1730 }
1728 1731
1729 memset(pe, 0, sizeof(struct mvpp2_prs_entry)); 1732 memset(pe, 0, sizeof(struct mvpp2_prs_entry));
1730 mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_VLAN); 1733 mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_VLAN);
@@ -1756,9 +1759,10 @@ static int mvpp2_prs_vlan_add(struct mvpp2 *priv, unsigned short tpid, int ai,
1756 1759
1757 mvpp2_prs_hw_write(priv, pe); 1760 mvpp2_prs_hw_write(priv, pe);
1758 1761
1762error:
1759 kfree(pe); 1763 kfree(pe);
1760 1764
1761 return 0; 1765 return ret;
1762} 1766}
1763 1767
1764/* Get first free double vlan ai number */ 1768/* Get first free double vlan ai number */
@@ -1821,7 +1825,7 @@ static int mvpp2_prs_double_vlan_add(struct mvpp2 *priv, unsigned short tpid1,
1821 unsigned int port_map) 1825 unsigned int port_map)
1822{ 1826{
1823 struct mvpp2_prs_entry *pe; 1827 struct mvpp2_prs_entry *pe;
1824 int tid_aux, tid, ai; 1828 int tid_aux, tid, ai, ret = 0;
1825 1829
1826 pe = mvpp2_prs_double_vlan_find(priv, tpid1, tpid2); 1830 pe = mvpp2_prs_double_vlan_find(priv, tpid1, tpid2);
1827 1831
@@ -1838,8 +1842,10 @@ static int mvpp2_prs_double_vlan_add(struct mvpp2 *priv, unsigned short tpid1,
1838 1842
1839 /* Set ai value for new double vlan entry */ 1843 /* Set ai value for new double vlan entry */
1840 ai = mvpp2_prs_double_vlan_ai_free_get(priv); 1844 ai = mvpp2_prs_double_vlan_ai_free_get(priv);
1841 if (ai < 0) 1845 if (ai < 0) {
1842 return ai; 1846 ret = ai;
1847 goto error;
1848 }
1843 1849
1844 /* Get first single/triple vlan tid */ 1850 /* Get first single/triple vlan tid */
1845 for (tid_aux = MVPP2_PE_FIRST_FREE_TID; 1851 for (tid_aux = MVPP2_PE_FIRST_FREE_TID;
@@ -1859,8 +1865,10 @@ static int mvpp2_prs_double_vlan_add(struct mvpp2 *priv, unsigned short tpid1,
1859 break; 1865 break;
1860 } 1866 }
1861 1867
1862 if (tid >= tid_aux) 1868 if (tid >= tid_aux) {
1863 return -ERANGE; 1869 ret = -ERANGE;
1870 goto error;
1871 }
1864 1872
1865 memset(pe, 0, sizeof(struct mvpp2_prs_entry)); 1873 memset(pe, 0, sizeof(struct mvpp2_prs_entry));
1866 mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_VLAN); 1874 mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_VLAN);
@@ -1887,8 +1895,9 @@ static int mvpp2_prs_double_vlan_add(struct mvpp2 *priv, unsigned short tpid1,
1887 mvpp2_prs_tcam_port_map_set(pe, port_map); 1895 mvpp2_prs_tcam_port_map_set(pe, port_map);
1888 mvpp2_prs_hw_write(priv, pe); 1896 mvpp2_prs_hw_write(priv, pe);
1889 1897
1898error:
1890 kfree(pe); 1899 kfree(pe);
1891 return 0; 1900 return ret;
1892} 1901}
1893 1902
1894/* IPv4 header parsing for fragmentation and L4 offset */ 1903/* IPv4 header parsing for fragmentation and L4 offset */
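The mvpp2 hunks above replace early returns, which leaked the parser entry, with jumps to a single error label. A self-contained sketch of that single-exit pattern, with hypothetical names:

#include <linux/slab.h>
#include <linux/errno.h>

/* Illustrative stand-in for mvpp2_prs_entry. */
struct example_entry {
	int data;
};

/* Once the entry is allocated, every failure path jumps to the
 * error label so kfree() runs exactly once on all exits. */
static int example_vlan_add(int tid, int tid_aux)
{
	struct example_entry *pe;
	int ret = 0;

	pe = kzalloc(sizeof(*pe), GFP_KERNEL);
	if (!pe)
		return -ENOMEM;		/* nothing to unwind yet */

	if (tid <= tid_aux) {
		ret = -EINVAL;		/* was an early return: leaked pe */
		goto error;
	}

	/* ... fill in and write the entry to hardware ... */

error:
	kfree(pe);
	return ret;
}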
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index f3032fec8fce..02266e3de514 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -2281,8 +2281,16 @@ static void mlx4_en_add_vxlan_offloads(struct work_struct *work)
2281 ret = mlx4_SET_PORT_VXLAN(priv->mdev->dev, priv->port, 2281 ret = mlx4_SET_PORT_VXLAN(priv->mdev->dev, priv->port,
2282 VXLAN_STEER_BY_OUTER_MAC, 1); 2282 VXLAN_STEER_BY_OUTER_MAC, 1);
2283out: 2283out:
2284 if (ret) 2284 if (ret) {
2285 en_err(priv, "failed setting L2 tunnel configuration ret %d\n", ret); 2285 en_err(priv, "failed setting L2 tunnel configuration ret %d\n", ret);
2286 return;
2287 }
2288
2289 /* set offloads */
2290 priv->dev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
2291 NETIF_F_TSO | NETIF_F_GSO_UDP_TUNNEL;
2292 priv->dev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
2293 priv->dev->features |= NETIF_F_GSO_UDP_TUNNEL;
2286} 2294}
2287 2295
2288static void mlx4_en_del_vxlan_offloads(struct work_struct *work) 2296static void mlx4_en_del_vxlan_offloads(struct work_struct *work)
@@ -2290,6 +2298,11 @@ static void mlx4_en_del_vxlan_offloads(struct work_struct *work)
2290 int ret; 2298 int ret;
2291 struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv, 2299 struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
2292 vxlan_del_task); 2300 vxlan_del_task);
2301 /* unset offloads */
2302 priv->dev->hw_enc_features &= ~(NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
2303 NETIF_F_TSO | NETIF_F_GSO_UDP_TUNNEL);
2304 priv->dev->hw_features &= ~NETIF_F_GSO_UDP_TUNNEL;
2305 priv->dev->features &= ~NETIF_F_GSO_UDP_TUNNEL;
2293 2306
2294 ret = mlx4_SET_PORT_VXLAN(priv->mdev->dev, priv->port, 2307 ret = mlx4_SET_PORT_VXLAN(priv->mdev->dev, priv->port,
2295 VXLAN_STEER_BY_OUTER_MAC, 0); 2308 VXLAN_STEER_BY_OUTER_MAC, 0);
@@ -2568,13 +2581,6 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
2568 if (mdev->dev->caps.steering_mode != MLX4_STEERING_MODE_A0) 2581 if (mdev->dev->caps.steering_mode != MLX4_STEERING_MODE_A0)
2569 dev->priv_flags |= IFF_UNICAST_FLT; 2582 dev->priv_flags |= IFF_UNICAST_FLT;
2570 2583
2571 if (mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) {
2572 dev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
2573 NETIF_F_TSO | NETIF_F_GSO_UDP_TUNNEL;
2574 dev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
2575 dev->features |= NETIF_F_GSO_UDP_TUNNEL;
2576 }
2577
2578 mdev->pndev[port] = dev; 2584 mdev->pndev[port] = dev;
2579 2585
2580 netif_carrier_off(dev); 2586 netif_carrier_off(dev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
index a278238a2db6..ad2c96a02a53 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
@@ -374,15 +374,14 @@ int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx,
374 snprintf(eq->name, MLX5_MAX_EQ_NAME, "%s@pci:%s", 374 snprintf(eq->name, MLX5_MAX_EQ_NAME, "%s@pci:%s",
375 name, pci_name(dev->pdev)); 375 name, pci_name(dev->pdev));
376 eq->eqn = out.eq_number; 376 eq->eqn = out.eq_number;
377 eq->irqn = vecidx;
378 eq->dev = dev;
379 eq->doorbell = uar->map + MLX5_EQ_DOORBEL_OFFSET;
377 err = request_irq(table->msix_arr[vecidx].vector, mlx5_msix_handler, 0, 380 err = request_irq(table->msix_arr[vecidx].vector, mlx5_msix_handler, 0,
378 eq->name, eq); 381 eq->name, eq);
379 if (err) 382 if (err)
380 goto err_eq; 383 goto err_eq;
381 384
382 eq->irqn = vecidx;
383 eq->dev = dev;
384 eq->doorbell = uar->map + MLX5_EQ_DOORBEL_OFFSET;
385
386 err = mlx5_debug_eq_add(dev, eq); 385 err = mlx5_debug_eq_add(dev, eq);
387 if (err) 386 if (err)
388 goto err_irq; 387 goto err_irq;
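The eq.c hunk moves the initialization of eq->irqn, eq->dev and eq->doorbell ahead of request_irq(): a shared or already-pending interrupt can invoke the handler before request_irq() returns, so every field the handler touches must be valid first. A hedged sketch of the ordering, with illustrative names:

#include <linux/interrupt.h>
#include <linux/io.h>

struct example_eq {
	void __iomem *doorbell;	/* read by the interrupt handler */
};

static irqreturn_t example_eq_isr(int irq, void *dev_id)
{
	struct example_eq *eq = dev_id;

	writel(0, eq->doorbell);	/* would oops if still NULL */
	return IRQ_HANDLED;
}

/* Publish every field the ISR dereferences before request_irq(). */
static int example_eq_enable(struct example_eq *eq, int irq,
			     void __iomem *doorbell)
{
	eq->doorbell = doorbell;

	return request_irq(irq, example_eq_isr, 0, "example_eq", eq);
}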
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index 3d8e8e489b2d..71b10b210792 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -864,14 +864,14 @@ static int init_one(struct pci_dev *pdev,
864 dev->profile = &profile[prof_sel]; 864 dev->profile = &profile[prof_sel];
865 dev->event = mlx5_core_event; 865 dev->event = mlx5_core_event;
866 866
867 INIT_LIST_HEAD(&priv->ctx_list);
868 spin_lock_init(&priv->ctx_lock);
867 err = mlx5_dev_init(dev, pdev); 869 err = mlx5_dev_init(dev, pdev);
868 if (err) { 870 if (err) {
869 dev_err(&pdev->dev, "mlx5_dev_init failed %d\n", err); 871 dev_err(&pdev->dev, "mlx5_dev_init failed %d\n", err);
870 goto out; 872 goto out;
871 } 873 }
872 874
873 INIT_LIST_HEAD(&priv->ctx_list);
874 spin_lock_init(&priv->ctx_lock);
875 err = mlx5_register_device(dev); 875 err = mlx5_register_device(dev);
876 if (err) { 876 if (err) {
877 dev_err(&pdev->dev, "mlx5_register_device failed %d\n", err); 877 dev_err(&pdev->dev, "mlx5_register_device failed %d\n", err);
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
index 0b2a1ccd276d..613037584d08 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
@@ -2762,7 +2762,8 @@ netxen_fw_poll_work(struct work_struct *work)
2762 if (test_bit(__NX_RESETTING, &adapter->state)) 2762 if (test_bit(__NX_RESETTING, &adapter->state))
2763 goto reschedule; 2763 goto reschedule;
2764 2764
2765 if (test_bit(__NX_DEV_UP, &adapter->state)) { 2765 if (test_bit(__NX_DEV_UP, &adapter->state) &&
2766 !(adapter->capabilities & NX_FW_CAPABILITY_LINK_NOTIFICATION)) {
2766 if (!adapter->has_link_events) { 2767 if (!adapter->has_link_events) {
2767 2768
2768 netxen_nic_handle_phy_intr(adapter); 2769 netxen_nic_handle_phy_intr(adapter);
diff --git a/drivers/net/ethernet/qualcomm/Kconfig b/drivers/net/ethernet/qualcomm/Kconfig
index f3a47147937d..9a49f42ac2ba 100644
--- a/drivers/net/ethernet/qualcomm/Kconfig
+++ b/drivers/net/ethernet/qualcomm/Kconfig
@@ -5,7 +5,6 @@
5config NET_VENDOR_QUALCOMM 5config NET_VENDOR_QUALCOMM
6 bool "Qualcomm devices" 6 bool "Qualcomm devices"
7 default y 7 default y
8 depends on SPI_MASTER && OF_GPIO
9 ---help--- 8 ---help---
10 If you have a network (Ethernet) card belonging to this class, say Y 9 If you have a network (Ethernet) card belonging to this class, say Y
11 and read the Ethernet-HOWTO, available from 10 and read the Ethernet-HOWTO, available from
@@ -20,7 +19,7 @@ if NET_VENDOR_QUALCOMM
20 19
21config QCA7000 20config QCA7000
22 tristate "Qualcomm Atheros QCA7000 support" 21 tristate "Qualcomm Atheros QCA7000 support"
23 depends on SPI_MASTER && OF_GPIO 22 depends on SPI_MASTER && OF
24 ---help--- 23 ---help---
25 This SPI protocol driver supports the Qualcomm Atheros QCA7000. 24 This SPI protocol driver supports the Qualcomm Atheros QCA7000.
26 25
diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c
index 002d4cdc319f..a77f05ce8325 100644
--- a/drivers/net/ethernet/sfc/ef10.c
+++ b/drivers/net/ethernet/sfc/ef10.c
@@ -180,7 +180,8 @@ static int efx_ef10_probe(struct efx_nic *efx)
180 EFX_MAX_CHANNELS, 180 EFX_MAX_CHANNELS,
181 resource_size(&efx->pci_dev->resource[EFX_MEM_BAR]) / 181 resource_size(&efx->pci_dev->resource[EFX_MEM_BAR]) /
182 (EFX_VI_PAGE_SIZE * EFX_TXQ_TYPES)); 182 (EFX_VI_PAGE_SIZE * EFX_TXQ_TYPES));
183 BUG_ON(efx->max_channels == 0); 183 if (WARN_ON(efx->max_channels == 0))
184 return -EIO;
184 185
185 nic_data = kzalloc(sizeof(*nic_data), GFP_KERNEL); 186 nic_data = kzalloc(sizeof(*nic_data), GFP_KERNEL);
186 if (!nic_data) 187 if (!nic_data)
diff --git a/drivers/net/ethernet/smsc/smc91x.c b/drivers/net/ethernet/smsc/smc91x.c
index 2c62208077fe..6cc3cf6f17c8 100644
--- a/drivers/net/ethernet/smsc/smc91x.c
+++ b/drivers/net/ethernet/smsc/smc91x.c
@@ -2243,9 +2243,10 @@ static int smc_drv_probe(struct platform_device *pdev)
2243 const struct of_device_id *match = NULL; 2243 const struct of_device_id *match = NULL;
2244 struct smc_local *lp; 2244 struct smc_local *lp;
2245 struct net_device *ndev; 2245 struct net_device *ndev;
2246 struct resource *res, *ires; 2246 struct resource *res;
2247 unsigned int __iomem *addr; 2247 unsigned int __iomem *addr;
2248 unsigned long irq_flags = SMC_IRQ_FLAGS; 2248 unsigned long irq_flags = SMC_IRQ_FLAGS;
2249 unsigned long irq_resflags;
2249 int ret; 2250 int ret;
2250 2251
2251 ndev = alloc_etherdev(sizeof(struct smc_local)); 2252 ndev = alloc_etherdev(sizeof(struct smc_local));
@@ -2337,16 +2338,19 @@ static int smc_drv_probe(struct platform_device *pdev)
2337 goto out_free_netdev; 2338 goto out_free_netdev;
2338 } 2339 }
2339 2340
2340 ires = platform_get_resource(pdev, IORESOURCE_IRQ, 0); 2341 ndev->irq = platform_get_irq(pdev, 0);
2341 if (!ires) { 2342 if (ndev->irq <= 0) {
2342 ret = -ENODEV; 2343 ret = -ENODEV;
2343 goto out_release_io; 2344 goto out_release_io;
2344 } 2345 }
2345 2346 /*
2346 ndev->irq = ires->start; 2347 * If this platform does not specify any special irqflags, or if
2347 2348 * the resource supplies a trigger, override the irqflags with
2348 if (irq_flags == -1 || ires->flags & IRQF_TRIGGER_MASK) 2349 * the trigger flags from the resource.
2349 irq_flags = ires->flags & IRQF_TRIGGER_MASK; 2350 */
2351 irq_resflags = irqd_get_trigger_type(irq_get_irq_data(ndev->irq));
2352 if (irq_flags == -1 || irq_resflags & IRQF_TRIGGER_MASK)
2353 irq_flags = irq_resflags & IRQF_TRIGGER_MASK;
2350 2354
2351 ret = smc_request_attrib(pdev, ndev); 2355 ret = smc_request_attrib(pdev, ndev);
2352 if (ret) 2356 if (ret)
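The smc91x probe now asks the IRQ core for both the interrupt number and its trigger type instead of parsing IORESOURCE_IRQ, which is absent when the interrupt comes from device tree. A sketch of that lookup, with illustrative names:

#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/irq.h>

/* Fetch the IRQ and recover its trigger from the irq descriptor. */
static int example_probe_irq(struct platform_device *pdev,
			     unsigned long *irq_flags)
{
	u32 trigger;
	int irq = platform_get_irq(pdev, 0);

	if (irq <= 0)
		return -ENODEV;

	trigger = irqd_get_trigger_type(irq_get_irq_data(irq));
	if (*irq_flags == -1 || (trigger & IRQF_TRIGGER_MASK))
		*irq_flags = trigger & IRQF_TRIGGER_MASK;

	return irq;
}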
diff --git a/drivers/net/ethernet/smsc/smsc911x.c b/drivers/net/ethernet/smsc/smsc911x.c
index affb29da353e..77ed74561e5f 100644
--- a/drivers/net/ethernet/smsc/smsc911x.c
+++ b/drivers/net/ethernet/smsc/smsc911x.c
@@ -1342,6 +1342,42 @@ static void smsc911x_rx_multicast_update_workaround(struct smsc911x_data *pdata)
1342 spin_unlock(&pdata->mac_lock); 1342 spin_unlock(&pdata->mac_lock);
1343} 1343}
1344 1344
1345static int smsc911x_phy_general_power_up(struct smsc911x_data *pdata)
1346{
1347 int rc = 0;
1348
1349 if (!pdata->phy_dev)
1350 return rc;
1351
1352 /* If the internal PHY is in General Power-Down mode, everything except
1353 * the management interface is powered down and stays that way as long
1354 * as PHY register bit 0.11 is HIGH.
1355 *
1356 * In that case, clear bit 0.11 so the PHY powers up and we can access
1357 * the PHY registers.
1358 */
1359 rc = phy_read(pdata->phy_dev, MII_BMCR);
1360 if (rc < 0) {
1361 SMSC_WARN(pdata, drv, "Failed reading PHY control reg");
1362 return rc;
1363 }
1364
1365 /* If the PHY general power-down bit is not set, it is not necessary
1366 * to disable the general power-down mode.
1367 */
1368 if (rc & BMCR_PDOWN) {
1369 rc = phy_write(pdata->phy_dev, MII_BMCR, rc & ~BMCR_PDOWN);
1370 if (rc < 0) {
1371 SMSC_WARN(pdata, drv, "Failed writing PHY control reg");
1372 return rc;
1373 }
1374
1375 usleep_range(1000, 1500);
1376 }
1377
1378 return 0;
1379}
1380
1345static int smsc911x_phy_disable_energy_detect(struct smsc911x_data *pdata) 1381static int smsc911x_phy_disable_energy_detect(struct smsc911x_data *pdata)
1346{ 1382{
1347 int rc = 0; 1383 int rc = 0;
@@ -1356,12 +1392,8 @@ static int smsc911x_phy_disable_energy_detect(struct smsc911x_data *pdata)
1356 return rc; 1392 return rc;
1357 } 1393 }
1358 1394
1359 /* 1395 /* Only disable if energy detect mode is already enabled */
1360 * If energy is detected the PHY is already awake so is not necessary 1396 if (rc & MII_LAN83C185_EDPWRDOWN) {
1361 * to disable the energy detect power-down mode.
1362 */
1363 if ((rc & MII_LAN83C185_EDPWRDOWN) &&
1364 !(rc & MII_LAN83C185_ENERGYON)) {
1365 /* Disable energy detect mode for this SMSC Transceivers */ 1397 /* Disable energy detect mode for this SMSC Transceivers */
1366 rc = phy_write(pdata->phy_dev, MII_LAN83C185_CTRL_STATUS, 1398 rc = phy_write(pdata->phy_dev, MII_LAN83C185_CTRL_STATUS,
1367 rc & (~MII_LAN83C185_EDPWRDOWN)); 1399 rc & (~MII_LAN83C185_EDPWRDOWN));
@@ -1370,8 +1402,8 @@ static int smsc911x_phy_disable_energy_detect(struct smsc911x_data *pdata)
1370 SMSC_WARN(pdata, drv, "Failed writing PHY control reg"); 1402 SMSC_WARN(pdata, drv, "Failed writing PHY control reg");
1371 return rc; 1403 return rc;
1372 } 1404 }
1373 1405 /* Allow the PHY to wake up */
1374 mdelay(1); 1406 mdelay(2);
1375 } 1407 }
1376 1408
1377 return 0; 1409 return 0;
@@ -1393,7 +1425,6 @@ static int smsc911x_phy_enable_energy_detect(struct smsc911x_data *pdata)
1393 1425
1394 /* Only enable if energy detect mode is already disabled */ 1426 /* Only enable if energy detect mode is already disabled */
1395 if (!(rc & MII_LAN83C185_EDPWRDOWN)) { 1427 if (!(rc & MII_LAN83C185_EDPWRDOWN)) {
1396 mdelay(100);
1397 /* Enable energy detect mode for this SMSC Transceivers */ 1428 /* Enable energy detect mode for this SMSC Transceivers */
1398 rc = phy_write(pdata->phy_dev, MII_LAN83C185_CTRL_STATUS, 1429 rc = phy_write(pdata->phy_dev, MII_LAN83C185_CTRL_STATUS,
1399 rc | MII_LAN83C185_EDPWRDOWN); 1430 rc | MII_LAN83C185_EDPWRDOWN);
@@ -1402,8 +1433,6 @@ static int smsc911x_phy_enable_energy_detect(struct smsc911x_data *pdata)
1402 SMSC_WARN(pdata, drv, "Failed writing PHY control reg"); 1433 SMSC_WARN(pdata, drv, "Failed writing PHY control reg");
1403 return rc; 1434 return rc;
1404 } 1435 }
1405
1406 mdelay(1);
1407 } 1436 }
1408 return 0; 1437 return 0;
1409} 1438}
@@ -1415,6 +1444,16 @@ static int smsc911x_soft_reset(struct smsc911x_data *pdata)
1415 int ret; 1444 int ret;
1416 1445
1417 /* 1446 /*
1447 * Make sure to power-up the PHY chip before doing a reset, otherwise
1448 * the reset fails.
1449 */
1450 ret = smsc911x_phy_general_power_up(pdata);
1451 if (ret) {
1452 SMSC_WARN(pdata, drv, "Failed to power-up the PHY chip");
1453 return ret;
1454 }
1455
1456 /*
1418 * LAN9210/LAN9211/LAN9220/LAN9221 chips have an internal PHY that 1457 * LAN9210/LAN9211/LAN9220/LAN9221 chips have an internal PHY that
1419 * is initialized in an Energy Detect Power-Down mode that prevents 1458 * is initialized in an Energy Detect Power-Down mode that prevents
1420 * the MAC chip from being software reset. So we have to wake up the PHY 1459 * the MAC chip from being software reset. So we have to wake up the PHY
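The new smsc911x helper is a plain read-modify-write of the BMCR register: read it, and if the General Power-Down bit (0.11) is set, write it back with the bit cleared and give the PHY time to power up. A minimal sketch using the generic phylib accessors:

#include <linux/phy.h>
#include <linux/mii.h>
#include <linux/delay.h>

/* Clear BMCR_PDOWN if set, then wait for the PHY to come back up. */
static int example_phy_power_up(struct phy_device *phydev)
{
	int bmcr = phy_read(phydev, MII_BMCR);

	if (bmcr < 0)
		return bmcr;

	if (bmcr & BMCR_PDOWN) {
		int rc = phy_write(phydev, MII_BMCR, bmcr & ~BMCR_PDOWN);

		if (rc < 0)
			return rc;
		usleep_range(1000, 1500);	/* wait for power-up */
	}
	return 0;
}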
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 6f77a46c7e2c..18c46bb0f3bf 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -276,6 +276,7 @@ static void stmmac_eee_ctrl_timer(unsigned long arg)
276bool stmmac_eee_init(struct stmmac_priv *priv) 276bool stmmac_eee_init(struct stmmac_priv *priv)
277{ 277{
278 char *phy_bus_name = priv->plat->phy_bus_name; 278 char *phy_bus_name = priv->plat->phy_bus_name;
279 unsigned long flags;
279 bool ret = false; 280 bool ret = false;
280 281
281 /* Using PCS we cannot deal with the PHY registers at this stage 282 /* Using PCS we cannot deal with the PHY registers at this stage
@@ -300,6 +301,7 @@ bool stmmac_eee_init(struct stmmac_priv *priv)
300 * changed). 301 * changed).
301 * In that case the driver disables its own timers. 302 * In that case the driver disables its own timers.
302 */ 303 */
304 spin_lock_irqsave(&priv->lock, flags);
303 if (priv->eee_active) { 305 if (priv->eee_active) {
304 pr_debug("stmmac: disable EEE\n"); 306 pr_debug("stmmac: disable EEE\n");
305 del_timer_sync(&priv->eee_ctrl_timer); 307 del_timer_sync(&priv->eee_ctrl_timer);
@@ -307,9 +309,11 @@ bool stmmac_eee_init(struct stmmac_priv *priv)
307 tx_lpi_timer); 309 tx_lpi_timer);
308 } 310 }
309 priv->eee_active = 0; 311 priv->eee_active = 0;
312 spin_unlock_irqrestore(&priv->lock, flags);
310 goto out; 313 goto out;
311 } 314 }
312 /* Activate the EEE and start timers */ 315 /* Activate the EEE and start timers */
316 spin_lock_irqsave(&priv->lock, flags);
313 if (!priv->eee_active) { 317 if (!priv->eee_active) {
314 priv->eee_active = 1; 318 priv->eee_active = 1;
315 init_timer(&priv->eee_ctrl_timer); 319 init_timer(&priv->eee_ctrl_timer);
@@ -325,9 +329,10 @@ bool stmmac_eee_init(struct stmmac_priv *priv)
325 /* Set HW EEE according to the speed */ 329 /* Set HW EEE according to the speed */
326 priv->hw->mac->set_eee_pls(priv->hw, priv->phydev->link); 330 priv->hw->mac->set_eee_pls(priv->hw, priv->phydev->link);
327 331
328 pr_debug("stmmac: Energy-Efficient Ethernet initialized\n");
329
330 ret = true; 332 ret = true;
333 spin_unlock_irqrestore(&priv->lock, flags);
334
335 pr_debug("stmmac: Energy-Efficient Ethernet initialized\n");
331 } 336 }
332out: 337out:
333 return ret; 338 return ret;
@@ -760,12 +765,12 @@ static void stmmac_adjust_link(struct net_device *dev)
760 if (new_state && netif_msg_link(priv)) 765 if (new_state && netif_msg_link(priv))
761 phy_print_status(phydev); 766 phy_print_status(phydev);
762 767
768 spin_unlock_irqrestore(&priv->lock, flags);
769
763 /* At this stage, it could be needed to setup the EEE or adjust some 770 /* At this stage, it could be needed to setup the EEE or adjust some
764 * MAC related HW registers. 771 * MAC related HW registers.
765 */ 772 */
766 priv->eee_enabled = stmmac_eee_init(priv); 773 priv->eee_enabled = stmmac_eee_init(priv);
767
768 spin_unlock_irqrestore(&priv->lock, flags);
769} 774}
770 775
771/** 776/**
@@ -959,12 +964,12 @@ static void stmmac_clear_descriptors(struct stmmac_priv *priv)
959} 964}
960 965
961static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p, 966static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
962 int i) 967 int i, gfp_t flags)
963{ 968{
964 struct sk_buff *skb; 969 struct sk_buff *skb;
965 970
966 skb = __netdev_alloc_skb(priv->dev, priv->dma_buf_sz + NET_IP_ALIGN, 971 skb = __netdev_alloc_skb(priv->dev, priv->dma_buf_sz + NET_IP_ALIGN,
967 GFP_KERNEL); 972 flags);
968 if (!skb) { 973 if (!skb) {
969 pr_err("%s: Rx init fails; skb is NULL\n", __func__); 974 pr_err("%s: Rx init fails; skb is NULL\n", __func__);
970 return -ENOMEM; 975 return -ENOMEM;
@@ -1006,7 +1011,7 @@ static void stmmac_free_rx_buffers(struct stmmac_priv *priv, int i)
1006 * and allocates the socket buffers. It supports the chained and ring 1011 * and allocates the socket buffers. It supports the chained and ring
1007 * modes. 1012 * modes.
1008 */ 1013 */
1009static int init_dma_desc_rings(struct net_device *dev) 1014static int init_dma_desc_rings(struct net_device *dev, gfp_t flags)
1010{ 1015{
1011 int i; 1016 int i;
1012 struct stmmac_priv *priv = netdev_priv(dev); 1017 struct stmmac_priv *priv = netdev_priv(dev);
@@ -1041,7 +1046,7 @@ static int init_dma_desc_rings(struct net_device *dev)
1041 else 1046 else
1042 p = priv->dma_rx + i; 1047 p = priv->dma_rx + i;
1043 1048
1044 ret = stmmac_init_rx_buffers(priv, p, i); 1049 ret = stmmac_init_rx_buffers(priv, p, i, flags);
1045 if (ret) 1050 if (ret)
1046 goto err_init_rx_buffers; 1051 goto err_init_rx_buffers;
1047 1052
@@ -1647,11 +1652,6 @@ static int stmmac_hw_setup(struct net_device *dev)
1647 struct stmmac_priv *priv = netdev_priv(dev); 1652 struct stmmac_priv *priv = netdev_priv(dev);
1648 int ret; 1653 int ret;
1649 1654
1650 ret = init_dma_desc_rings(dev);
1651 if (ret < 0) {
1652 pr_err("%s: DMA descriptors initialization failed\n", __func__);
1653 return ret;
1654 }
1655 /* DMA initialization and SW reset */ 1655 /* DMA initialization and SW reset */
1656 ret = stmmac_init_dma_engine(priv); 1656 ret = stmmac_init_dma_engine(priv);
1657 if (ret < 0) { 1657 if (ret < 0) {
@@ -1705,10 +1705,6 @@ static int stmmac_hw_setup(struct net_device *dev)
1705 } 1705 }
1706 priv->tx_lpi_timer = STMMAC_DEFAULT_TWT_LS; 1706 priv->tx_lpi_timer = STMMAC_DEFAULT_TWT_LS;
1707 1707
1708 priv->eee_enabled = stmmac_eee_init(priv);
1709
1710 stmmac_init_tx_coalesce(priv);
1711
1712 if ((priv->use_riwt) && (priv->hw->dma->rx_watchdog)) { 1708 if ((priv->use_riwt) && (priv->hw->dma->rx_watchdog)) {
1713 priv->rx_riwt = MAX_DMA_RIWT; 1709 priv->rx_riwt = MAX_DMA_RIWT;
1714 priv->hw->dma->rx_watchdog(priv->ioaddr, MAX_DMA_RIWT); 1710 priv->hw->dma->rx_watchdog(priv->ioaddr, MAX_DMA_RIWT);
@@ -1761,12 +1757,20 @@ static int stmmac_open(struct net_device *dev)
1761 goto dma_desc_error; 1757 goto dma_desc_error;
1762 } 1758 }
1763 1759
1760 ret = init_dma_desc_rings(dev, GFP_KERNEL);
1761 if (ret < 0) {
1762 pr_err("%s: DMA descriptors initialization failed\n", __func__);
1763 goto init_error;
1764 }
1765
1764 ret = stmmac_hw_setup(dev); 1766 ret = stmmac_hw_setup(dev);
1765 if (ret < 0) { 1767 if (ret < 0) {
1766 pr_err("%s: Hw setup failed\n", __func__); 1768 pr_err("%s: Hw setup failed\n", __func__);
1767 goto init_error; 1769 goto init_error;
1768 } 1770 }
1769 1771
1772 stmmac_init_tx_coalesce(priv);
1773
1770 if (priv->phydev) 1774 if (priv->phydev)
1771 phy_start(priv->phydev); 1775 phy_start(priv->phydev);
1772 1776
@@ -1894,7 +1898,10 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
1894 unsigned int nopaged_len = skb_headlen(skb); 1898 unsigned int nopaged_len = skb_headlen(skb);
1895 unsigned int enh_desc = priv->plat->enh_desc; 1899 unsigned int enh_desc = priv->plat->enh_desc;
1896 1900
1901 spin_lock(&priv->tx_lock);
1902
1897 if (unlikely(stmmac_tx_avail(priv) < nfrags + 1)) { 1903 if (unlikely(stmmac_tx_avail(priv) < nfrags + 1)) {
1904 spin_unlock(&priv->tx_lock);
1898 if (!netif_queue_stopped(dev)) { 1905 if (!netif_queue_stopped(dev)) {
1899 netif_stop_queue(dev); 1906 netif_stop_queue(dev);
1900 /* This is a hard error, log it. */ 1907 /* This is a hard error, log it. */
@@ -1903,8 +1910,6 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
1903 return NETDEV_TX_BUSY; 1910 return NETDEV_TX_BUSY;
1904 } 1911 }
1905 1912
1906 spin_lock(&priv->tx_lock);
1907
1908 if (priv->tx_path_in_lpi_mode) 1913 if (priv->tx_path_in_lpi_mode)
1909 stmmac_disable_eee_mode(priv); 1914 stmmac_disable_eee_mode(priv);
1910 1915
@@ -2025,6 +2030,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
2025 return NETDEV_TX_OK; 2030 return NETDEV_TX_OK;
2026 2031
2027dma_map_err: 2032dma_map_err:
2033 spin_unlock(&priv->tx_lock);
2028 dev_err(priv->device, "Tx dma map failed\n"); 2034 dev_err(priv->device, "Tx dma map failed\n");
2029 dev_kfree_skb(skb); 2035 dev_kfree_skb(skb);
2030 priv->dev->stats.tx_dropped++; 2036 priv->dev->stats.tx_dropped++;
@@ -2281,9 +2287,7 @@ static void stmmac_set_rx_mode(struct net_device *dev)
2281{ 2287{
2282 struct stmmac_priv *priv = netdev_priv(dev); 2288 struct stmmac_priv *priv = netdev_priv(dev);
2283 2289
2284 spin_lock(&priv->lock);
2285 priv->hw->mac->set_filter(priv->hw, dev); 2290 priv->hw->mac->set_filter(priv->hw, dev);
2286 spin_unlock(&priv->lock);
2287} 2291}
2288 2292
2289/** 2293/**
@@ -2950,7 +2954,7 @@ int stmmac_suspend(struct net_device *ndev)
2950 stmmac_set_mac(priv->ioaddr, false); 2954 stmmac_set_mac(priv->ioaddr, false);
2951 pinctrl_pm_select_sleep_state(priv->device); 2955 pinctrl_pm_select_sleep_state(priv->device);
2952 /* Disable the clock in case PWM is off */ 2956 /* Disable the clock in case PWM is off */
2953 clk_disable_unprepare(priv->stmmac_clk); 2957 clk_disable(priv->stmmac_clk);
2954 } 2958 }
2955 spin_unlock_irqrestore(&priv->lock, flags); 2959 spin_unlock_irqrestore(&priv->lock, flags);
2956 2960
@@ -2982,7 +2986,7 @@ int stmmac_resume(struct net_device *ndev)
2982 } else { 2986 } else {
2983 pinctrl_pm_select_default_state(priv->device); 2987 pinctrl_pm_select_default_state(priv->device);
2984 /* enable the clk previously disabled */ 2988 /* enable the clk previously disabled */
2985 clk_prepare_enable(priv->stmmac_clk); 2989 clk_enable(priv->stmmac_clk);
2986 /* reset the phy so that it's ready */ 2990 /* reset the phy so that it's ready */
2987 if (priv->mii) 2991 if (priv->mii)
2988 stmmac_mdio_reset(priv->mii); 2992 stmmac_mdio_reset(priv->mii);
@@ -2990,7 +2994,9 @@ int stmmac_resume(struct net_device *ndev)
2990 2994
2991 netif_device_attach(ndev); 2995 netif_device_attach(ndev);
2992 2996
2997 init_dma_desc_rings(ndev, GFP_ATOMIC);
2993 stmmac_hw_setup(ndev); 2998 stmmac_hw_setup(ndev);
2999 stmmac_init_tx_coalesce(priv);
2994 3000
2995 napi_enable(&priv->napi); 3001 napi_enable(&priv->napi);
2996 3002
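The stmmac ring initialization now takes a gfp_t so one helper serves both callers: ndo_open, where sleeping GFP_KERNEL allocations are fine, and resume, which runs in atomic context and must pass GFP_ATOMIC. A sketch of that plumbing with illustrative names:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* One refill helper, two calling contexts; only the gfp_t differs. */
static int example_refill_ring(struct net_device *dev,
			       struct sk_buff **ring, int n,
			       unsigned int bufsz, gfp_t flags)
{
	int i;

	for (i = 0; i < n; i++) {
		ring[i] = __netdev_alloc_skb(dev, bufsz + NET_IP_ALIGN,
					     flags);
		if (!ring[i])
			return -ENOMEM;
	}
	return 0;
}

/* open():   example_refill_ring(dev, ring, n, sz, GFP_KERNEL);
 * resume(): example_refill_ring(dev, ring, n, sz, GFP_ATOMIC);  */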
diff --git a/drivers/net/ethernet/sun/sunhme.c b/drivers/net/ethernet/sun/sunhme.c
index 72c8525d5457..9c014803b03b 100644
--- a/drivers/net/ethernet/sun/sunhme.c
+++ b/drivers/net/ethernet/sun/sunhme.c
@@ -1262,6 +1262,7 @@ static void happy_meal_init_rings(struct happy_meal *hp)
1262 HMD(("init rxring, ")); 1262 HMD(("init rxring, "));
1263 for (i = 0; i < RX_RING_SIZE; i++) { 1263 for (i = 0; i < RX_RING_SIZE; i++) {
1264 struct sk_buff *skb; 1264 struct sk_buff *skb;
1265 u32 mapping;
1265 1266
1266 skb = happy_meal_alloc_skb(RX_BUF_ALLOC_SIZE, GFP_ATOMIC); 1267 skb = happy_meal_alloc_skb(RX_BUF_ALLOC_SIZE, GFP_ATOMIC);
1267 if (!skb) { 1268 if (!skb) {
@@ -1272,10 +1273,16 @@ static void happy_meal_init_rings(struct happy_meal *hp)
1272 1273
1273 /* Because we reserve afterwards. */ 1274 /* Because we reserve afterwards. */
1274 skb_put(skb, (ETH_FRAME_LEN + RX_OFFSET + 4)); 1275 skb_put(skb, (ETH_FRAME_LEN + RX_OFFSET + 4));
1276 mapping = dma_map_single(hp->dma_dev, skb->data, RX_BUF_ALLOC_SIZE,
1277 DMA_FROM_DEVICE);
1278 if (dma_mapping_error(hp->dma_dev, mapping)) {
1279 dev_kfree_skb_any(skb);
1280 hme_write_rxd(hp, &hb->happy_meal_rxd[i], 0, 0);
1281 continue;
1282 }
1275 hme_write_rxd(hp, &hb->happy_meal_rxd[i], 1283 hme_write_rxd(hp, &hb->happy_meal_rxd[i],
1276 (RXFLAG_OWN | ((RX_BUF_ALLOC_SIZE - RX_OFFSET) << 16)), 1284 (RXFLAG_OWN | ((RX_BUF_ALLOC_SIZE - RX_OFFSET) << 16)),
1277 dma_map_single(hp->dma_dev, skb->data, RX_BUF_ALLOC_SIZE, 1285 mapping);
1278 DMA_FROM_DEVICE));
1279 skb_reserve(skb, RX_OFFSET); 1286 skb_reserve(skb, RX_OFFSET);
1280 } 1287 }
1281 1288
@@ -2020,6 +2027,7 @@ static void happy_meal_rx(struct happy_meal *hp, struct net_device *dev)
2020 skb = hp->rx_skbs[elem]; 2027 skb = hp->rx_skbs[elem];
2021 if (len > RX_COPY_THRESHOLD) { 2028 if (len > RX_COPY_THRESHOLD) {
2022 struct sk_buff *new_skb; 2029 struct sk_buff *new_skb;
2030 u32 mapping;
2023 2031
2024 /* Now refill the entry, if we can. */ 2032 /* Now refill the entry, if we can. */
2025 new_skb = happy_meal_alloc_skb(RX_BUF_ALLOC_SIZE, GFP_ATOMIC); 2033 new_skb = happy_meal_alloc_skb(RX_BUF_ALLOC_SIZE, GFP_ATOMIC);
@@ -2027,13 +2035,21 @@ static void happy_meal_rx(struct happy_meal *hp, struct net_device *dev)
2027 drops++; 2035 drops++;
2028 goto drop_it; 2036 goto drop_it;
2029 } 2037 }
2038 skb_put(new_skb, (ETH_FRAME_LEN + RX_OFFSET + 4));
2039 mapping = dma_map_single(hp->dma_dev, new_skb->data,
2040 RX_BUF_ALLOC_SIZE,
2041 DMA_FROM_DEVICE);
2042 if (unlikely(dma_mapping_error(hp->dma_dev, mapping))) {
2043 dev_kfree_skb_any(new_skb);
2044 drops++;
2045 goto drop_it;
2046 }
2047
2030 dma_unmap_single(hp->dma_dev, dma_addr, RX_BUF_ALLOC_SIZE, DMA_FROM_DEVICE); 2048 dma_unmap_single(hp->dma_dev, dma_addr, RX_BUF_ALLOC_SIZE, DMA_FROM_DEVICE);
2031 hp->rx_skbs[elem] = new_skb; 2049 hp->rx_skbs[elem] = new_skb;
2032 skb_put(new_skb, (ETH_FRAME_LEN + RX_OFFSET + 4));
2033 hme_write_rxd(hp, this, 2050 hme_write_rxd(hp, this,
2034 (RXFLAG_OWN|((RX_BUF_ALLOC_SIZE-RX_OFFSET)<<16)), 2051 (RXFLAG_OWN|((RX_BUF_ALLOC_SIZE-RX_OFFSET)<<16)),
2035 dma_map_single(hp->dma_dev, new_skb->data, RX_BUF_ALLOC_SIZE, 2052 mapping);
2036 DMA_FROM_DEVICE));
2037 skb_reserve(new_skb, RX_OFFSET); 2053 skb_reserve(new_skb, RX_OFFSET);
2038 2054
2039 /* Trim the original skb for the netif. */ 2055 /* Trim the original skb for the netif. */
@@ -2248,6 +2264,25 @@ static void happy_meal_tx_timeout(struct net_device *dev)
2248 netif_wake_queue(dev); 2264 netif_wake_queue(dev);
2249} 2265}
2250 2266
2267static void unmap_partial_tx_skb(struct happy_meal *hp, u32 first_mapping,
2268 u32 first_len, u32 first_entry, u32 entry)
2269{
2270 struct happy_meal_txd *txbase = &hp->happy_block->happy_meal_txd[0];
2271
2272 dma_unmap_single(hp->dma_dev, first_mapping, first_len, DMA_TO_DEVICE);
2273
2274 first_entry = NEXT_TX(first_entry);
2275 while (first_entry != entry) {
2276 struct happy_meal_txd *this = &txbase[first_entry];
2277 u32 addr, len;
2278
2279 addr = hme_read_desc32(hp, &this->tx_addr);
2280 len = hme_read_desc32(hp, &this->tx_flags);
2281 len &= TXFLAG_SIZE;
2282 dma_unmap_page(hp->dma_dev, addr, len, DMA_TO_DEVICE);
2283 first_entry = NEXT_TX(first_entry); /* advance, or this loop never terminates */
2284 }
2285 }
2286
2251static netdev_tx_t happy_meal_start_xmit(struct sk_buff *skb, 2286static netdev_tx_t happy_meal_start_xmit(struct sk_buff *skb,
2252 struct net_device *dev) 2287 struct net_device *dev)
2253{ 2288{
@@ -2284,6 +2319,8 @@ static netdev_tx_t happy_meal_start_xmit(struct sk_buff *skb,
2284 2319
2285 len = skb->len; 2320 len = skb->len;
2286 mapping = dma_map_single(hp->dma_dev, skb->data, len, DMA_TO_DEVICE); 2321 mapping = dma_map_single(hp->dma_dev, skb->data, len, DMA_TO_DEVICE);
2322 if (unlikely(dma_mapping_error(hp->dma_dev, mapping)))
2323 goto out_dma_error;
2287 tx_flags |= (TXFLAG_SOP | TXFLAG_EOP); 2324 tx_flags |= (TXFLAG_SOP | TXFLAG_EOP);
2288 hme_write_txd(hp, &hp->happy_block->happy_meal_txd[entry], 2325 hme_write_txd(hp, &hp->happy_block->happy_meal_txd[entry],
2289 (tx_flags | (len & TXFLAG_SIZE)), 2326 (tx_flags | (len & TXFLAG_SIZE)),
@@ -2299,6 +2336,8 @@ static netdev_tx_t happy_meal_start_xmit(struct sk_buff *skb,
2299 first_len = skb_headlen(skb); 2336 first_len = skb_headlen(skb);
2300 first_mapping = dma_map_single(hp->dma_dev, skb->data, first_len, 2337 first_mapping = dma_map_single(hp->dma_dev, skb->data, first_len,
2301 DMA_TO_DEVICE); 2338 DMA_TO_DEVICE);
2339 if (unlikely(dma_mapping_error(hp->dma_dev, first_mapping)))
2340 goto out_dma_error;
2302 entry = NEXT_TX(entry); 2341 entry = NEXT_TX(entry);
2303 2342
2304 for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) { 2343 for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) {
@@ -2308,6 +2347,11 @@ static netdev_tx_t happy_meal_start_xmit(struct sk_buff *skb,
2308 len = skb_frag_size(this_frag); 2347 len = skb_frag_size(this_frag);
2309 mapping = skb_frag_dma_map(hp->dma_dev, this_frag, 2348 mapping = skb_frag_dma_map(hp->dma_dev, this_frag,
2310 0, len, DMA_TO_DEVICE); 2349 0, len, DMA_TO_DEVICE);
2350 if (unlikely(dma_mapping_error(hp->dma_dev, mapping))) {
2351 unmap_partial_tx_skb(hp, first_mapping, first_len,
2352 first_entry, entry);
2353 goto out_dma_error;
2354 }
2311 this_txflags = tx_flags; 2355 this_txflags = tx_flags;
2312 if (frag == skb_shinfo(skb)->nr_frags - 1) 2356 if (frag == skb_shinfo(skb)->nr_frags - 1)
2313 this_txflags |= TXFLAG_EOP; 2357 this_txflags |= TXFLAG_EOP;
@@ -2333,6 +2377,14 @@ static netdev_tx_t happy_meal_start_xmit(struct sk_buff *skb,
2333 2377
2334 tx_add_log(hp, TXLOG_ACTION_TXMIT, 0); 2378 tx_add_log(hp, TXLOG_ACTION_TXMIT, 0);
2335 return NETDEV_TX_OK; 2379 return NETDEV_TX_OK;
2380
2381out_dma_error:
2382 hp->tx_skbs[hp->tx_new] = NULL;
2383 spin_unlock_irq(&hp->happy_lock);
2384
2385 dev_kfree_skb_any(skb);
2386 dev->stats.tx_dropped++;
2387 return NETDEV_TX_OK;
2336} 2388}
2337 2389
2338static struct net_device_stats *happy_meal_get_stats(struct net_device *dev) 2390static struct net_device_stats *happy_meal_get_stats(struct net_device *dev)
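The sunhme changes check every DMA mapping with dma_mapping_error() and, when a fragment mapping fails mid-packet, unwind the mappings already established before dropping the skb. A self-contained sketch of the check-and-unwind idiom (names are illustrative, not the driver's):

#include <linux/dma-mapping.h>
#include <linux/skbuff.h>

/* Map head then fragments; on failure, undo everything mapped so far. */
static int example_map_tx_skb(struct device *dev, struct sk_buff *skb,
			      dma_addr_t *head, dma_addr_t *frag_addrs)
{
	int i;

	*head = dma_map_single(dev, skb->data, skb_headlen(skb),
			       DMA_TO_DEVICE);
	if (dma_mapping_error(dev, *head))
		return -ENOMEM;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		frag_addrs[i] = skb_frag_dma_map(dev, frag, 0,
						 skb_frag_size(frag),
						 DMA_TO_DEVICE);
		if (dma_mapping_error(dev, frag_addrs[i]))
			goto unwind;
	}
	return 0;

unwind:
	while (--i >= 0)
		dma_unmap_page(dev, frag_addrs[i],
			       skb_frag_size(&skb_shinfo(skb)->frags[i]),
			       DMA_TO_DEVICE);
	dma_unmap_single(dev, *head, skb_headlen(skb), DMA_TO_DEVICE);
	return -ENOMEM;
}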
diff --git a/drivers/net/ethernet/ti/cpsw_ale.c b/drivers/net/ethernet/ti/cpsw_ale.c
index 3ae83879a75f..097ebe7077ac 100644
--- a/drivers/net/ethernet/ti/cpsw_ale.c
+++ b/drivers/net/ethernet/ti/cpsw_ale.c
@@ -785,7 +785,6 @@ int cpsw_ale_destroy(struct cpsw_ale *ale)
785{ 785{
786 if (!ale) 786 if (!ale)
787 return -EINVAL; 787 return -EINVAL;
788 cpsw_ale_stop(ale);
789 cpsw_ale_control_set(ale, 0, ALE_ENABLE, 0); 788 cpsw_ale_control_set(ale, 0, ALE_ENABLE, 0);
790 kfree(ale); 789 kfree(ale);
791 return 0; 790 return 0;
diff --git a/drivers/net/ethernet/ti/cpts.c b/drivers/net/ethernet/ti/cpts.c
index ab92f67da035..4a4388b813ac 100644
--- a/drivers/net/ethernet/ti/cpts.c
+++ b/drivers/net/ethernet/ti/cpts.c
@@ -264,7 +264,7 @@ static int cpts_match(struct sk_buff *skb, unsigned int ptp_class,
264 264
265 switch (ptp_class & PTP_CLASS_PMASK) { 265 switch (ptp_class & PTP_CLASS_PMASK) {
266 case PTP_CLASS_IPV4: 266 case PTP_CLASS_IPV4:
267 offset += ETH_HLEN + IPV4_HLEN(data) + UDP_HLEN; 267 offset += ETH_HLEN + IPV4_HLEN(data + offset) + UDP_HLEN;
268 break; 268 break;
269 case PTP_CLASS_IPV6: 269 case PTP_CLASS_IPV6:
270 offset += ETH_HLEN + IP6_HLEN + UDP_HLEN; 270 offset += ETH_HLEN + IP6_HLEN + UDP_HLEN;
diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
index 6f226de655a4..880cc090dc44 100644
--- a/drivers/net/macvtap.c
+++ b/drivers/net/macvtap.c
@@ -629,6 +629,8 @@ static void macvtap_skb_to_vnet_hdr(const struct sk_buff *skb,
629 if (skb->ip_summed == CHECKSUM_PARTIAL) { 629 if (skb->ip_summed == CHECKSUM_PARTIAL) {
630 vnet_hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM; 630 vnet_hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
631 vnet_hdr->csum_start = skb_checksum_start_offset(skb); 631 vnet_hdr->csum_start = skb_checksum_start_offset(skb);
632 if (vlan_tx_tag_present(skb))
633 vnet_hdr->csum_start += VLAN_HLEN;
632 vnet_hdr->csum_offset = skb->csum_offset; 634 vnet_hdr->csum_offset = skb->csum_offset;
633 } else if (skb->ip_summed == CHECKSUM_UNNECESSARY) { 635 } else if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
634 vnet_hdr->flags = VIRTIO_NET_HDR_F_DATA_VALID; 636 vnet_hdr->flags = VIRTIO_NET_HDR_F_DATA_VALID;
diff --git a/drivers/net/phy/dp83640.c b/drivers/net/phy/dp83640.c
index 2954052706e8..e22e602beef3 100644
--- a/drivers/net/phy/dp83640.c
+++ b/drivers/net/phy/dp83640.c
@@ -791,7 +791,7 @@ static int match(struct sk_buff *skb, unsigned int type, struct rxts *rxts)
791 791
792 switch (type & PTP_CLASS_PMASK) { 792 switch (type & PTP_CLASS_PMASK) {
793 case PTP_CLASS_IPV4: 793 case PTP_CLASS_IPV4:
794 offset += ETH_HLEN + IPV4_HLEN(data) + UDP_HLEN; 794 offset += ETH_HLEN + IPV4_HLEN(data + offset) + UDP_HLEN;
795 break; 795 break;
796 case PTP_CLASS_IPV6: 796 case PTP_CLASS_IPV6:
797 offset += ETH_HLEN + IP6_HLEN + UDP_HLEN; 797 offset += ETH_HLEN + IP6_HLEN + UDP_HLEN;
@@ -934,7 +934,7 @@ static int is_sync(struct sk_buff *skb, int type)
934 934
935 switch (type & PTP_CLASS_PMASK) { 935 switch (type & PTP_CLASS_PMASK) {
936 case PTP_CLASS_IPV4: 936 case PTP_CLASS_IPV4:
937 offset += ETH_HLEN + IPV4_HLEN(data) + UDP_HLEN; 937 offset += ETH_HLEN + IPV4_HLEN(data + offset) + UDP_HLEN;
938 break; 938 break;
939 case PTP_CLASS_IPV6: 939 case PTP_CLASS_IPV6:
940 offset += ETH_HLEN + IP6_HLEN + UDP_HLEN; 940 offset += ETH_HLEN + IP6_HLEN + UDP_HLEN;
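IPV4_HLEN() dereferences the IP header's IHL field at a fixed distance from the start of the Ethernet frame, so once a VLAN tag has pushed offset past zero the macro has to be evaluated at data + offset; that is the one-line fix applied in both cpts.c and dp83640.c above. A hedged sketch of the corrected computation:

#include <linux/ptp_classify.h>
#include <linux/if_ether.h>

/* Compute the offset of the PTP payload for the common transports.
 * "offset" is non-zero when a VLAN tag precedes the IP header, and
 * IPV4_HLEN() must be applied at that shifted position. */
static unsigned int example_ptp_offset(u8 *data, unsigned int offset,
				       unsigned int ptp_class)
{
	switch (ptp_class & PTP_CLASS_PMASK) {
	case PTP_CLASS_IPV4:
		return offset + ETH_HLEN + IPV4_HLEN(data + offset) + UDP_HLEN;
	case PTP_CLASS_IPV6:
		return offset + ETH_HLEN + IP6_HLEN + UDP_HLEN;
	case PTP_CLASS_L2:
		return offset + ETH_HLEN;
	}
	return offset;
}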
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index 1dfffdc9dfc3..767cd110f496 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -352,6 +352,7 @@ int phy_mii_ioctl(struct phy_device *phydev, struct ifreq *ifr, int cmd)
352{ 352{
353 struct mii_ioctl_data *mii_data = if_mii(ifr); 353 struct mii_ioctl_data *mii_data = if_mii(ifr);
354 u16 val = mii_data->val_in; 354 u16 val = mii_data->val_in;
355 bool change_autoneg = false;
355 356
356 switch (cmd) { 357 switch (cmd) {
357 case SIOCGMIIPHY: 358 case SIOCGMIIPHY:
@@ -367,22 +368,29 @@ int phy_mii_ioctl(struct phy_device *phydev, struct ifreq *ifr, int cmd)
367 if (mii_data->phy_id == phydev->addr) { 368 if (mii_data->phy_id == phydev->addr) {
368 switch (mii_data->reg_num) { 369 switch (mii_data->reg_num) {
369 case MII_BMCR: 370 case MII_BMCR:
370 if ((val & (BMCR_RESET | BMCR_ANENABLE)) == 0) 371 if ((val & (BMCR_RESET | BMCR_ANENABLE)) == 0) {
372 if (phydev->autoneg == AUTONEG_ENABLE)
373 change_autoneg = true;
371 phydev->autoneg = AUTONEG_DISABLE; 374 phydev->autoneg = AUTONEG_DISABLE;
372 else 375 if (val & BMCR_FULLDPLX)
376 phydev->duplex = DUPLEX_FULL;
377 else
378 phydev->duplex = DUPLEX_HALF;
379 if (val & BMCR_SPEED1000)
380 phydev->speed = SPEED_1000;
381 else if (val & BMCR_SPEED100)
382 phydev->speed = SPEED_100;
383 else
384 phydev->speed = SPEED_10;
385 } else {
386 if (phydev->autoneg == AUTONEG_DISABLE)
387 change_autoneg = true;
373 phydev->autoneg = AUTONEG_ENABLE; 388 phydev->autoneg = AUTONEG_ENABLE;
374 if (!phydev->autoneg && (val & BMCR_FULLDPLX)) 389 }
375 phydev->duplex = DUPLEX_FULL;
376 else
377 phydev->duplex = DUPLEX_HALF;
378 if (!phydev->autoneg && (val & BMCR_SPEED1000))
379 phydev->speed = SPEED_1000;
380 else if (!phydev->autoneg &&
381 (val & BMCR_SPEED100))
382 phydev->speed = SPEED_100;
383 break; 390 break;
384 case MII_ADVERTISE: 391 case MII_ADVERTISE:
385 phydev->advertising = val; 392 phydev->advertising = mii_adv_to_ethtool_adv_t(val);
393 change_autoneg = true;
386 break; 394 break;
387 default: 395 default:
388 /* do nothing */ 396 /* do nothing */
@@ -396,6 +404,10 @@ int phy_mii_ioctl(struct phy_device *phydev, struct ifreq *ifr, int cmd)
396 if (mii_data->reg_num == MII_BMCR && 404 if (mii_data->reg_num == MII_BMCR &&
397 val & BMCR_RESET) 405 val & BMCR_RESET)
398 return phy_init_hw(phydev); 406 return phy_init_hw(phydev);
407
408 if (change_autoneg)
409 return phy_start_aneg(phydev);
410
399 return 0; 411 return 0;
400 412
401 case SIOCSHWTSTAMP: 413 case SIOCSHWTSTAMP:
diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
index 68c3a3f4e0ab..794a47329368 100644
--- a/drivers/net/ppp/ppp_generic.c
+++ b/drivers/net/ppp/ppp_generic.c
@@ -755,23 +755,23 @@ static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
755 755
756 err = get_filter(argp, &code); 756 err = get_filter(argp, &code);
757 if (err >= 0) { 757 if (err >= 0) {
758 struct bpf_prog *pass_filter = NULL;
758 struct sock_fprog_kern fprog = { 759 struct sock_fprog_kern fprog = {
759 .len = err, 760 .len = err,
760 .filter = code, 761 .filter = code,
761 }; 762 };
762 763
763 ppp_lock(ppp); 764 err = 0;
764 if (ppp->pass_filter) { 765 if (fprog.filter)
765 bpf_prog_destroy(ppp->pass_filter); 766 err = bpf_prog_create(&pass_filter, &fprog);
766 ppp->pass_filter = NULL; 767 if (!err) {
768 ppp_lock(ppp);
769 if (ppp->pass_filter)
770 bpf_prog_destroy(ppp->pass_filter);
771 ppp->pass_filter = pass_filter;
772 ppp_unlock(ppp);
767 } 773 }
768 if (fprog.filter != NULL)
769 err = bpf_prog_create(&ppp->pass_filter,
770 &fprog);
771 else
772 err = 0;
773 kfree(code); 774 kfree(code);
774 ppp_unlock(ppp);
775 } 775 }
776 break; 776 break;
777 } 777 }
@@ -781,23 +781,23 @@ static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
781 781
782 err = get_filter(argp, &code); 782 err = get_filter(argp, &code);
783 if (err >= 0) { 783 if (err >= 0) {
784 struct bpf_prog *active_filter = NULL;
784 struct sock_fprog_kern fprog = { 785 struct sock_fprog_kern fprog = {
785 .len = err, 786 .len = err,
786 .filter = code, 787 .filter = code,
787 }; 788 };
788 789
789 ppp_lock(ppp); 790 err = 0;
790 if (ppp->active_filter) { 791 if (fprog.filter)
791 bpf_prog_destroy(ppp->active_filter); 792 err = bpf_prog_create(&active_filter, &fprog);
792 ppp->active_filter = NULL; 793 if (!err) {
794 ppp_lock(ppp);
795 if (ppp->active_filter)
796 bpf_prog_destroy(ppp->active_filter);
797 ppp->active_filter = active_filter;
798 ppp_unlock(ppp);
793 } 799 }
794 if (fprog.filter != NULL)
795 err = bpf_prog_create(&ppp->active_filter,
796 &fprog);
797 else
798 err = 0;
799 kfree(code); 800 kfree(code);
800 ppp_unlock(ppp);
801 } 801 }
802 break; 802 break;
803 } 803 }
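bpf_prog_create() can sleep, so the ppp_ioctl hunks above build the new filter before taking ppp_lock (a spinlock) and only swap the pointer while the lock is held. A sketch of that allocate-outside, publish-inside pattern with illustrative names:

#include <linux/filter.h>
#include <linux/spinlock.h>

/* Build the program unlocked, then publish it under the lock. */
static int example_set_filter(spinlock_t *lock, struct bpf_prog **slot,
			      struct sock_fprog_kern *fprog)
{
	struct bpf_prog *new = NULL;
	int err = 0;

	if (fprog->filter)
		err = bpf_prog_create(&new, fprog);	/* may sleep */
	if (err)
		return err;

	spin_lock_bh(lock);
	if (*slot)
		bpf_prog_destroy(*slot);
	*slot = new;
	spin_unlock_bh(lock);
	return 0;
}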
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 7302398f0b1f..9dd3746994a4 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -1235,12 +1235,20 @@ static ssize_t tun_put_user(struct tun_struct *tun,
1235 struct tun_pi pi = { 0, skb->protocol }; 1235 struct tun_pi pi = { 0, skb->protocol };
1236 ssize_t total = 0; 1236 ssize_t total = 0;
1237 int vlan_offset = 0, copied; 1237 int vlan_offset = 0, copied;
1238 int vlan_hlen = 0;
1239 int vnet_hdr_sz = 0;
1240
1241 if (vlan_tx_tag_present(skb))
1242 vlan_hlen = VLAN_HLEN;
1243
1244 if (tun->flags & TUN_VNET_HDR)
1245 vnet_hdr_sz = tun->vnet_hdr_sz;
1238 1246
1239 if (!(tun->flags & TUN_NO_PI)) { 1247 if (!(tun->flags & TUN_NO_PI)) {
1240 if ((len -= sizeof(pi)) < 0) 1248 if ((len -= sizeof(pi)) < 0)
1241 return -EINVAL; 1249 return -EINVAL;
1242 1250
1243 if (len < skb->len) { 1251 if (len < skb->len + vlan_hlen + vnet_hdr_sz) {
1244 /* Packet will be stripped */ 1252 /* Packet will be stripped */
1245 pi.flags |= TUN_PKT_STRIP; 1253 pi.flags |= TUN_PKT_STRIP;
1246 } 1254 }
@@ -1250,9 +1258,9 @@ static ssize_t tun_put_user(struct tun_struct *tun,
1250 total += sizeof(pi); 1258 total += sizeof(pi);
1251 } 1259 }
1252 1260
1253 if (tun->flags & TUN_VNET_HDR) { 1261 if (vnet_hdr_sz) {
1254 struct virtio_net_hdr gso = { 0 }; /* no info leak */ 1262 struct virtio_net_hdr gso = { 0 }; /* no info leak */
1255 if ((len -= tun->vnet_hdr_sz) < 0) 1263 if ((len -= vnet_hdr_sz) < 0)
1256 return -EINVAL; 1264 return -EINVAL;
1257 1265
1258 if (skb_is_gso(skb)) { 1266 if (skb_is_gso(skb)) {
@@ -1284,7 +1292,8 @@ static ssize_t tun_put_user(struct tun_struct *tun,
1284 1292
1285 if (skb->ip_summed == CHECKSUM_PARTIAL) { 1293 if (skb->ip_summed == CHECKSUM_PARTIAL) {
1286 gso.flags = VIRTIO_NET_HDR_F_NEEDS_CSUM; 1294 gso.flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
1287 gso.csum_start = skb_checksum_start_offset(skb); 1295 gso.csum_start = skb_checksum_start_offset(skb) +
1296 vlan_hlen;
1288 gso.csum_offset = skb->csum_offset; 1297 gso.csum_offset = skb->csum_offset;
1289 } else if (skb->ip_summed == CHECKSUM_UNNECESSARY) { 1298 } else if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
1290 gso.flags = VIRTIO_NET_HDR_F_DATA_VALID; 1299 gso.flags = VIRTIO_NET_HDR_F_DATA_VALID;
@@ -1293,14 +1302,13 @@ static ssize_t tun_put_user(struct tun_struct *tun,
1293 if (unlikely(memcpy_toiovecend(iv, (void *)&gso, total, 1302 if (unlikely(memcpy_toiovecend(iv, (void *)&gso, total,
1294 sizeof(gso)))) 1303 sizeof(gso))))
1295 return -EFAULT; 1304 return -EFAULT;
1296 total += tun->vnet_hdr_sz; 1305 total += vnet_hdr_sz;
1297 } 1306 }
1298 1307
1299 copied = total; 1308 copied = total;
1300 total += skb->len; 1309 len = min_t(int, skb->len + vlan_hlen, len);
1301 if (!vlan_tx_tag_present(skb)) { 1310 total += skb->len + vlan_hlen;
1302 len = min_t(int, skb->len, len); 1311 if (vlan_hlen) {
1303 } else {
1304 int copy, ret; 1312 int copy, ret;
1305 struct { 1313 struct {
1306 __be16 h_vlan_proto; 1314 __be16 h_vlan_proto;
@@ -1311,8 +1319,6 @@ static ssize_t tun_put_user(struct tun_struct *tun,
1311 veth.h_vlan_TCI = htons(vlan_tx_tag_get(skb)); 1319 veth.h_vlan_TCI = htons(vlan_tx_tag_get(skb));
1312 1320
1313 vlan_offset = offsetof(struct vlan_ethhdr, h_vlan_proto); 1321 vlan_offset = offsetof(struct vlan_ethhdr, h_vlan_proto);
1314 len = min_t(int, skb->len + VLAN_HLEN, len);
1315 total += VLAN_HLEN;
1316 1322
1317 copy = min_t(int, vlan_offset, len); 1323 copy = min_t(int, vlan_offset, len);
1318 ret = skb_copy_datagram_const_iovec(skb, 0, iv, copied, copy); 1324 ret = skb_copy_datagram_const_iovec(skb, 0, iv, copied, copy);
diff --git a/drivers/net/usb/asix_devices.c b/drivers/net/usb/asix_devices.c
index 2c05f6cdb12f..816d511e34d3 100644
--- a/drivers/net/usb/asix_devices.c
+++ b/drivers/net/usb/asix_devices.c
@@ -465,19 +465,7 @@ static int ax88772_bind(struct usbnet *dev, struct usb_interface *intf)
465 return ret; 465 return ret;
466 } 466 }
467 467
468 ret = asix_sw_reset(dev, AX_SWRESET_IPPD | AX_SWRESET_PRL); 468 ax88772_reset(dev);
469 if (ret < 0)
470 return ret;
471
472 msleep(150);
473
474 ret = asix_sw_reset(dev, AX_SWRESET_CLEAR);
475 if (ret < 0)
476 return ret;
477
478 msleep(150);
479
480 ret = asix_sw_reset(dev, embd_phy ? AX_SWRESET_IPRL : AX_SWRESET_PRTE);
481 469
482 /* Read PHYID register *AFTER* the PHY was reset properly */ 470 /* Read PHYID register *AFTER* the PHY was reset properly */
483 phyid = asix_get_phyid(dev); 471 phyid = asix_get_phyid(dev);
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index ca309820d39e..fa9dc45b75a6 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -275,13 +275,15 @@ static inline struct vxlan_rdst *first_remote_rtnl(struct vxlan_fdb *fdb)
275 return list_first_entry(&fdb->remotes, struct vxlan_rdst, list); 275 return list_first_entry(&fdb->remotes, struct vxlan_rdst, list);
276} 276}
277 277
278/* Find VXLAN socket based on network namespace and UDP port */ 278/* Find VXLAN socket based on network namespace, address family and UDP port */
279static struct vxlan_sock *vxlan_find_sock(struct net *net, __be16 port) 279static struct vxlan_sock *vxlan_find_sock(struct net *net,
280 sa_family_t family, __be16 port)
280{ 281{
281 struct vxlan_sock *vs; 282 struct vxlan_sock *vs;
282 283
283 hlist_for_each_entry_rcu(vs, vs_head(net, port), hlist) { 284 hlist_for_each_entry_rcu(vs, vs_head(net, port), hlist) {
284 if (inet_sk(vs->sock->sk)->inet_sport == port) 285 if (inet_sk(vs->sock->sk)->inet_sport == port &&
286 inet_sk(vs->sock->sk)->sk.sk_family == family)
285 return vs; 287 return vs;
286 } 288 }
287 return NULL; 289 return NULL;
@@ -300,11 +302,12 @@ static struct vxlan_dev *vxlan_vs_find_vni(struct vxlan_sock *vs, u32 id)
300} 302}
301 303
302/* Look up VNI in a per net namespace table */ 304/* Look up VNI in a per net namespace table */
303static struct vxlan_dev *vxlan_find_vni(struct net *net, u32 id, __be16 port) 305static struct vxlan_dev *vxlan_find_vni(struct net *net, u32 id,
306 sa_family_t family, __be16 port)
304{ 307{
305 struct vxlan_sock *vs; 308 struct vxlan_sock *vs;
306 309
307 vs = vxlan_find_sock(net, port); 310 vs = vxlan_find_sock(net, family, port);
308 if (!vs) 311 if (!vs)
309 return NULL; 312 return NULL;
310 313
@@ -621,6 +624,8 @@ static int vxlan_gro_complete(struct sk_buff *skb, int nhoff)
621 int vxlan_len = sizeof(struct vxlanhdr) + sizeof(struct ethhdr); 624 int vxlan_len = sizeof(struct vxlanhdr) + sizeof(struct ethhdr);
622 int err = -ENOSYS; 625 int err = -ENOSYS;
623 626
627 udp_tunnel_gro_complete(skb, nhoff);
628
624 eh = (struct ethhdr *)(skb->data + nhoff + sizeof(struct vxlanhdr)); 629 eh = (struct ethhdr *)(skb->data + nhoff + sizeof(struct vxlanhdr));
625 type = eh->h_proto; 630 type = eh->h_proto;
626 631
@@ -1771,7 +1776,8 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
1771 struct vxlan_dev *dst_vxlan; 1776 struct vxlan_dev *dst_vxlan;
1772 1777
1773 ip_rt_put(rt); 1778 ip_rt_put(rt);
1774 dst_vxlan = vxlan_find_vni(vxlan->net, vni, dst_port); 1779 dst_vxlan = vxlan_find_vni(vxlan->net, vni,
1780 dst->sa.sa_family, dst_port);
1775 if (!dst_vxlan) 1781 if (!dst_vxlan)
1776 goto tx_error; 1782 goto tx_error;
1777 vxlan_encap_bypass(skb, vxlan, dst_vxlan); 1783 vxlan_encap_bypass(skb, vxlan, dst_vxlan);
@@ -1825,7 +1831,8 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
1825 struct vxlan_dev *dst_vxlan; 1831 struct vxlan_dev *dst_vxlan;
1826 1832
1827 dst_release(ndst); 1833 dst_release(ndst);
1828 dst_vxlan = vxlan_find_vni(vxlan->net, vni, dst_port); 1834 dst_vxlan = vxlan_find_vni(vxlan->net, vni,
1835 dst->sa.sa_family, dst_port);
1829 if (!dst_vxlan) 1836 if (!dst_vxlan)
1830 goto tx_error; 1837 goto tx_error;
1831 vxlan_encap_bypass(skb, vxlan, dst_vxlan); 1838 vxlan_encap_bypass(skb, vxlan, dst_vxlan);
@@ -1985,13 +1992,15 @@ static int vxlan_init(struct net_device *dev)
1985 struct vxlan_dev *vxlan = netdev_priv(dev); 1992 struct vxlan_dev *vxlan = netdev_priv(dev);
1986 struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id); 1993 struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id);
1987 struct vxlan_sock *vs; 1994 struct vxlan_sock *vs;
1995 bool ipv6 = vxlan->flags & VXLAN_F_IPV6;
1988 1996
1989 dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats); 1997 dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
1990 if (!dev->tstats) 1998 if (!dev->tstats)
1991 return -ENOMEM; 1999 return -ENOMEM;
1992 2000
1993 spin_lock(&vn->sock_lock); 2001 spin_lock(&vn->sock_lock);
1994 vs = vxlan_find_sock(vxlan->net, vxlan->dst_port); 2002 vs = vxlan_find_sock(vxlan->net, ipv6 ? AF_INET6 : AF_INET,
2003 vxlan->dst_port);
1995 if (vs) { 2004 if (vs) {
1996 /* If we have a socket with same port already, reuse it */ 2005 /* If we have a socket with same port already, reuse it */
1997 atomic_inc(&vs->refcnt); 2006 atomic_inc(&vs->refcnt);
@@ -2382,6 +2391,7 @@ struct vxlan_sock *vxlan_sock_add(struct net *net, __be16 port,
2382{ 2391{
2383 struct vxlan_net *vn = net_generic(net, vxlan_net_id); 2392 struct vxlan_net *vn = net_generic(net, vxlan_net_id);
2384 struct vxlan_sock *vs; 2393 struct vxlan_sock *vs;
2394 bool ipv6 = flags & VXLAN_F_IPV6;
2385 2395
2386 vs = vxlan_socket_create(net, port, rcv, data, flags); 2396 vs = vxlan_socket_create(net, port, rcv, data, flags);
2387 if (!IS_ERR(vs)) 2397 if (!IS_ERR(vs))
@@ -2391,7 +2401,7 @@ struct vxlan_sock *vxlan_sock_add(struct net *net, __be16 port,
2391 return vs; 2401 return vs;
2392 2402
2393 spin_lock(&vn->sock_lock); 2403 spin_lock(&vn->sock_lock);
2394 vs = vxlan_find_sock(net, port); 2404 vs = vxlan_find_sock(net, ipv6 ? AF_INET6 : AF_INET, port);
2395 if (vs) { 2405 if (vs) {
2396 if (vs->rcv == rcv) 2406 if (vs->rcv == rcv)
2397 atomic_inc(&vs->refcnt); 2407 atomic_inc(&vs->refcnt);
@@ -2550,7 +2560,8 @@ static int vxlan_newlink(struct net *net, struct net_device *dev,
2550 nla_get_u8(data[IFLA_VXLAN_UDP_ZERO_CSUM6_RX])) 2560 nla_get_u8(data[IFLA_VXLAN_UDP_ZERO_CSUM6_RX]))
2551 vxlan->flags |= VXLAN_F_UDP_ZERO_CSUM6_RX; 2561 vxlan->flags |= VXLAN_F_UDP_ZERO_CSUM6_RX;
2552 2562
2553 if (vxlan_find_vni(net, vni, vxlan->dst_port)) { 2563 if (vxlan_find_vni(net, vni, use_ipv6 ? AF_INET6 : AF_INET,
2564 vxlan->dst_port)) {
2554 pr_info("duplicate VNI %u\n", vni); 2565 pr_info("duplicate VNI %u\n", vni);
2555 return -EEXIST; 2566 return -EEXIST;
2556 } 2567 }
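With dual-stack VXLAN an IPv4 and an IPv6 socket can occupy the same UDP port, so the lookup above now matches on address family as well as port. A small sketch of the stricter match (illustrative helper, not the driver's):

#include <net/sock.h>
#include <net/inet_sock.h>

/* Match on (family, port), not port alone. */
static bool example_vs_match(const struct sock *sk, sa_family_t family,
			     __be16 port)
{
	return inet_sk(sk)->inet_sport == port && sk->sk_family == family;
}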
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw.c b/drivers/net/wireless/iwlwifi/mvm/fw.c
index e0d9f19650b0..eb03943f8463 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw.c
+++ b/drivers/net/wireless/iwlwifi/mvm/fw.c
@@ -284,7 +284,7 @@ int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm)
284 284
285 lockdep_assert_held(&mvm->mutex); 285 lockdep_assert_held(&mvm->mutex);
286 286
287 if (WARN_ON_ONCE(mvm->init_ucode_complete)) 287 if (WARN_ON_ONCE(mvm->init_ucode_complete || mvm->calibrating))
288 return 0; 288 return 0;
289 289
290 iwl_init_notification_wait(&mvm->notif_wait, 290 iwl_init_notification_wait(&mvm->notif_wait,
@@ -334,6 +334,8 @@ int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm)
334 goto out; 334 goto out;
335 } 335 }
336 336
337 mvm->calibrating = true;
338
337 /* Send TX valid antennas before triggering calibrations */ 339 /* Send TX valid antennas before triggering calibrations */
338 ret = iwl_send_tx_ant_cfg(mvm, mvm->fw->valid_tx_ant); 340 ret = iwl_send_tx_ant_cfg(mvm, mvm->fw->valid_tx_ant);
339 if (ret) 341 if (ret)
@@ -358,11 +360,17 @@ int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm)
358 MVM_UCODE_CALIB_TIMEOUT); 360 MVM_UCODE_CALIB_TIMEOUT);
359 if (!ret) 361 if (!ret)
360 mvm->init_ucode_complete = true; 362 mvm->init_ucode_complete = true;
363
364 if (ret && iwl_mvm_is_radio_killed(mvm)) {
365 IWL_DEBUG_RF_KILL(mvm, "RFKILL while calibrating.\n");
366 ret = 1;
367 }
361 goto out; 368 goto out;
362 369
363error: 370error:
364 iwl_remove_notification(&mvm->notif_wait, &calib_wait); 371 iwl_remove_notification(&mvm->notif_wait, &calib_wait);
365out: 372out:
373 mvm->calibrating = false;
366 if (iwlmvm_mod_params.init_dbg && !mvm->nvm_data) { 374 if (iwlmvm_mod_params.init_dbg && !mvm->nvm_data) {
367 /* we want to debug INIT and we have no NVM - fake */ 375 /* we want to debug INIT and we have no NVM - fake */
368 mvm->nvm_data = kzalloc(sizeof(struct iwl_nvm_data) + 376 mvm->nvm_data = kzalloc(sizeof(struct iwl_nvm_data) +
diff --git a/drivers/net/wireless/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
index 585fe5b7100f..b62405865b25 100644
--- a/drivers/net/wireless/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
@@ -788,6 +788,7 @@ static void iwl_mvm_restart_cleanup(struct iwl_mvm *mvm)
788 788
789 mvm->scan_status = IWL_MVM_SCAN_NONE; 789 mvm->scan_status = IWL_MVM_SCAN_NONE;
790 mvm->ps_disabled = false; 790 mvm->ps_disabled = false;
791 mvm->calibrating = false;
791 792
792 /* just in case one was running */ 793 /* just in case one was running */
793 ieee80211_remain_on_channel_expired(mvm->hw); 794 ieee80211_remain_on_channel_expired(mvm->hw);
diff --git a/drivers/net/wireless/iwlwifi/mvm/mvm.h b/drivers/net/wireless/iwlwifi/mvm/mvm.h
index b153ced7015b..845429c88cf4 100644
--- a/drivers/net/wireless/iwlwifi/mvm/mvm.h
+++ b/drivers/net/wireless/iwlwifi/mvm/mvm.h
@@ -548,6 +548,7 @@ struct iwl_mvm {
548 enum iwl_ucode_type cur_ucode; 548 enum iwl_ucode_type cur_ucode;
549 bool ucode_loaded; 549 bool ucode_loaded;
550 bool init_ucode_complete; 550 bool init_ucode_complete;
551 bool calibrating;
551 u32 error_event_table; 552 u32 error_event_table;
552 u32 log_event_table; 553 u32 log_event_table;
553 u32 umac_error_event_table; 554 u32 umac_error_event_table;
diff --git a/drivers/net/wireless/iwlwifi/mvm/ops.c b/drivers/net/wireless/iwlwifi/mvm/ops.c
index 48cb25a93591..5b719ee8e789 100644
--- a/drivers/net/wireless/iwlwifi/mvm/ops.c
+++ b/drivers/net/wireless/iwlwifi/mvm/ops.c
@@ -424,6 +424,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
424 } 424 }
425 mvm->sf_state = SF_UNINIT; 425 mvm->sf_state = SF_UNINIT;
426 mvm->low_latency_agg_frame_limit = 6; 426 mvm->low_latency_agg_frame_limit = 6;
427 mvm->cur_ucode = IWL_UCODE_INIT;
427 428
428 mutex_init(&mvm->mutex); 429 mutex_init(&mvm->mutex);
429 mutex_init(&mvm->d0i3_suspend_mutex); 430 mutex_init(&mvm->d0i3_suspend_mutex);
@@ -752,6 +753,7 @@ void iwl_mvm_set_hw_ctkill_state(struct iwl_mvm *mvm, bool state)
752static bool iwl_mvm_set_hw_rfkill_state(struct iwl_op_mode *op_mode, bool state) 753static bool iwl_mvm_set_hw_rfkill_state(struct iwl_op_mode *op_mode, bool state)
753{ 754{
754 struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode); 755 struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
756 bool calibrating = ACCESS_ONCE(mvm->calibrating);
755 757
756 if (state) 758 if (state)
757 set_bit(IWL_MVM_STATUS_HW_RFKILL, &mvm->status); 759 set_bit(IWL_MVM_STATUS_HW_RFKILL, &mvm->status);
@@ -760,7 +762,15 @@ static bool iwl_mvm_set_hw_rfkill_state(struct iwl_op_mode *op_mode, bool state)
760 762
761 wiphy_rfkill_set_hw_state(mvm->hw->wiphy, iwl_mvm_is_radio_killed(mvm)); 763 wiphy_rfkill_set_hw_state(mvm->hw->wiphy, iwl_mvm_is_radio_killed(mvm));
762 764
763 return state && mvm->cur_ucode != IWL_UCODE_INIT; 765 /* iwl_run_init_mvm_ucode is waiting for results, abort it */
766 if (calibrating)
767 iwl_abort_notification_waits(&mvm->notif_wait);
768
769 /*
770 * Stop the device if we run OPERATIONAL firmware or if we are in the
771 * middle of the calibrations.
772 */
773 return state && (mvm->cur_ucode != IWL_UCODE_INIT || calibrating);
764} 774}
765 775
766static void iwl_mvm_free_skb(struct iwl_op_mode *op_mode, struct sk_buff *skb) 776static void iwl_mvm_free_skb(struct iwl_op_mode *op_mode, struct sk_buff *skb)
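
Taken together, the iwlwifi hunks let an rfkill event that arrives during INIT-firmware calibration abort the notification wait and power the device down. A minimal userspace sketch of just the new return predicate follows; the names are placeholders for the mvm state, not driver API:

#include <stdbool.h>
#include <stdio.h>

enum ucode { UCODE_INIT, UCODE_REGULAR };

/* Mirrors the new return expression: stop the device when the radio is
 * killed and we either run operational firmware or are mid-calibration. */
static bool should_stop_device(bool rfkill, enum ucode cur, bool calibrating)
{
        return rfkill && (cur != UCODE_INIT || calibrating);
}

int main(void)
{
        /* The old expression lacked the calibrating term, so rfkill during
         * INIT-firmware calibration left the device running. */
        printf("%d\n", should_stop_device(true, UCODE_INIT, true));  /* 1 */
        printf("%d\n", should_stop_device(true, UCODE_INIT, false)); /* 0 */
        return 0;
}
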
diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c
index 3781b029e54a..160c3ebc48d0 100644
--- a/drivers/net/wireless/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/iwlwifi/pcie/trans.c
@@ -915,7 +915,8 @@ static void iwl_trans_pcie_stop_device(struct iwl_trans *trans)
915 * restart. So don't process again if the device is 915 * restart. So don't process again if the device is
916 * already dead. 916 * already dead.
917 */ 917 */
918 if (test_bit(STATUS_DEVICE_ENABLED, &trans->status)) { 918 if (test_and_clear_bit(STATUS_DEVICE_ENABLED, &trans->status)) {
919 IWL_DEBUG_INFO(trans, "DEVICE_ENABLED bit was set and is now cleared\n");
919 iwl_pcie_tx_stop(trans); 920 iwl_pcie_tx_stop(trans);
920 iwl_pcie_rx_stop(trans); 921 iwl_pcie_rx_stop(trans);
921 922
@@ -945,7 +946,6 @@ static void iwl_trans_pcie_stop_device(struct iwl_trans *trans)
945 /* clear all status bits */ 946 /* clear all status bits */
946 clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status); 947 clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
947 clear_bit(STATUS_INT_ENABLED, &trans->status); 948 clear_bit(STATUS_INT_ENABLED, &trans->status);
948 clear_bit(STATUS_DEVICE_ENABLED, &trans->status);
949 clear_bit(STATUS_TPOWER_PMI, &trans->status); 949 clear_bit(STATUS_TPOWER_PMI, &trans->status);
950 clear_bit(STATUS_RFKILL, &trans->status); 950 clear_bit(STATUS_RFKILL, &trans->status);
951 951
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index babbdc1ce741..c9ad4cf1adfb 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -1987,7 +1987,7 @@ static int mac80211_hwsim_create_radio(int channels, const char *reg_alpha2,
1987 if (err != 0) { 1987 if (err != 0) {
1988 printk(KERN_DEBUG "mac80211_hwsim: device_bind_driver failed (%d)\n", 1988 printk(KERN_DEBUG "mac80211_hwsim: device_bind_driver failed (%d)\n",
1989 err); 1989 err);
1990 goto failed_hw; 1990 goto failed_bind;
1991 } 1991 }
1992 1992
1993 skb_queue_head_init(&data->pending); 1993 skb_queue_head_init(&data->pending);
@@ -2183,6 +2183,8 @@ static int mac80211_hwsim_create_radio(int channels, const char *reg_alpha2,
2183 return idx; 2183 return idx;
2184 2184
2185failed_hw: 2185failed_hw:
2186 device_release_driver(data->dev);
2187failed_bind:
2186 device_unregister(data->dev); 2188 device_unregister(data->dev);
2187failed_drvdata: 2189failed_drvdata:
2188 ieee80211_free_hw(hw); 2190 ieee80211_free_hw(hw);
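
The hwsim change is an error-unwind layering fix: once device_bind_driver() has succeeded, later failures must release the driver before unregistering the device, while a failed bind must skip the release. A self-contained sketch of the corrected goto ladder, with stubs standing in for the driver-model calls:

#include <stdio.h>

static int bind_driver(int fail_at) { return fail_at == 1 ? -1 : 0; }
static int later_step(int fail_at)  { return fail_at == 2 ? -1 : 0; }

/* A failure before the bind jumps past the release label; a failure
 * after it unwinds both steps, mirroring failed_hw/failed_bind. */
static int create(int fail_at)
{
        if (bind_driver(fail_at))
                goto failed_bind;
        if (later_step(fail_at))
                goto failed_hw;
        return 0;

failed_hw:
        puts("device_release_driver");
failed_bind:
        puts("device_unregister");
        return -1;
}

int main(void)
{
        create(1);      /* only device_unregister runs */
        create(2);      /* release, then unregister */
        return 0;
}
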
diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig
index 4dcfb7116a04..a2eabe6ff9ad 100644
--- a/drivers/platform/x86/Kconfig
+++ b/drivers/platform/x86/Kconfig
@@ -202,6 +202,7 @@ config TC1100_WMI
202config HP_ACCEL 202config HP_ACCEL
203 tristate "HP laptop accelerometer" 203 tristate "HP laptop accelerometer"
204 depends on INPUT && ACPI 204 depends on INPUT && ACPI
205 depends on SERIO_I8042
205 select SENSORS_LIS3LV02D 206 select SENSORS_LIS3LV02D
206 select NEW_LEDS 207 select NEW_LEDS
207 select LEDS_CLASS 208 select LEDS_CLASS
diff --git a/drivers/platform/x86/hp_accel.c b/drivers/platform/x86/hp_accel.c
index 13e14ec1d3d7..6bec745b6b92 100644
--- a/drivers/platform/x86/hp_accel.c
+++ b/drivers/platform/x86/hp_accel.c
@@ -37,6 +37,8 @@
37#include <linux/leds.h> 37#include <linux/leds.h>
38#include <linux/atomic.h> 38#include <linux/atomic.h>
39#include <linux/acpi.h> 39#include <linux/acpi.h>
40#include <linux/i8042.h>
41#include <linux/serio.h>
40#include "../../misc/lis3lv02d/lis3lv02d.h" 42#include "../../misc/lis3lv02d/lis3lv02d.h"
41 43
42#define DRIVER_NAME "hp_accel" 44#define DRIVER_NAME "hp_accel"
@@ -73,6 +75,13 @@ static inline void delayed_sysfs_set(struct led_classdev *led_cdev,
73 75
74/* HP-specific accelerometer driver ------------------------------------ */ 76/* HP-specific accelerometer driver ------------------------------------ */
75 77
78/* e0 25, e0 26, e0 27, e0 28 are scan codes that the accelerometer with acpi id
79 * HPQ6000 sends through the keyboard bus */
80#define ACCEL_1 0x25
81#define ACCEL_2 0x26
82#define ACCEL_3 0x27
83#define ACCEL_4 0x28
84
76/* For automatic insertion of the module */ 85/* For automatic insertion of the module */
77static const struct acpi_device_id lis3lv02d_device_ids[] = { 86static const struct acpi_device_id lis3lv02d_device_ids[] = {
78 {"HPQ0004", 0}, /* HP Mobile Data Protection System PNP */ 87 {"HPQ0004", 0}, /* HP Mobile Data Protection System PNP */
@@ -294,6 +303,35 @@ static void lis3lv02d_enum_resources(struct acpi_device *device)
294 printk(KERN_DEBUG DRIVER_NAME ": Error getting resources\n"); 303 printk(KERN_DEBUG DRIVER_NAME ": Error getting resources\n");
295} 304}
296 305
306static bool hp_accel_i8042_filter(unsigned char data, unsigned char str,
307 struct serio *port)
308{
309 static bool extended;
310
311 if (str & I8042_STR_AUXDATA)
312 return false;
313
314 if (data == 0xe0) {
315 extended = true;
316 return true;
317 } else if (unlikely(extended)) {
318 extended = false;
319
320 switch (data) {
321 case ACCEL_1:
322 case ACCEL_2:
323 case ACCEL_3:
324 case ACCEL_4:
325 return true;
326 default:
327 serio_interrupt(port, 0xe0, 0);
328 return false;
329 }
330 }
331
332 return false;
333}
334
297static int lis3lv02d_add(struct acpi_device *device) 335static int lis3lv02d_add(struct acpi_device *device)
298{ 336{
299 int ret; 337 int ret;
@@ -326,6 +364,11 @@ static int lis3lv02d_add(struct acpi_device *device)
326 if (ret) 364 if (ret)
327 return ret; 365 return ret;
328 366
367 /* filter to remove HPQ6000 accelerometer data
368 * from keyboard bus stream */
369 if (strstr(dev_name(&device->dev), "HPQ6000"))
370 i8042_install_filter(hp_accel_i8042_filter);
371
329 INIT_WORK(&hpled_led.work, delayed_set_status_worker); 372 INIT_WORK(&hpled_led.work, delayed_set_status_worker);
330 ret = led_classdev_register(NULL, &hpled_led.led_classdev); 373 ret = led_classdev_register(NULL, &hpled_led.led_classdev);
331 if (ret) { 374 if (ret) {
@@ -343,6 +386,7 @@ static int lis3lv02d_remove(struct acpi_device *device)
343 if (!device) 386 if (!device)
344 return -EINVAL; 387 return -EINVAL;
345 388
389 i8042_remove_filter(hp_accel_i8042_filter);
346 lis3lv02d_joystick_disable(&lis3_dev); 390 lis3lv02d_joystick_disable(&lis3_dev);
347 lis3lv02d_poweroff(&lis3_dev); 391 lis3lv02d_poweroff(&lis3_dev);
348 392
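
The filter's two-byte state machine is easy to exercise in isolation. Below is a standalone model, not driver code: the kernel version switches over the four scan codes and calls serio_interrupt() to replay a swallowed 0xe0 prefix that turned out to belong to an ordinary key, which the sketch only reports. Treating ACCEL_1..ACCEL_4 as the contiguous range 0x25..0x28 is a simplification made here:

#include <stdbool.h>
#include <stdio.h>

#define ACCEL_FIRST 0x25
#define ACCEL_LAST  0x28

static bool filter(unsigned char data, bool *extended, bool *replay_e0)
{
        *replay_e0 = false;
        if (data == 0xe0) {             /* remember the prefix, eat it */
                *extended = true;
                return true;
        }
        if (*extended) {
                *extended = false;
                if (data >= ACCEL_FIRST && data <= ACCEL_LAST)
                        return true;    /* accelerometer byte, drop it */
                *replay_e0 = true;      /* kernel: serio_interrupt(port, 0xe0, 0) */
        }
        return false;                   /* ordinary byte, pass it through */
}

int main(void)
{
        unsigned char stream[] = { 0xe0, 0x26, 0xe0, 0x48, 0x1e };
        bool ext = false, replay;

        for (unsigned int i = 0; i < sizeof(stream); i++) {
                bool eaten = filter(stream[i], &ext, &replay);
                printf("%02x %s%s\n", stream[i],
                       eaten ? "filtered" : "passed",
                       replay ? " (e0 replayed)" : "");
        }
        return 0;
}
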
diff --git a/drivers/s390/kvm/virtio_ccw.c b/drivers/s390/kvm/virtio_ccw.c
index 6cbe6ef3c889..bda52f18e967 100644
--- a/drivers/s390/kvm/virtio_ccw.c
+++ b/drivers/s390/kvm/virtio_ccw.c
@@ -888,7 +888,6 @@ static void virtio_ccw_int_handler(struct ccw_device *cdev,
888 struct virtio_ccw_device *vcdev = dev_get_drvdata(&cdev->dev); 888 struct virtio_ccw_device *vcdev = dev_get_drvdata(&cdev->dev);
889 int i; 889 int i;
890 struct virtqueue *vq; 890 struct virtqueue *vq;
891 struct virtio_driver *drv;
892 891
893 if (!vcdev) 892 if (!vcdev)
894 return; 893 return;
diff --git a/drivers/thermal/imx_thermal.c b/drivers/thermal/imx_thermal.c
index 461bf3d033a0..5a1f1070b702 100644
--- a/drivers/thermal/imx_thermal.c
+++ b/drivers/thermal/imx_thermal.c
@@ -459,6 +459,10 @@ static int imx_thermal_probe(struct platform_device *pdev)
459 int measure_freq; 459 int measure_freq;
460 int ret; 460 int ret;
461 461
462 if (!cpufreq_get_current_driver()) {
463 dev_dbg(&pdev->dev, "no cpufreq driver!");
464 return -EPROBE_DEFER;
465 }
462 data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL); 466 data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
463 if (!data) 467 if (!data)
464 return -ENOMEM; 468 return -ENOMEM;
@@ -521,6 +525,30 @@ static int imx_thermal_probe(struct platform_device *pdev)
521 return ret; 525 return ret;
522 } 526 }
523 527
528 data->thermal_clk = devm_clk_get(&pdev->dev, NULL);
529 if (IS_ERR(data->thermal_clk)) {
530 ret = PTR_ERR(data->thermal_clk);
531 if (ret != -EPROBE_DEFER)
532 dev_err(&pdev->dev,
533 "failed to get thermal clk: %d\n", ret);
534 cpufreq_cooling_unregister(data->cdev);
535 return ret;
536 }
537
538 /*
539 * Thermal sensor needs clk on to get correct value, normally
540 * we should enable its clk before taking measurement and disable
541 * clk after measurement is done, but if alarm function is enabled,
542 * hardware will auto measure the temperature periodically, so we
543 * need to keep the clk always on for alarm function.
544 */
545 ret = clk_prepare_enable(data->thermal_clk);
546 if (ret) {
547 dev_err(&pdev->dev, "failed to enable thermal clk: %d\n", ret);
548 cpufreq_cooling_unregister(data->cdev);
549 return ret;
550 }
551
524 data->tz = thermal_zone_device_register("imx_thermal_zone", 552 data->tz = thermal_zone_device_register("imx_thermal_zone",
525 IMX_TRIP_NUM, 553 IMX_TRIP_NUM,
526 BIT(IMX_TRIP_PASSIVE), data, 554 BIT(IMX_TRIP_PASSIVE), data,
@@ -531,26 +559,11 @@ static int imx_thermal_probe(struct platform_device *pdev)
531 ret = PTR_ERR(data->tz); 559 ret = PTR_ERR(data->tz);
532 dev_err(&pdev->dev, 560 dev_err(&pdev->dev,
533 "failed to register thermal zone device %d\n", ret); 561 "failed to register thermal zone device %d\n", ret);
562 clk_disable_unprepare(data->thermal_clk);
534 cpufreq_cooling_unregister(data->cdev); 563 cpufreq_cooling_unregister(data->cdev);
535 return ret; 564 return ret;
536 } 565 }
537 566
538 data->thermal_clk = devm_clk_get(&pdev->dev, NULL);
539 if (IS_ERR(data->thermal_clk)) {
540 dev_warn(&pdev->dev, "failed to get thermal clk!\n");
541 } else {
542 /*
543 * Thermal sensor needs clk on to get correct value, normally
544 * we should enable its clk before taking measurement and disable
545 * clk after measurement is done, but if alarm function is enabled,
546 * hardware will auto measure the temperature periodically, so we
547 * need to keep the clk always on for alarm function.
548 */
549 ret = clk_prepare_enable(data->thermal_clk);
550 if (ret)
551 dev_warn(&pdev->dev, "failed to enable thermal clk: %d\n", ret);
552 }
553
554 /* Enable measurements at ~ 10 Hz */ 567 /* Enable measurements at ~ 10 Hz */
555 regmap_write(map, TEMPSENSE1 + REG_CLR, TEMPSENSE1_MEASURE_FREQ); 568 regmap_write(map, TEMPSENSE1 + REG_CLR, TEMPSENSE1_MEASURE_FREQ);
556 measure_freq = DIV_ROUND_UP(32768, 10); /* 10 Hz */ 569 measure_freq = DIV_ROUND_UP(32768, 10); /* 10 Hz */
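
The imx_thermal hunks are an ordering fix: the sensor clock is now requested and enabled before thermal_zone_device_register(), presumably because registration can immediately trigger a reading that needs the clock, and every failure path unwinds exactly what was set up before it. A compilable sketch of that flow, with trivial stubs in place of the clk, thermal, and cooling-device APIs:

#include <stdio.h>

struct ctx { int fail_at; };

static int get_clk(struct ctx *c)       { return c->fail_at == 1 ? -1 : 0; }
static int enable_clk(struct ctx *c)    { return c->fail_at == 2 ? -1 : 0; }
static int register_zone(struct ctx *c) { return c->fail_at == 3 ? -1 : 0; }
static void disable_clk(struct ctx *c)        { (void)c; puts("  clk_disable_unprepare"); }
static void unregister_cooling(struct ctx *c) { (void)c; puts("  cpufreq_cooling_unregister"); }

static int probe(struct ctx *c)
{
        if (get_clk(c))         /* devm_clk_get */
                goto err_cooling;
        if (enable_clk(c))      /* clk_prepare_enable; stays on because the
                                 * alarm function measures on its own */
                goto err_cooling;
        if (register_zone(c))   /* thermal_zone_device_register */
                goto err_clk;
        return 0;

err_clk:
        disable_clk(c);
err_cooling:
        unregister_cooling(c);
        return -1;
}

int main(void)
{
        for (int i = 1; i <= 3; i++) {
                struct ctx c = { .fail_at = i };
                printf("fail at step %d:\n", i);
                probe(&c);
        }
        return 0;
}
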
diff --git a/drivers/thermal/int340x_thermal/int3403_thermal.c b/drivers/thermal/int340x_thermal/int3403_thermal.c
index d20dba986f0f..6e9fb62eb817 100644
--- a/drivers/thermal/int340x_thermal/int3403_thermal.c
+++ b/drivers/thermal/int340x_thermal/int3403_thermal.c
@@ -92,7 +92,13 @@ static int sys_get_trip_hyst(struct thermal_zone_device *tzone,
92 if (ACPI_FAILURE(status)) 92 if (ACPI_FAILURE(status))
93 return -EIO; 93 return -EIO;
94 94
95 *temp = DECI_KELVIN_TO_MILLI_CELSIUS(hyst, KELVIN_OFFSET); 95 /*
96 * Thermal hysteresis represents a temperature difference.
 97 * Kelvin and Celsius have the same degree size, so the
 98 * conversion here between the tenths-of-a-kelvin unit
 99 * and the milli-Celsius unit is just a multiplication by 100.
100 */
101 *temp = hyst * 100;
96 102
97 return 0; 103 return 0;
98} 104}
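
Worked through: the firmware reports hysteresis in tenths of a kelvin, and a temperature difference of 0.1 K is exactly 0.1 degC, i.e. 100 mdegC, so the only conversion needed is a factor of 100. The removed macro also subtracted the absolute-zero offset, which is correct for absolute temperatures but not for deltas:

#include <stdio.h>

int main(void)
{
        long hyst = 20;         /* 2.0 K of hysteresis, in deci-kelvin */

        /* delta conversion: 0.1 K == 0.1 degC == 100 mdegC */
        printf("%ld mC\n", hyst * 100);                 /* 2000 */

        /* what the old absolute-temperature macro computed: (t - 2732) * 100 */
        printf("%ld mC (wrong)\n", (hyst - 2732) * 100);        /* -271200 */
        return 0;
}
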
diff --git a/drivers/thermal/samsung/exynos_tmu_data.c b/drivers/thermal/samsung/exynos_tmu_data.c
index 2683d2897e90..1724f6cdaef8 100644
--- a/drivers/thermal/samsung/exynos_tmu_data.c
+++ b/drivers/thermal/samsung/exynos_tmu_data.c
@@ -264,7 +264,6 @@ struct exynos_tmu_init_data const exynos5250_default_tmu_data = {
264static const struct exynos_tmu_registers exynos5260_tmu_registers = { 264static const struct exynos_tmu_registers exynos5260_tmu_registers = {
265 .triminfo_data = EXYNOS_TMU_REG_TRIMINFO, 265 .triminfo_data = EXYNOS_TMU_REG_TRIMINFO,
266 .tmu_ctrl = EXYNOS_TMU_REG_CONTROL, 266 .tmu_ctrl = EXYNOS_TMU_REG_CONTROL,
267 .tmu_ctrl = EXYNOS_TMU_REG_CONTROL1,
268 .therm_trip_mode_shift = EXYNOS_TMU_TRIP_MODE_SHIFT, 267 .therm_trip_mode_shift = EXYNOS_TMU_TRIP_MODE_SHIFT,
269 .therm_trip_mode_mask = EXYNOS_TMU_TRIP_MODE_MASK, 268 .therm_trip_mode_mask = EXYNOS_TMU_TRIP_MODE_MASK,
270 .therm_trip_en_shift = EXYNOS_TMU_THERM_TRIP_EN_SHIFT, 269 .therm_trip_en_shift = EXYNOS_TMU_THERM_TRIP_EN_SHIFT,
diff --git a/drivers/thermal/samsung/exynos_tmu_data.h b/drivers/thermal/samsung/exynos_tmu_data.h
index 65e2ea6a9579..63de598c9c2c 100644
--- a/drivers/thermal/samsung/exynos_tmu_data.h
+++ b/drivers/thermal/samsung/exynos_tmu_data.h
@@ -75,7 +75,6 @@
75#define EXYNOS_MAX_TRIGGER_PER_REG 4 75#define EXYNOS_MAX_TRIGGER_PER_REG 4
76 76
77/* Exynos5260 specific */ 77/* Exynos5260 specific */
78#define EXYNOS_TMU_REG_CONTROL1 0x24
79#define EXYNOS5260_TMU_REG_INTEN 0xC0 78#define EXYNOS5260_TMU_REG_INTEN 0xC0
80#define EXYNOS5260_TMU_REG_INTSTAT 0xC4 79#define EXYNOS5260_TMU_REG_INTSTAT 0xC4
81#define EXYNOS5260_TMU_REG_INTCLEAR 0xC8 80#define EXYNOS5260_TMU_REG_INTCLEAR 0xC8
diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
index 659f2ea9e6f7..cefca661464b 100644
--- a/fs/ceph/caps.c
+++ b/fs/ceph/caps.c
@@ -2638,7 +2638,7 @@ static void handle_cap_flush_ack(struct inode *inode, u64 flush_tid,
2638 2638
2639 for (i = 0; i < CEPH_CAP_BITS; i++) 2639 for (i = 0; i < CEPH_CAP_BITS; i++)
2640 if ((dirty & (1 << i)) && 2640 if ((dirty & (1 << i)) &&
2641 flush_tid == ci->i_cap_flush_tid[i]) 2641 (u16)flush_tid == ci->i_cap_flush_tid[i])
2642 cleaned |= 1 << i; 2642 cleaned |= 1 << i;
2643 2643
2644 dout("handle_cap_flush_ack inode %p mds%d seq %d on %s cleaned %s," 2644 dout("handle_cap_flush_ack inode %p mds%d seq %d on %s cleaned %s,"
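
The ceph one-liner is a width-mismatch fix: the flush ack carries a 64-bit tid, while the per-cap i_cap_flush_tid[] entries are stored narrower, so the untruncated comparison stops matching once tids outgrow the stored width and flushed caps are never marked clean. A toy reproduction, assuming a 16-bit stored tid as the (u16) cast suggests:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint64_t flush_tid = 0x10003;   /* tid from the cap-flush ack */
        uint16_t stored = 0x0003;       /* truncated tid kept per cap bit */

        printf("%d\n", flush_tid == stored);            /* 0: never cleaned */
        printf("%d\n", (uint16_t)flush_tid == stored);  /* 1: the fix */
        return 0;
}
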
diff --git a/fs/notify/fsnotify.c b/fs/notify/fsnotify.c
index 9d3e9c50066a..89326acd4561 100644
--- a/fs/notify/fsnotify.c
+++ b/fs/notify/fsnotify.c
@@ -229,8 +229,16 @@ int fsnotify(struct inode *to_tell, __u32 mask, void *data, int data_is,
229 &fsnotify_mark_srcu); 229 &fsnotify_mark_srcu);
230 } 230 }
231 231
232 /*
233 * We need to merge inode & vfsmount mark lists so that inode mark
234 * ignore masks are properly reflected for mount mark notifications.
235 * That's why this traversal is so complicated...
236 */
232 while (inode_node || vfsmount_node) { 237 while (inode_node || vfsmount_node) {
233 inode_group = vfsmount_group = NULL; 238 inode_group = NULL;
239 inode_mark = NULL;
240 vfsmount_group = NULL;
241 vfsmount_mark = NULL;
234 242
235 if (inode_node) { 243 if (inode_node) {
236 inode_mark = hlist_entry(srcu_dereference(inode_node, &fsnotify_mark_srcu), 244 inode_mark = hlist_entry(srcu_dereference(inode_node, &fsnotify_mark_srcu),
@@ -244,21 +252,19 @@ int fsnotify(struct inode *to_tell, __u32 mask, void *data, int data_is,
244 vfsmount_group = vfsmount_mark->group; 252 vfsmount_group = vfsmount_mark->group;
245 } 253 }
246 254
247 if (inode_group > vfsmount_group) { 255 if (inode_group && vfsmount_group) {
248 /* handle inode */ 256 int cmp = fsnotify_compare_groups(inode_group,
249 ret = send_to_group(to_tell, inode_mark, NULL, mask, 257 vfsmount_group);
250 data, data_is, cookie, file_name); 258 if (cmp > 0) {
251 /* we didn't use the vfsmount_mark */ 259 inode_group = NULL;
252 vfsmount_group = NULL; 260 inode_mark = NULL;
253 } else if (vfsmount_group > inode_group) { 261 } else if (cmp < 0) {
254 ret = send_to_group(to_tell, NULL, vfsmount_mark, mask, 262 vfsmount_group = NULL;
255 data, data_is, cookie, file_name); 263 vfsmount_mark = NULL;
256 inode_group = NULL; 264 }
257 } else {
258 ret = send_to_group(to_tell, inode_mark, vfsmount_mark,
259 mask, data, data_is, cookie,
260 file_name);
261 } 265 }
266 ret = send_to_group(to_tell, inode_mark, vfsmount_mark, mask,
267 data, data_is, cookie, file_name);
262 268
263 if (ret && (mask & ALL_FSNOTIFY_PERM_EVENTS)) 269 if (ret && (mask & ALL_FSNOTIFY_PERM_EVENTS))
264 goto out; 270 goto out;
diff --git a/fs/notify/fsnotify.h b/fs/notify/fsnotify.h
index 9c0898c4cfe1..3b68b0ae0a97 100644
--- a/fs/notify/fsnotify.h
+++ b/fs/notify/fsnotify.h
@@ -12,6 +12,10 @@ extern void fsnotify_flush_notify(struct fsnotify_group *group);
12/* protects reads of inode and vfsmount marks list */ 12/* protects reads of inode and vfsmount marks list */
13extern struct srcu_struct fsnotify_mark_srcu; 13extern struct srcu_struct fsnotify_mark_srcu;
14 14
15/* compare two groups for sorting of marks lists */
16extern int fsnotify_compare_groups(struct fsnotify_group *a,
17 struct fsnotify_group *b);
18
15extern void fsnotify_set_inode_mark_mask_locked(struct fsnotify_mark *fsn_mark, 19extern void fsnotify_set_inode_mark_mask_locked(struct fsnotify_mark *fsn_mark,
16 __u32 mask); 20 __u32 mask);
17/* add a mark to an inode */ 21/* add a mark to an inode */
diff --git a/fs/notify/inode_mark.c b/fs/notify/inode_mark.c
index e8497144b323..dfbf5447eea4 100644
--- a/fs/notify/inode_mark.c
+++ b/fs/notify/inode_mark.c
@@ -194,6 +194,7 @@ int fsnotify_add_inode_mark(struct fsnotify_mark *mark,
194{ 194{
195 struct fsnotify_mark *lmark, *last = NULL; 195 struct fsnotify_mark *lmark, *last = NULL;
196 int ret = 0; 196 int ret = 0;
197 int cmp;
197 198
198 mark->flags |= FSNOTIFY_MARK_FLAG_INODE; 199 mark->flags |= FSNOTIFY_MARK_FLAG_INODE;
199 200
@@ -219,11 +220,8 @@ int fsnotify_add_inode_mark(struct fsnotify_mark *mark,
219 goto out; 220 goto out;
220 } 221 }
221 222
222 if (mark->group->priority < lmark->group->priority) 223 cmp = fsnotify_compare_groups(lmark->group, mark->group);
223 continue; 224 if (cmp < 0)
224
225 if ((mark->group->priority == lmark->group->priority) &&
226 (mark->group < lmark->group))
227 continue; 225 continue;
228 226
229 hlist_add_before_rcu(&mark->i.i_list, &lmark->i.i_list); 227 hlist_add_before_rcu(&mark->i.i_list, &lmark->i.i_list);
diff --git a/fs/notify/mark.c b/fs/notify/mark.c
index d90deaa08e78..34c38fabf514 100644
--- a/fs/notify/mark.c
+++ b/fs/notify/mark.c
@@ -210,6 +210,42 @@ void fsnotify_set_mark_ignored_mask_locked(struct fsnotify_mark *mark, __u32 mas
210} 210}
211 211
212/* 212/*
213 * Sorting function for lists of fsnotify marks.
214 *
215 * Fanotify supports different notification classes (reflected as priority of
216 * notification group). Events shall be passed to notification groups in
217 * decreasing priority order. To achieve this marks in notification lists for
218 * inodes and vfsmounts are sorted so that priorities of corresponding groups
219 * are descending.
220 *
221 * Furthermore correct handling of the ignore mask requires processing inode
222 * and vfsmount marks of each group together. Using the group address as
223 * further sort criterion provides a unique sorting order and thus we can
224 * merge inode and vfsmount lists of marks in linear time and find groups
225 * present in both lists.
226 *
227 * A return value of 1 signifies that b has priority over a.
228 * A return value of 0 signifies that the two marks have to be handled together.
229 * A return value of -1 signifies that a has priority over b.
230 */
231int fsnotify_compare_groups(struct fsnotify_group *a, struct fsnotify_group *b)
232{
233 if (a == b)
234 return 0;
235 if (!a)
236 return 1;
237 if (!b)
238 return -1;
239 if (a->priority < b->priority)
240 return 1;
241 if (a->priority > b->priority)
242 return -1;
243 if (a < b)
244 return 1;
245 return -1;
246}
247
248/*
213 * Attach an initialized mark to a given group and fs object. 249 * Attach an initialized mark to a given group and fs object.
214 * These marks may be used for the fsnotify backend to determine which 250 * These marks may be used for the fsnotify backend to determine which
215 * event types should be delivered to which group. 251 * event types should be delivered to which group.
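
The comparator's contract is what both the sorted insertion in inode_mark.c/vfsmount_mark.c and the new fsnotify() merge loop rely on: a strict total order over groups (higher priority first, group address as tie breaker, NULL last) in which 0 means "same group, handle the inode and vfsmount marks in one send_to_group() call". A standalone copy of the ordering rules, runnable outside the kernel:

#include <stdio.h>

struct group { int priority; };

static int compare_groups(const struct group *a, const struct group *b)
{
        if (a == b)
                return 0;
        if (!a)
                return 1;
        if (!b)
                return -1;
        if (a->priority < b->priority)
                return 1;
        if (a->priority > b->priority)
                return -1;
        return a < b ? 1 : -1;
}

int main(void)
{
        struct group g[2] = { { .priority = 0 }, { .priority = 2 } };

        printf("%d\n", compare_groups(&g[0], &g[1]) > 0);   /* 1: g[1] sorts first */
        printf("%d\n", compare_groups(&g[1], &g[1]));       /* 0: handled together */
        return 0;
}
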
diff --git a/fs/notify/vfsmount_mark.c b/fs/notify/vfsmount_mark.c
index ac851e8376b1..faefa72a11eb 100644
--- a/fs/notify/vfsmount_mark.c
+++ b/fs/notify/vfsmount_mark.c
@@ -153,6 +153,7 @@ int fsnotify_add_vfsmount_mark(struct fsnotify_mark *mark,
153 struct mount *m = real_mount(mnt); 153 struct mount *m = real_mount(mnt);
154 struct fsnotify_mark *lmark, *last = NULL; 154 struct fsnotify_mark *lmark, *last = NULL;
155 int ret = 0; 155 int ret = 0;
156 int cmp;
156 157
157 mark->flags |= FSNOTIFY_MARK_FLAG_VFSMOUNT; 158 mark->flags |= FSNOTIFY_MARK_FLAG_VFSMOUNT;
158 159
@@ -178,11 +179,8 @@ int fsnotify_add_vfsmount_mark(struct fsnotify_mark *mark,
178 goto out; 179 goto out;
179 } 180 }
180 181
181 if (mark->group->priority < lmark->group->priority) 182 cmp = fsnotify_compare_groups(lmark->group, mark->group);
182 continue; 183 if (cmp < 0)
183
184 if ((mark->group->priority == lmark->group->priority) &&
185 (mark->group < lmark->group))
186 continue; 184 continue;
187 185
188 hlist_add_before_rcu(&mark->m.m_list, &lmark->m.m_list); 186 hlist_add_before_rcu(&mark->m.m_list, &lmark->m.m_list);
diff --git a/include/linux/bootmem.h b/include/linux/bootmem.h
index 4e2bd4c95b66..0995c2de8162 100644
--- a/include/linux/bootmem.h
+++ b/include/linux/bootmem.h
@@ -46,6 +46,7 @@ extern unsigned long init_bootmem_node(pg_data_t *pgdat,
46extern unsigned long init_bootmem(unsigned long addr, unsigned long memend); 46extern unsigned long init_bootmem(unsigned long addr, unsigned long memend);
47 47
48extern unsigned long free_all_bootmem(void); 48extern unsigned long free_all_bootmem(void);
49extern void reset_node_managed_pages(pg_data_t *pgdat);
49extern void reset_all_zones_managed_pages(void); 50extern void reset_all_zones_managed_pages(void);
50 51
51extern void free_bootmem_node(pg_data_t *pgdat, 52extern void free_bootmem_node(pg_data_t *pgdat,
diff --git a/include/linux/mfd/max77693-private.h b/include/linux/mfd/max77693-private.h
index fc17d56581b2..582e67f34054 100644
--- a/include/linux/mfd/max77693-private.h
+++ b/include/linux/mfd/max77693-private.h
@@ -330,6 +330,13 @@ enum max77693_irq_source {
330 MAX77693_IRQ_GROUP_NR, 330 MAX77693_IRQ_GROUP_NR,
331}; 331};
332 332
333#define SRC_IRQ_CHARGER BIT(0)
334#define SRC_IRQ_TOP BIT(1)
335#define SRC_IRQ_FLASH BIT(2)
336#define SRC_IRQ_MUIC BIT(3)
337#define SRC_IRQ_ALL (SRC_IRQ_CHARGER | SRC_IRQ_TOP \
338 | SRC_IRQ_FLASH | SRC_IRQ_MUIC)
339
333#define LED_IRQ_FLED2_OPEN BIT(0) 340#define LED_IRQ_FLED2_OPEN BIT(0)
334#define LED_IRQ_FLED2_SHORT BIT(1) 341#define LED_IRQ_FLED2_SHORT BIT(1)
335#define LED_IRQ_FLED1_OPEN BIT(2) 342#define LED_IRQ_FLED1_OPEN BIT(2)
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 48bf12ef6620..ffe66e381c04 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -431,6 +431,15 @@ struct zone {
431 */ 431 */
432 int nr_migrate_reserve_block; 432 int nr_migrate_reserve_block;
433 433
434#ifdef CONFIG_MEMORY_ISOLATION
435 /*
 436 * Number of isolated pageblocks. It is used to solve an incorrect
 437 * freepage counting problem caused by racily retrieving the
 438 * migratetype of a pageblock. Protected by zone->lock.
439 */
440 unsigned long nr_isolate_pageblock;
441#endif
442
434#ifdef CONFIG_MEMORY_HOTPLUG 443#ifdef CONFIG_MEMORY_HOTPLUG
435 /* see spanned/present_pages for more description */ 444 /* see spanned/present_pages for more description */
436 seqlock_t span_seqlock; 445 seqlock_t span_seqlock;
diff --git a/include/linux/page-isolation.h b/include/linux/page-isolation.h
index 3fff8e774067..2dc1e1697b45 100644
--- a/include/linux/page-isolation.h
+++ b/include/linux/page-isolation.h
@@ -2,6 +2,10 @@
2#define __LINUX_PAGEISOLATION_H 2#define __LINUX_PAGEISOLATION_H
3 3
4#ifdef CONFIG_MEMORY_ISOLATION 4#ifdef CONFIG_MEMORY_ISOLATION
5static inline bool has_isolate_pageblock(struct zone *zone)
6{
7 return zone->nr_isolate_pageblock;
8}
5static inline bool is_migrate_isolate_page(struct page *page) 9static inline bool is_migrate_isolate_page(struct page *page)
6{ 10{
7 return get_pageblock_migratetype(page) == MIGRATE_ISOLATE; 11 return get_pageblock_migratetype(page) == MIGRATE_ISOLATE;
@@ -11,6 +15,10 @@ static inline bool is_migrate_isolate(int migratetype)
11 return migratetype == MIGRATE_ISOLATE; 15 return migratetype == MIGRATE_ISOLATE;
12} 16}
13#else 17#else
18static inline bool has_isolate_pageblock(struct zone *zone)
19{
20 return false;
21}
14static inline bool is_migrate_isolate_page(struct page *page) 22static inline bool is_migrate_isolate_page(struct page *page)
15{ 23{
16 return false; 24 return false;
diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h
index 73e938b7e937..2e0e06daf8c0 100644
--- a/include/linux/pm_domain.h
+++ b/include/linux/pm_domain.h
@@ -72,8 +72,10 @@ struct generic_pm_domain {
72 bool max_off_time_changed; 72 bool max_off_time_changed;
73 bool cached_power_down_ok; 73 bool cached_power_down_ok;
74 struct gpd_cpuidle_data *cpuidle_data; 74 struct gpd_cpuidle_data *cpuidle_data;
75 void (*attach_dev)(struct device *dev); 75 int (*attach_dev)(struct generic_pm_domain *domain,
76 void (*detach_dev)(struct device *dev); 76 struct device *dev);
77 void (*detach_dev)(struct generic_pm_domain *domain,
78 struct device *dev);
77}; 79};
78 80
79static inline struct generic_pm_domain *pd_to_genpd(struct dev_pm_domain *pd) 81static inline struct generic_pm_domain *pd_to_genpd(struct dev_pm_domain *pd)
@@ -104,7 +106,7 @@ struct generic_pm_domain_data {
104 struct notifier_block nb; 106 struct notifier_block nb;
105 struct mutex lock; 107 struct mutex lock;
106 unsigned int refcount; 108 unsigned int refcount;
107 bool need_restore; 109 int need_restore;
108}; 110};
109 111
110#ifdef CONFIG_PM_GENERIC_DOMAINS 112#ifdef CONFIG_PM_GENERIC_DOMAINS
diff --git a/include/linux/ring_buffer.h b/include/linux/ring_buffer.h
index 49a4d6f59108..e2c13cd863bd 100644
--- a/include/linux/ring_buffer.h
+++ b/include/linux/ring_buffer.h
@@ -97,7 +97,7 @@ __ring_buffer_alloc(unsigned long size, unsigned flags, struct lock_class_key *k
97 __ring_buffer_alloc((size), (flags), &__key); \ 97 __ring_buffer_alloc((size), (flags), &__key); \
98}) 98})
99 99
100int ring_buffer_wait(struct ring_buffer *buffer, int cpu); 100int ring_buffer_wait(struct ring_buffer *buffer, int cpu, bool full);
101int ring_buffer_poll_wait(struct ring_buffer *buffer, int cpu, 101int ring_buffer_poll_wait(struct ring_buffer *buffer, int cpu,
102 struct file *filp, poll_table *poll_table); 102 struct file *filp, poll_table *poll_table);
103 103
diff --git a/include/linux/socket.h b/include/linux/socket.h
index ec538fc287a6..bb9b83640070 100644
--- a/include/linux/socket.h
+++ b/include/linux/socket.h
@@ -256,7 +256,7 @@ struct ucred {
256#define MSG_EOF MSG_FIN 256#define MSG_EOF MSG_FIN
257 257
258#define MSG_FASTOPEN 0x20000000 /* Send data in TCP SYN */ 258#define MSG_FASTOPEN 0x20000000 /* Send data in TCP SYN */
259#define MSG_CMSG_CLOEXEC 0x40000000 /* Set close_on_exit for file 259#define MSG_CMSG_CLOEXEC 0x40000000 /* Set close_on_exec for file
260 descriptor received through 260 descriptor received through
261 SCM_RIGHTS */ 261 SCM_RIGHTS */
262#if defined(CONFIG_COMPAT) 262#if defined(CONFIG_COMPAT)
diff --git a/include/net/9p/transport.h b/include/net/9p/transport.h
index d9fa68f26c41..2a25dec30211 100644
--- a/include/net/9p/transport.h
+++ b/include/net/9p/transport.h
@@ -34,7 +34,6 @@
34 * @list: used to maintain a list of currently available transports 34 * @list: used to maintain a list of currently available transports
35 * @name: the human-readable name of the transport 35 * @name: the human-readable name of the transport
36 * @maxsize: transport provided maximum packet size 36 * @maxsize: transport provided maximum packet size
37 * @pref: Preferences of this transport
38 * @def: set if this transport should be considered the default 37 * @def: set if this transport should be considered the default
39 * @create: member function to create a new connection on this transport 38 * @create: member function to create a new connection on this transport
40 * @close: member function to discard a connection on this transport 39 * @close: member function to discard a connection on this transport
diff --git a/include/net/udp_tunnel.h b/include/net/udp_tunnel.h
index a47790bcaa38..2a50a70ef587 100644
--- a/include/net/udp_tunnel.h
+++ b/include/net/udp_tunnel.h
@@ -100,6 +100,15 @@ static inline struct sk_buff *udp_tunnel_handle_offloads(struct sk_buff *skb,
100 return iptunnel_handle_offloads(skb, udp_csum, type); 100 return iptunnel_handle_offloads(skb, udp_csum, type);
101} 101}
102 102
103static inline void udp_tunnel_gro_complete(struct sk_buff *skb, int nhoff)
104{
105 struct udphdr *uh;
106
107 uh = (struct udphdr *)(skb->data + nhoff - sizeof(struct udphdr));
108 skb_shinfo(skb)->gso_type |= uh->check ?
109 SKB_GSO_UDP_TUNNEL_CSUM : SKB_GSO_UDP_TUNNEL;
110}
111
103static inline void udp_tunnel_encap_enable(struct socket *sock) 112static inline void udp_tunnel_encap_enable(struct socket *sock)
104{ 113{
105#if IS_ENABLED(CONFIG_IPV6) 114#if IS_ENABLED(CONFIG_IPV6)
diff --git a/include/uapi/linux/Kbuild b/include/uapi/linux/Kbuild
index b70237e8bc37..4c94f31a8c99 100644
--- a/include/uapi/linux/Kbuild
+++ b/include/uapi/linux/Kbuild
@@ -125,6 +125,7 @@ header-y += filter.h
125header-y += firewire-cdev.h 125header-y += firewire-cdev.h
126header-y += firewire-constants.h 126header-y += firewire-constants.h
127header-y += flat.h 127header-y += flat.h
128header-y += fou.h
128header-y += fs.h 129header-y += fs.h
129header-y += fsl_hypervisor.h 130header-y += fsl_hypervisor.h
130header-y += fuse.h 131header-y += fuse.h
@@ -141,6 +142,7 @@ header-y += hid.h
141header-y += hiddev.h 142header-y += hiddev.h
142header-y += hidraw.h 143header-y += hidraw.h
143header-y += hpet.h 144header-y += hpet.h
145header-y += hsr_netlink.h
144header-y += hyperv.h 146header-y += hyperv.h
145header-y += hysdn_if.h 147header-y += hysdn_if.h
146header-y += i2c-dev.h 148header-y += i2c-dev.h
@@ -251,6 +253,7 @@ header-y += mii.h
251header-y += minix_fs.h 253header-y += minix_fs.h
252header-y += mman.h 254header-y += mman.h
253header-y += mmtimer.h 255header-y += mmtimer.h
256header-y += mpls.h
254header-y += mqueue.h 257header-y += mqueue.h
255header-y += mroute.h 258header-y += mroute.h
256header-y += mroute6.h 259header-y += mroute6.h
@@ -424,6 +427,7 @@ header-y += virtio_net.h
424header-y += virtio_pci.h 427header-y += virtio_pci.h
425header-y += virtio_ring.h 428header-y += virtio_ring.h
426header-y += virtio_rng.h 429header-y += virtio_rng.h
430 header-y += vm_sockets.h
427header-y += vt.h 431header-y += vt.h
428header-y += wait.h 432header-y += wait.h
429header-y += wanrouter.h 433header-y += wanrouter.h
diff --git a/include/uapi/linux/if_bridge.h b/include/uapi/linux/if_bridge.h
index 39f621a9fe82..da17e456908d 100644
--- a/include/uapi/linux/if_bridge.h
+++ b/include/uapi/linux/if_bridge.h
@@ -15,6 +15,7 @@
15 15
16#include <linux/types.h> 16#include <linux/types.h>
17#include <linux/if_ether.h> 17#include <linux/if_ether.h>
18#include <linux/in6.h>
18 19
19#define SYSFS_BRIDGE_ATTR "bridge" 20#define SYSFS_BRIDGE_ATTR "bridge"
20#define SYSFS_BRIDGE_FDB "brforward" 21#define SYSFS_BRIDGE_FDB "brforward"
diff --git a/init/main.c b/init/main.c
index 800a0daede7e..321d0ceb26d3 100644
--- a/init/main.c
+++ b/init/main.c
@@ -544,7 +544,7 @@ asmlinkage __visible void __init start_kernel(void)
544 static_command_line, __start___param, 544 static_command_line, __start___param,
545 __stop___param - __start___param, 545 __stop___param - __start___param,
546 -1, -1, &unknown_bootoption); 546 -1, -1, &unknown_bootoption);
547 if (after_dashes) 547 if (!IS_ERR_OR_NULL(after_dashes))
548 parse_args("Setting init args", after_dashes, NULL, 0, -1, -1, 548 parse_args("Setting init args", after_dashes, NULL, 0, -1, -1,
549 set_init_arg); 549 set_init_arg);
550 550
diff --git a/kernel/audit.c b/kernel/audit.c
index 80983df92cd4..cebb11db4d34 100644
--- a/kernel/audit.c
+++ b/kernel/audit.c
@@ -739,7 +739,7 @@ static void audit_log_feature_change(int which, u32 old_feature, u32 new_feature
739 739
740 ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_FEATURE_CHANGE); 740 ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_FEATURE_CHANGE);
741 audit_log_task_info(ab, current); 741 audit_log_task_info(ab, current);
742 audit_log_format(ab, "feature=%s old=%u new=%u old_lock=%u new_lock=%u res=%d", 742 audit_log_format(ab, " feature=%s old=%u new=%u old_lock=%u new_lock=%u res=%d",
743 audit_feature_names[which], !!old_feature, !!new_feature, 743 audit_feature_names[which], !!old_feature, !!new_feature,
744 !!old_lock, !!new_lock, res); 744 !!old_lock, !!new_lock, res);
745 audit_log_end(ab); 745 audit_log_end(ab);
diff --git a/kernel/audit_tree.c b/kernel/audit_tree.c
index e242e3a9864a..80f29e015570 100644
--- a/kernel/audit_tree.c
+++ b/kernel/audit_tree.c
@@ -154,6 +154,7 @@ static struct audit_chunk *alloc_chunk(int count)
154 chunk->owners[i].index = i; 154 chunk->owners[i].index = i;
155 } 155 }
156 fsnotify_init_mark(&chunk->mark, audit_tree_destroy_watch); 156 fsnotify_init_mark(&chunk->mark, audit_tree_destroy_watch);
157 chunk->mark.mask = FS_IN_IGNORED;
157 return chunk; 158 return chunk;
158} 159}
159 160
diff --git a/kernel/panic.c b/kernel/panic.c
index d09dc5c32c67..cf80672b7924 100644
--- a/kernel/panic.c
+++ b/kernel/panic.c
@@ -244,6 +244,7 @@ static const struct tnt tnts[] = {
244 * 'I' - Working around severe firmware bug. 244 * 'I' - Working around severe firmware bug.
245 * 'O' - Out-of-tree module has been loaded. 245 * 'O' - Out-of-tree module has been loaded.
246 * 'E' - Unsigned module has been loaded. 246 * 'E' - Unsigned module has been loaded.
247 * 'L' - A soft lockup has previously occurred.
247 * 248 *
248 * The string is overwritten by the next call to print_tainted(). 249 * The string is overwritten by the next call to print_tainted().
249 */ 250 */
diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c
index 4ca9a33ff620..c347e3ce3a55 100644
--- a/kernel/power/suspend.c
+++ b/kernel/power/suspend.c
@@ -146,7 +146,7 @@ static int platform_suspend_prepare(suspend_state_t state)
146 146
147static int platform_suspend_prepare_late(suspend_state_t state) 147static int platform_suspend_prepare_late(suspend_state_t state)
148{ 148{
149 return state == PM_SUSPEND_FREEZE && freeze_ops->prepare ? 149 return state == PM_SUSPEND_FREEZE && freeze_ops && freeze_ops->prepare ?
150 freeze_ops->prepare() : 0; 150 freeze_ops->prepare() : 0;
151} 151}
152 152
@@ -164,7 +164,7 @@ static void platform_resume_noirq(suspend_state_t state)
164 164
165static void platform_resume_early(suspend_state_t state) 165static void platform_resume_early(suspend_state_t state)
166{ 166{
167 if (state == PM_SUSPEND_FREEZE && freeze_ops->restore) 167 if (state == PM_SUSPEND_FREEZE && freeze_ops && freeze_ops->restore)
168 freeze_ops->restore(); 168 freeze_ops->restore();
169} 169}
170 170
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 2d75c94ae87d..a56e07c8d15b 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -538,16 +538,18 @@ static void rb_wake_up_waiters(struct irq_work *work)
538 * ring_buffer_wait - wait for input to the ring buffer 538 * ring_buffer_wait - wait for input to the ring buffer
539 * @buffer: buffer to wait on 539 * @buffer: buffer to wait on
540 * @cpu: the cpu buffer to wait on 540 * @cpu: the cpu buffer to wait on
541 * @full: wait until a full page is available, if @cpu != RING_BUFFER_ALL_CPUS
541 * 542 *
542 * If @cpu == RING_BUFFER_ALL_CPUS then the task will wake up as soon 543 * If @cpu == RING_BUFFER_ALL_CPUS then the task will wake up as soon
543 * as data is added to any of the @buffer's cpu buffers. Otherwise 544 * as data is added to any of the @buffer's cpu buffers. Otherwise
544 * it will wait for data to be added to a specific cpu buffer. 545 * it will wait for data to be added to a specific cpu buffer.
545 */ 546 */
546int ring_buffer_wait(struct ring_buffer *buffer, int cpu) 547int ring_buffer_wait(struct ring_buffer *buffer, int cpu, bool full)
547{ 548{
548 struct ring_buffer_per_cpu *cpu_buffer; 549 struct ring_buffer_per_cpu *uninitialized_var(cpu_buffer);
549 DEFINE_WAIT(wait); 550 DEFINE_WAIT(wait);
550 struct rb_irq_work *work; 551 struct rb_irq_work *work;
552 int ret = 0;
551 553
552 /* 554 /*
553 * Depending on what the caller is waiting for, either any 555 * Depending on what the caller is waiting for, either any
@@ -564,36 +566,61 @@ int ring_buffer_wait(struct ring_buffer *buffer, int cpu)
564 } 566 }
565 567
566 568
567 prepare_to_wait(&work->waiters, &wait, TASK_INTERRUPTIBLE); 569 while (true) {
570 prepare_to_wait(&work->waiters, &wait, TASK_INTERRUPTIBLE);
568 571
569 /* 572 /*
570 * The events can happen in critical sections where 573 * The events can happen in critical sections where
571 * checking a work queue can cause deadlocks. 574 * checking a work queue can cause deadlocks.
572 * After adding a task to the queue, this flag is set 575 * After adding a task to the queue, this flag is set
573 * only to notify events to try to wake up the queue 576 * only to notify events to try to wake up the queue
574 * using irq_work. 577 * using irq_work.
575 * 578 *
576 * We don't clear it even if the buffer is no longer 579 * We don't clear it even if the buffer is no longer
577 * empty. The flag only causes the next event to run 580 * empty. The flag only causes the next event to run
578 * irq_work to do the work queue wake up. The worse 581 * irq_work to do the work queue wake up. The worse
579 * that can happen if we race with !trace_empty() is that 582 * that can happen if we race with !trace_empty() is that
580 * an event will cause an irq_work to try to wake up 583 * an event will cause an irq_work to try to wake up
581 * an empty queue. 584 * an empty queue.
582 * 585 *
583 * There's no reason to protect this flag either, as 586 * There's no reason to protect this flag either, as
584 * the work queue and irq_work logic will do the necessary 587 * the work queue and irq_work logic will do the necessary
585 * synchronization for the wake ups. The only thing 588 * synchronization for the wake ups. The only thing
586 * that is necessary is that the wake up happens after 589 * that is necessary is that the wake up happens after
587 * a task has been queued. It's OK for spurious wake ups. 590 * a task has been queued. It's OK for spurious wake ups.
588 */ 591 */
589 work->waiters_pending = true; 592 work->waiters_pending = true;
593
594 if (signal_pending(current)) {
595 ret = -EINTR;
596 break;
597 }
598
599 if (cpu == RING_BUFFER_ALL_CPUS && !ring_buffer_empty(buffer))
600 break;
601
602 if (cpu != RING_BUFFER_ALL_CPUS &&
603 !ring_buffer_empty_cpu(buffer, cpu)) {
604 unsigned long flags;
605 bool pagebusy;
606
607 if (!full)
608 break;
609
610 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
611 pagebusy = cpu_buffer->reader_page == cpu_buffer->commit_page;
612 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
613
614 if (!pagebusy)
615 break;
616 }
590 617
591 if ((cpu == RING_BUFFER_ALL_CPUS && ring_buffer_empty(buffer)) ||
592 (cpu != RING_BUFFER_ALL_CPUS && ring_buffer_empty_cpu(buffer, cpu)))
593 schedule(); 618 schedule();
619 }
594 620
595 finish_wait(&work->waiters, &wait); 621 finish_wait(&work->waiters, &wait);
596 return 0; 622
623 return ret;
597} 624}
598 625
599/** 626/**
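
The rewritten wait is a standard wait-loop: prepare, re-check the exit conditions, sleep, repeat, with finish_wait() after the loop. Collapsing the locking and per-cpu details, the exit decision each iteration reduces to a pure predicate, sketched below; with full=true a non-empty but not-yet-full reader page keeps the caller asleep, and a pending signal now surfaces as -EINTR from the wait itself instead of being re-checked by every caller:

#include <stdbool.h>
#include <stdio.h>

enum why { WHY_SIGNAL, WHY_DATA, WHY_KEEP_WAITING };

static enum why check(bool signal_pending, bool nonempty, bool page_full,
                      bool full)
{
        if (signal_pending)
                return WHY_SIGNAL;              /* ret = -EINTR */
        if (nonempty && (!full || page_full))
                return WHY_DATA;
        return WHY_KEEP_WAITING;                /* schedule() and loop */
}

int main(void)
{
        /* full=true: a partially filled page no longer wakes the reader,
         * which is what tracing_buffers_splice_read() now asks for. */
        printf("%d\n", check(false, true, false, true) == WHY_KEEP_WAITING);
        printf("%d\n", check(false, true, true,  true) == WHY_DATA);
        printf("%d\n", check(true,  false, false, false) == WHY_SIGNAL);
        return 0;
}
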
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 8a528392b1f4..92f4a6cee172 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -1076,13 +1076,14 @@ update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
1076} 1076}
1077#endif /* CONFIG_TRACER_MAX_TRACE */ 1077#endif /* CONFIG_TRACER_MAX_TRACE */
1078 1078
1079static int wait_on_pipe(struct trace_iterator *iter) 1079static int wait_on_pipe(struct trace_iterator *iter, bool full)
1080{ 1080{
1081 /* Iterators are static, they should be filled or empty */ 1081 /* Iterators are static, they should be filled or empty */
1082 if (trace_buffer_iter(iter, iter->cpu_file)) 1082 if (trace_buffer_iter(iter, iter->cpu_file))
1083 return 0; 1083 return 0;
1084 1084
1085 return ring_buffer_wait(iter->trace_buffer->buffer, iter->cpu_file); 1085 return ring_buffer_wait(iter->trace_buffer->buffer, iter->cpu_file,
1086 full);
1086} 1087}
1087 1088
1088#ifdef CONFIG_FTRACE_STARTUP_TEST 1089#ifdef CONFIG_FTRACE_STARTUP_TEST
@@ -4434,15 +4435,12 @@ static int tracing_wait_pipe(struct file *filp)
4434 4435
4435 mutex_unlock(&iter->mutex); 4436 mutex_unlock(&iter->mutex);
4436 4437
4437 ret = wait_on_pipe(iter); 4438 ret = wait_on_pipe(iter, false);
4438 4439
4439 mutex_lock(&iter->mutex); 4440 mutex_lock(&iter->mutex);
4440 4441
4441 if (ret) 4442 if (ret)
4442 return ret; 4443 return ret;
4443
4444 if (signal_pending(current))
4445 return -EINTR;
4446 } 4444 }
4447 4445
4448 return 1; 4446 return 1;
@@ -5372,16 +5370,12 @@ tracing_buffers_read(struct file *filp, char __user *ubuf,
5372 goto out_unlock; 5370 goto out_unlock;
5373 } 5371 }
5374 mutex_unlock(&trace_types_lock); 5372 mutex_unlock(&trace_types_lock);
5375 ret = wait_on_pipe(iter); 5373 ret = wait_on_pipe(iter, false);
5376 mutex_lock(&trace_types_lock); 5374 mutex_lock(&trace_types_lock);
5377 if (ret) { 5375 if (ret) {
5378 size = ret; 5376 size = ret;
5379 goto out_unlock; 5377 goto out_unlock;
5380 } 5378 }
5381 if (signal_pending(current)) {
5382 size = -EINTR;
5383 goto out_unlock;
5384 }
5385 goto again; 5379 goto again;
5386 } 5380 }
5387 size = 0; 5381 size = 0;
@@ -5500,7 +5494,7 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
5500 }; 5494 };
5501 struct buffer_ref *ref; 5495 struct buffer_ref *ref;
5502 int entries, size, i; 5496 int entries, size, i;
5503 ssize_t ret; 5497 ssize_t ret = 0;
5504 5498
5505 mutex_lock(&trace_types_lock); 5499 mutex_lock(&trace_types_lock);
5506 5500
@@ -5538,13 +5532,16 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
5538 int r; 5532 int r;
5539 5533
5540 ref = kzalloc(sizeof(*ref), GFP_KERNEL); 5534 ref = kzalloc(sizeof(*ref), GFP_KERNEL);
5541 if (!ref) 5535 if (!ref) {
5536 ret = -ENOMEM;
5542 break; 5537 break;
5538 }
5543 5539
5544 ref->ref = 1; 5540 ref->ref = 1;
5545 ref->buffer = iter->trace_buffer->buffer; 5541 ref->buffer = iter->trace_buffer->buffer;
5546 ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file); 5542 ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file);
5547 if (!ref->page) { 5543 if (!ref->page) {
5544 ret = -ENOMEM;
5548 kfree(ref); 5545 kfree(ref);
5549 break; 5546 break;
5550 } 5547 }
@@ -5582,19 +5579,19 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
5582 5579
5583 /* did we read anything? */ 5580 /* did we read anything? */
5584 if (!spd.nr_pages) { 5581 if (!spd.nr_pages) {
5582 if (ret)
5583 goto out;
5584
5585 if ((file->f_flags & O_NONBLOCK) || (flags & SPLICE_F_NONBLOCK)) { 5585 if ((file->f_flags & O_NONBLOCK) || (flags & SPLICE_F_NONBLOCK)) {
5586 ret = -EAGAIN; 5586 ret = -EAGAIN;
5587 goto out; 5587 goto out;
5588 } 5588 }
5589 mutex_unlock(&trace_types_lock); 5589 mutex_unlock(&trace_types_lock);
5590 ret = wait_on_pipe(iter); 5590 ret = wait_on_pipe(iter, true);
5591 mutex_lock(&trace_types_lock); 5591 mutex_lock(&trace_types_lock);
5592 if (ret) 5592 if (ret)
5593 goto out; 5593 goto out;
5594 if (signal_pending(current)) { 5594
5595 ret = -EINTR;
5596 goto out;
5597 }
5598 goto again; 5595 goto again;
5599 } 5596 }
5600 5597
diff --git a/lib/rhashtable.c b/lib/rhashtable.c
index 081be3ba9ea8..624a0b7c05ef 100644
--- a/lib/rhashtable.c
+++ b/lib/rhashtable.c
@@ -230,7 +230,7 @@ int rhashtable_expand(struct rhashtable *ht, gfp_t flags)
230 ht->shift++; 230 ht->shift++;
231 231
232 /* For each new bucket, search the corresponding old bucket 232 /* For each new bucket, search the corresponding old bucket
233 * for the rst entry that hashes to the new bucket, and 233 * for the first entry that hashes to the new bucket, and
234 * link the new bucket to that entry. Since all the entries 234 * link the new bucket to that entry. Since all the entries
235 * which will end up in the new bucket appear in the same 235 * which will end up in the new bucket appear in the same
236 * old bucket, this constructs an entirely valid new hash 236 * old bucket, this constructs an entirely valid new hash
@@ -248,8 +248,8 @@ int rhashtable_expand(struct rhashtable *ht, gfp_t flags)
248 } 248 }
249 249
250 /* Publish the new table pointer. Lookups may now traverse 250 /* Publish the new table pointer. Lookups may now traverse
251 * the new table, but they will not benet from any 251 * the new table, but they will not benefit from any
252 * additional efciency until later steps unzip the buckets. 252 * additional efficiency until later steps unzip the buckets.
253 */ 253 */
254 rcu_assign_pointer(ht->tbl, new_tbl); 254 rcu_assign_pointer(ht->tbl, new_tbl);
255 255
@@ -306,14 +306,14 @@ int rhashtable_shrink(struct rhashtable *ht, gfp_t flags)
306 306
307 ht->shift--; 307 ht->shift--;
308 308
309 /* Link each bucket in the new table to the rst bucket 309 /* Link each bucket in the new table to the first bucket
310 * in the old table that contains entries which will hash 310 * in the old table that contains entries which will hash
311 * to the new bucket. 311 * to the new bucket.
312 */ 312 */
313 for (i = 0; i < ntbl->size; i++) { 313 for (i = 0; i < ntbl->size; i++) {
314 ntbl->buckets[i] = tbl->buckets[i]; 314 ntbl->buckets[i] = tbl->buckets[i];
315 315
316 /* Link each bucket in the new table to the rst bucket 316 /* Link each bucket in the new table to the first bucket
317 * in the old table that contains entries which will hash 317 * in the old table that contains entries which will hash
318 * to the new bucket. 318 * to the new bucket.
319 */ 319 */
diff --git a/mm/bootmem.c b/mm/bootmem.c
index 8a000cebb0d7..477be696511d 100644
--- a/mm/bootmem.c
+++ b/mm/bootmem.c
@@ -243,13 +243,10 @@ static unsigned long __init free_all_bootmem_core(bootmem_data_t *bdata)
243 243
244static int reset_managed_pages_done __initdata; 244static int reset_managed_pages_done __initdata;
245 245
246static inline void __init reset_node_managed_pages(pg_data_t *pgdat) 246void reset_node_managed_pages(pg_data_t *pgdat)
247{ 247{
248 struct zone *z; 248 struct zone *z;
249 249
250 if (reset_managed_pages_done)
251 return;
252
253 for (z = pgdat->node_zones; z < pgdat->node_zones + MAX_NR_ZONES; z++) 250 for (z = pgdat->node_zones; z < pgdat->node_zones + MAX_NR_ZONES; z++)
254 z->managed_pages = 0; 251 z->managed_pages = 0;
255} 252}
@@ -258,8 +255,12 @@ void __init reset_all_zones_managed_pages(void)
258{ 255{
259 struct pglist_data *pgdat; 256 struct pglist_data *pgdat;
260 257
258 if (reset_managed_pages_done)
259 return;
260
261 for_each_online_pgdat(pgdat) 261 for_each_online_pgdat(pgdat)
262 reset_node_managed_pages(pgdat); 262 reset_node_managed_pages(pgdat);
263
263 reset_managed_pages_done = 1; 264 reset_managed_pages_done = 1;
264} 265}
265 266
diff --git a/mm/compaction.c b/mm/compaction.c
index ec74cf0123ef..f9792ba3537c 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -479,6 +479,16 @@ isolate_freepages_range(struct compact_control *cc,
479 479
480 block_end_pfn = min(block_end_pfn, end_pfn); 480 block_end_pfn = min(block_end_pfn, end_pfn);
481 481
482 /*
483 * pfn could pass the block_end_pfn if isolated freepage
484 * is more than pageblock order. In this case, we adjust
485 * scanning range to right one.
486 */
487 if (pfn >= block_end_pfn) {
488 block_end_pfn = ALIGN(pfn + 1, pageblock_nr_pages);
489 block_end_pfn = min(block_end_pfn, end_pfn);
490 }
491
482 if (!pageblock_pfn_to_page(pfn, block_end_pfn, cc->zone)) 492 if (!pageblock_pfn_to_page(pfn, block_end_pfn, cc->zone))
483 break; 493 break;
484 494
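
The adjustment uses the usual power-of-two ALIGN trick to snap the range to the pageblock that now contains the scanner. A quick check of the arithmetic with the macro copied to userspace and an assumed order-9 (512-page) pageblock:

#include <stdio.h>

#define ALIGN(x, a) (((x) + (a) - 1) & ~((unsigned long)(a) - 1))

int main(void)
{
        unsigned long pageblock_nr_pages = 512;
        unsigned long block_end_pfn = 1024;
        unsigned long pfn = 1536;       /* an isolated free page larger than a
                                         * pageblock pushed the scanner here */

        if (pfn >= block_end_pfn)
                block_end_pfn = ALIGN(pfn + 1, pageblock_nr_pages);

        printf("%lu\n", block_end_pfn); /* 2048: end of pfn's own block */
        return 0;
}
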
@@ -1029,8 +1039,12 @@ static isolate_migrate_t isolate_migratepages(struct zone *zone,
1029 } 1039 }
1030 1040
1031 acct_isolated(zone, cc); 1041 acct_isolated(zone, cc);
1032 /* Record where migration scanner will be restarted */ 1042 /*
1033 cc->migrate_pfn = low_pfn; 1043 * Record where migration scanner will be restarted. If we end up in
1044 * the same pageblock as the free scanner, make the scanners fully
1045 * meet so that compact_finished() terminates compaction.
1046 */
1047 cc->migrate_pfn = (end_pfn <= cc->free_pfn) ? low_pfn : cc->free_pfn;
1034 1048
1035 return cc->nr_migratepages ? ISOLATE_SUCCESS : ISOLATE_NONE; 1049 return cc->nr_migratepages ? ISOLATE_SUCCESS : ISOLATE_NONE;
1036} 1050}
diff --git a/mm/internal.h b/mm/internal.h
index 829304090b90..a4f90ba7068e 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -108,6 +108,31 @@ extern pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address);
108/* 108/*
109 * in mm/page_alloc.c 109 * in mm/page_alloc.c
110 */ 110 */
111
112/*
113 * Locate the struct page for both the matching buddy in our
114 * pair (buddy1) and the combined O(n+1) page they form (page).
115 *
116 * 1) Any buddy B1 will have an order O twin B2 which satisfies
117 * the following equation:
118 * B2 = B1 ^ (1 << O)
119 * For example, if the starting buddy (buddy2) is #8 its order
120 * 1 buddy is #10:
121 * B2 = 8 ^ (1 << 1) = 8 ^ 2 = 10
122 *
123 * 2) Any buddy B will have an order O+1 parent P which
124 * satisfies the following equation:
125 * P = B & ~(1 << O)
126 *
127 * Assumption: *_mem_map is contiguous at least up to MAX_ORDER
128 */
129static inline unsigned long
130__find_buddy_index(unsigned long page_idx, unsigned int order)
131{
132 return page_idx ^ (1 << order);
133}
134
135extern int __isolate_free_page(struct page *page, unsigned int order);
111extern void __free_pages_bootmem(struct page *page, unsigned int order); 136extern void __free_pages_bootmem(struct page *page, unsigned int order);
112extern void prep_compound_page(struct page *page, unsigned long order); 137extern void prep_compound_page(struct page *page, unsigned long order);
113#ifdef CONFIG_MEMORY_FAILURE 138#ifdef CONFIG_MEMORY_FAILURE
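
The moved comment's arithmetic can be checked directly: the buddy of an order-O block is found by flipping bit O of the page index, and clearing that bit yields the merged parent:

#include <stdio.h>

static unsigned long find_buddy_index(unsigned long page_idx,
                                      unsigned int order)
{
        return page_idx ^ (1UL << order);
}

int main(void)
{
        /* The example from the comment: the order-1 buddy of #8 is #10,
         * and the combined order-2 parent is B & ~(1 << O) = 8. */
        unsigned long idx = 8;
        unsigned int order = 1;

        printf("buddy=%lu parent=%lu\n",
               find_buddy_index(idx, order), idx & ~(1UL << order));
        return 0;
}
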
diff --git a/mm/iov_iter.c b/mm/iov_iter.c
index eafcf60f6b83..e34a3cb6aad6 100644
--- a/mm/iov_iter.c
+++ b/mm/iov_iter.c
@@ -911,9 +911,9 @@ size_t iov_iter_single_seg_count(const struct iov_iter *i)
911 if (i->nr_segs == 1) 911 if (i->nr_segs == 1)
912 return i->count; 912 return i->count;
913 else if (i->type & ITER_BVEC) 913 else if (i->type & ITER_BVEC)
914 return min(i->count, i->iov->iov_len - i->iov_offset);
915 else
916 return min(i->count, i->bvec->bv_len - i->iov_offset); 914 return min(i->count, i->bvec->bv_len - i->iov_offset);
915 else
916 return min(i->count, i->iov->iov_len - i->iov_offset);
917} 917}
918EXPORT_SYMBOL(iov_iter_single_seg_count); 918EXPORT_SYMBOL(iov_iter_single_seg_count);
919 919
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index 252e1dbbed86..1bf4807cb21e 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -31,6 +31,7 @@
31#include <linux/stop_machine.h> 31#include <linux/stop_machine.h>
32#include <linux/hugetlb.h> 32#include <linux/hugetlb.h>
33#include <linux/memblock.h> 33#include <linux/memblock.h>
34#include <linux/bootmem.h>
34 35
35#include <asm/tlbflush.h> 36#include <asm/tlbflush.h>
36 37
@@ -1066,6 +1067,16 @@ out:
1066} 1067}
1067#endif /* CONFIG_MEMORY_HOTPLUG_SPARSE */ 1068#endif /* CONFIG_MEMORY_HOTPLUG_SPARSE */
1068 1069
1070static void reset_node_present_pages(pg_data_t *pgdat)
1071{
1072 struct zone *z;
1073
1074 for (z = pgdat->node_zones; z < pgdat->node_zones + MAX_NR_ZONES; z++)
1075 z->present_pages = 0;
1076
1077 pgdat->node_present_pages = 0;
1078}
1079
1069/* we are OK calling __meminit stuff here - we have CONFIG_MEMORY_HOTPLUG */ 1080/* we are OK calling __meminit stuff here - we have CONFIG_MEMORY_HOTPLUG */
1070static pg_data_t __ref *hotadd_new_pgdat(int nid, u64 start) 1081static pg_data_t __ref *hotadd_new_pgdat(int nid, u64 start)
1071{ 1082{
@@ -1096,6 +1107,21 @@ static pg_data_t __ref *hotadd_new_pgdat(int nid, u64 start)
1096 build_all_zonelists(pgdat, NULL); 1107 build_all_zonelists(pgdat, NULL);
1097 mutex_unlock(&zonelists_mutex); 1108 mutex_unlock(&zonelists_mutex);
1098 1109
1110 /*
1111 * zone->managed_pages is set to an approximate value in
1112 * free_area_init_core(), which will cause
 1113 * /sys/devices/system/node/nodeX/meminfo to show wrong data.
1114 * So reset it to 0 before any memory is onlined.
1115 */
1116 reset_node_managed_pages(pgdat);
1117
1118 /*
1119 * When memory is hot-added, all the memory is in offline state. So
1120 * clear all zones' present_pages because they will be updated in
1121 * online_pages() and offline_pages().
1122 */
1123 reset_node_present_pages(pgdat);
1124
1099 return pgdat; 1125 return pgdat;
1100} 1126}
1101 1127
diff --git a/mm/nobootmem.c b/mm/nobootmem.c
index 7c7ab32ee503..90b50468333e 100644
--- a/mm/nobootmem.c
+++ b/mm/nobootmem.c
@@ -145,12 +145,10 @@ static unsigned long __init free_low_memory_core_early(void)
145 145
146static int reset_managed_pages_done __initdata; 146static int reset_managed_pages_done __initdata;
147 147
148static inline void __init reset_node_managed_pages(pg_data_t *pgdat) 148void reset_node_managed_pages(pg_data_t *pgdat)
149{ 149{
150 struct zone *z; 150 struct zone *z;
151 151
152 if (reset_managed_pages_done)
153 return;
154 for (z = pgdat->node_zones; z < pgdat->node_zones + MAX_NR_ZONES; z++) 152 for (z = pgdat->node_zones; z < pgdat->node_zones + MAX_NR_ZONES; z++)
155 z->managed_pages = 0; 153 z->managed_pages = 0;
156} 154}
@@ -159,8 +157,12 @@ void __init reset_all_zones_managed_pages(void)
159{ 157{
160 struct pglist_data *pgdat; 158 struct pglist_data *pgdat;
161 159
160 if (reset_managed_pages_done)
161 return;
162
162 for_each_online_pgdat(pgdat) 163 for_each_online_pgdat(pgdat)
163 reset_node_managed_pages(pgdat); 164 reset_node_managed_pages(pgdat);
165
164 reset_managed_pages_done = 1; 166 reset_managed_pages_done = 1;
165} 167}
166 168
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 9cd36b822444..616a2c956b4b 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -467,29 +467,6 @@ static inline void rmv_page_order(struct page *page)
467} 467}
468 468
469/* 469/*
470 * Locate the struct page for both the matching buddy in our
471 * pair (buddy1) and the combined O(n+1) page they form (page).
472 *
473 * 1) Any buddy B1 will have an order O twin B2 which satisfies
474 * the following equation:
475 * B2 = B1 ^ (1 << O)
476 * For example, if the starting buddy (buddy2) is #8 its order
477 * 1 buddy is #10:
478 * B2 = 8 ^ (1 << 1) = 8 ^ 2 = 10
479 *
480 * 2) Any buddy B will have an order O+1 parent P which
481 * satisfies the following equation:
482 * P = B & ~(1 << O)
483 *
484 * Assumption: *_mem_map is contiguous at least up to MAX_ORDER
485 */
486static inline unsigned long
487__find_buddy_index(unsigned long page_idx, unsigned int order)
488{
489 return page_idx ^ (1 << order);
490}
491
492/*
493 * This function checks whether a page is free && is the buddy 470 * This function checks whether a page is free && is the buddy
494 * we can do coalesce a page and its buddy if 471 * we can do coalesce a page and its buddy if
495 * (a) the buddy is not in a hole && 472 * (a) the buddy is not in a hole &&
@@ -569,6 +546,7 @@ static inline void __free_one_page(struct page *page,
569 unsigned long combined_idx; 546 unsigned long combined_idx;
570 unsigned long uninitialized_var(buddy_idx); 547 unsigned long uninitialized_var(buddy_idx);
571 struct page *buddy; 548 struct page *buddy;
549 int max_order = MAX_ORDER;
572 550
573 VM_BUG_ON(!zone_is_initialized(zone)); 551 VM_BUG_ON(!zone_is_initialized(zone));
574 552
@@ -577,13 +555,24 @@ static inline void __free_one_page(struct page *page,
577 return; 555 return;
578 556
579 VM_BUG_ON(migratetype == -1); 557 VM_BUG_ON(migratetype == -1);
558 if (is_migrate_isolate(migratetype)) {
559 /*
 560 * We restrict the max merging order to prevent merging
 561 * between freepages on an isolated pageblock and a normal
 562 * pageblock. Without this, pageblock isolation
563 * could cause incorrect freepage accounting.
564 */
565 max_order = min(MAX_ORDER, pageblock_order + 1);
566 } else {
567 __mod_zone_freepage_state(zone, 1 << order, migratetype);
568 }
580 569
581 page_idx = pfn & ((1 << MAX_ORDER) - 1); 570 page_idx = pfn & ((1 << max_order) - 1);
582 571
583 VM_BUG_ON_PAGE(page_idx & ((1 << order) - 1), page); 572 VM_BUG_ON_PAGE(page_idx & ((1 << order) - 1), page);
584 VM_BUG_ON_PAGE(bad_range(zone, page), page); 573 VM_BUG_ON_PAGE(bad_range(zone, page), page);
585 574
586 while (order < MAX_ORDER-1) { 575 while (order < max_order - 1) {
587 buddy_idx = __find_buddy_index(page_idx, order); 576 buddy_idx = __find_buddy_index(page_idx, order);
588 buddy = page + (buddy_idx - page_idx); 577 buddy = page + (buddy_idx - page_idx);
589 if (!page_is_buddy(page, buddy, order)) 578 if (!page_is_buddy(page, buddy, order))
@@ -594,9 +583,11 @@ static inline void __free_one_page(struct page *page,
594 */ 583 */
595 if (page_is_guard(buddy)) { 584 if (page_is_guard(buddy)) {
596 clear_page_guard_flag(buddy); 585 clear_page_guard_flag(buddy);
597 set_page_private(page, 0); 586 set_page_private(buddy, 0);
598 __mod_zone_freepage_state(zone, 1 << order, 587 if (!is_migrate_isolate(migratetype)) {
599 migratetype); 588 __mod_zone_freepage_state(zone, 1 << order,
589 migratetype);
590 }
600 } else { 591 } else {
601 list_del(&buddy->lru); 592 list_del(&buddy->lru);
602 zone->free_area[order].nr_free--; 593 zone->free_area[order].nr_free--;
@@ -715,14 +706,12 @@ static void free_pcppages_bulk(struct zone *zone, int count,
715 /* must delete as __free_one_page list manipulates */ 706 /* must delete as __free_one_page list manipulates */
716 list_del(&page->lru); 707 list_del(&page->lru);
717 mt = get_freepage_migratetype(page); 708 mt = get_freepage_migratetype(page);
709 if (unlikely(has_isolate_pageblock(zone)))
710 mt = get_pageblock_migratetype(page);
711
718 /* MIGRATE_MOVABLE list may include MIGRATE_RESERVEs */ 712 /* MIGRATE_MOVABLE list may include MIGRATE_RESERVEs */
719 __free_one_page(page, page_to_pfn(page), zone, 0, mt); 713 __free_one_page(page, page_to_pfn(page), zone, 0, mt);
720 trace_mm_page_pcpu_drain(page, 0, mt); 714 trace_mm_page_pcpu_drain(page, 0, mt);
721 if (likely(!is_migrate_isolate_page(page))) {
722 __mod_zone_page_state(zone, NR_FREE_PAGES, 1);
723 if (is_migrate_cma(mt))
724 __mod_zone_page_state(zone, NR_FREE_CMA_PAGES, 1);
725 }
726 } while (--to_free && --batch_free && !list_empty(list)); 715 } while (--to_free && --batch_free && !list_empty(list));
727 } 716 }
728 spin_unlock(&zone->lock); 717 spin_unlock(&zone->lock);
@@ -739,9 +728,11 @@ static void free_one_page(struct zone *zone,
739 if (nr_scanned) 728 if (nr_scanned)
740 __mod_zone_page_state(zone, NR_PAGES_SCANNED, -nr_scanned); 729 __mod_zone_page_state(zone, NR_PAGES_SCANNED, -nr_scanned);
741 730
731 if (unlikely(has_isolate_pageblock(zone) ||
732 is_migrate_isolate(migratetype))) {
733 migratetype = get_pfnblock_migratetype(page, pfn);
734 }
742 __free_one_page(page, pfn, zone, order, migratetype); 735 __free_one_page(page, pfn, zone, order, migratetype);
743 if (unlikely(!is_migrate_isolate(migratetype)))
744 __mod_zone_freepage_state(zone, 1 << order, migratetype);
745 spin_unlock(&zone->lock); 736 spin_unlock(&zone->lock);
746} 737}
747 738
@@ -1484,7 +1475,7 @@ void split_page(struct page *page, unsigned int order)
1484} 1475}
1485EXPORT_SYMBOL_GPL(split_page); 1476EXPORT_SYMBOL_GPL(split_page);
1486 1477
1487static int __isolate_free_page(struct page *page, unsigned int order) 1478int __isolate_free_page(struct page *page, unsigned int order)
1488{ 1479{
1489 unsigned long watermark; 1480 unsigned long watermark;
1490 struct zone *zone; 1481 struct zone *zone;
@@ -6408,13 +6399,12 @@ int alloc_contig_range(unsigned long start, unsigned long end,
6408 6399
6409 /* Make sure the range is really isolated. */ 6400 /* Make sure the range is really isolated. */
6410 if (test_pages_isolated(outer_start, end, false)) { 6401 if (test_pages_isolated(outer_start, end, false)) {
6411 pr_warn("alloc_contig_range test_pages_isolated(%lx, %lx) failed\n", 6402 pr_info("%s: [%lx, %lx) PFNs busy\n",
6412 outer_start, end); 6403 __func__, outer_start, end);
6413 ret = -EBUSY; 6404 ret = -EBUSY;
6414 goto done; 6405 goto done;
6415 } 6406 }
6416 6407
6417
6418 /* Grab isolated pages from freelists. */ 6408 /* Grab isolated pages from freelists. */
6419 outer_end = isolate_freepages_range(&cc, outer_start, end); 6409 outer_end = isolate_freepages_range(&cc, outer_start, end);
6420 if (!outer_end) { 6410 if (!outer_end) {
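The block comment deleted above still documents the arithmetic that __find_buddy_index() and the merge loop rely on: the order-O buddy of a block is found by flipping bit O of its index, and the combined order-(O+1) parent by clearing that bit. A standalone sketch of that math on bare indices, reproducing the #8/#10 example from the removed comment:

    #include <stdio.h>

    /* Buddy of page_idx at the given order: flip bit `order`
     * (B2 = B1 ^ (1 << O) in the removed comment). */
    static unsigned long find_buddy_index(unsigned long page_idx, unsigned int order)
    {
            return page_idx ^ (1UL << order);
    }

    /* Combined parent at order + 1: clear bit `order`
     * (P = B & ~(1 << O) in the removed comment). */
    static unsigned long parent_index(unsigned long page_idx, unsigned int order)
    {
            return page_idx & ~(1UL << order);
    }

    int main(void)
    {
            unsigned long idx = 8;
            unsigned int order = 1;
            unsigned long buddy = find_buddy_index(idx, order);

            printf("order-%u buddy of #%lu is #%lu\n", order, idx, buddy);        /* #10 */
            printf("their combined parent is #%lu\n", parent_index(buddy, order)); /* #8 */
            return 0;
    }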
diff --git a/mm/page_isolation.c b/mm/page_isolation.c
index d1473b2e9481..c8778f7e208e 100644
--- a/mm/page_isolation.c
+++ b/mm/page_isolation.c
@@ -60,6 +60,7 @@ out:
60 int migratetype = get_pageblock_migratetype(page); 60 int migratetype = get_pageblock_migratetype(page);
61 61
62 set_pageblock_migratetype(page, MIGRATE_ISOLATE); 62 set_pageblock_migratetype(page, MIGRATE_ISOLATE);
63 zone->nr_isolate_pageblock++;
63 nr_pages = move_freepages_block(zone, page, MIGRATE_ISOLATE); 64 nr_pages = move_freepages_block(zone, page, MIGRATE_ISOLATE);
64 65
65 __mod_zone_freepage_state(zone, -nr_pages, migratetype); 66 __mod_zone_freepage_state(zone, -nr_pages, migratetype);
@@ -75,16 +76,54 @@ void unset_migratetype_isolate(struct page *page, unsigned migratetype)
75{ 76{
76 struct zone *zone; 77 struct zone *zone;
77 unsigned long flags, nr_pages; 78 unsigned long flags, nr_pages;
79 struct page *isolated_page = NULL;
80 unsigned int order;
81 unsigned long page_idx, buddy_idx;
82 struct page *buddy;
78 83
79 zone = page_zone(page); 84 zone = page_zone(page);
80 spin_lock_irqsave(&zone->lock, flags); 85 spin_lock_irqsave(&zone->lock, flags);
81 if (get_pageblock_migratetype(page) != MIGRATE_ISOLATE) 86 if (get_pageblock_migratetype(page) != MIGRATE_ISOLATE)
82 goto out; 87 goto out;
83 nr_pages = move_freepages_block(zone, page, migratetype); 88
84 __mod_zone_freepage_state(zone, nr_pages, migratetype); 89 /*
 90 * Because a freepage larger than pageblock_order on an isolated
 91 * pageblock is restricted from merging due to the freepage counting
 92 * problem, it is possible that a free buddy page exists.
 93 * move_freepages_block() doesn't handle merging, so we need another
 94 * approach to merge them: isolating and freeing the page will
 95 * cause these pages to be merged.
96 */
97 if (PageBuddy(page)) {
98 order = page_order(page);
99 if (order >= pageblock_order) {
100 page_idx = page_to_pfn(page) & ((1 << MAX_ORDER) - 1);
101 buddy_idx = __find_buddy_index(page_idx, order);
102 buddy = page + (buddy_idx - page_idx);
103
104 if (!is_migrate_isolate_page(buddy)) {
105 __isolate_free_page(page, order);
106 set_page_refcounted(page);
107 isolated_page = page;
108 }
109 }
110 }
111
112 /*
 113 * If we isolated a freepage of more than pageblock_order, there
 114 * should be no other freepage in the range, so we can avoid the
 115 * costly pageblock scan for freepage moving.
116 */
117 if (!isolated_page) {
118 nr_pages = move_freepages_block(zone, page, migratetype);
119 __mod_zone_freepage_state(zone, nr_pages, migratetype);
120 }
85 set_pageblock_migratetype(page, migratetype); 121 set_pageblock_migratetype(page, migratetype);
122 zone->nr_isolate_pageblock--;
86out: 123out:
87 spin_unlock_irqrestore(&zone->lock, flags); 124 spin_unlock_irqrestore(&zone->lock, flags);
125 if (isolated_page)
126 __free_pages(isolated_page, order);
88} 127}
89 128
90static inline struct page * 129static inline struct page *
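The unset path above handles a corner case created by the merge cap in __free_one_page(): a free page of at least pageblock_order may be sitting next to a mergeable buddy outside the isolated pageblock, so it is pulled out with __isolate_free_page() and re-freed to let the buddy allocator merge across the boundary. A sketch of just the index arithmetic that locates the candidate buddy; MAX_ORDER and the pageblock_order value here are illustrative assumptions:

    #include <stdio.h>

    #define MAX_ORDER       11      /* assumption: common kernel default */
    #define PAGEBLOCK_ORDER 9       /* assumption: pageblock_order on x86 */

    int main(void)
    {
            unsigned long pfn = 0x12600;            /* hypothetical isolated free page */
            unsigned int order = PAGEBLOCK_ORDER;   /* >= pageblock_order takes this path */

            /* Same index math as the unset path above. */
            unsigned long page_idx = pfn & ((1UL << MAX_ORDER) - 1);
            unsigned long buddy_idx = page_idx ^ (1UL << order);
            long delta = (long)buddy_idx - (long)page_idx;
            unsigned long buddy_pfn = pfn + delta;

            printf("page_idx=%#lx buddy_idx=%#lx buddy_pfn=%#lx\n",
                   page_idx, buddy_idx, buddy_pfn);

            /* If the buddy's pageblock is not isolated, the real code calls
             * __isolate_free_page() and then __free_pages() so the allocator
             * can merge the pair across the pageblock boundary. */
            return 0;
    }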
diff --git a/mm/slab_common.c b/mm/slab_common.c
index 406944207b61..dcdab81bd240 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -259,6 +259,10 @@ struct kmem_cache *find_mergeable(size_t size, size_t align,
259 if (s->size - size >= sizeof(void *)) 259 if (s->size - size >= sizeof(void *))
260 continue; 260 continue;
261 261
262 if (IS_ENABLED(CONFIG_SLAB) && align &&
263 (align > s->align || s->align % align))
264 continue;
265
262 return s; 266 return s;
263 } 267 }
264 return NULL; 268 return NULL;
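The added check (which only applies when CONFIG_SLAB is enabled) refuses to merge a new cache into an existing one whose alignment is incompatible: the existing cache must be aligned at least as strictly as requested, and its alignment must be a whole multiple of the requested one. A small sketch of that compatibility rule:

    #include <stdbool.h>
    #include <stdio.h>

    /* Compatibility rule from the hunk above: an existing cache aligned to
     * s_align can serve a request for `align` only if it is at least as
     * strict and a whole multiple of it. */
    static bool align_compatible(unsigned int s_align, unsigned int align)
    {
            if (!align)
                    return true;    /* no alignment requested */
            return align <= s_align && s_align % align == 0;
    }

    int main(void)
    {
            printf("%d\n", align_compatible(64, 32));  /* 1: 64 covers 32 */
            printf("%d\n", align_compatible(32, 64));  /* 0: too weak */
            printf("%d\n", align_compatible(96, 64));  /* 0: not a multiple of 64 */
            return 0;
    }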
diff --git a/net/bridge/netfilter/nft_reject_bridge.c b/net/bridge/netfilter/nft_reject_bridge.c
index 654c9018e3e7..48da2c54a69e 100644
--- a/net/bridge/netfilter/nft_reject_bridge.c
+++ b/net/bridge/netfilter/nft_reject_bridge.c
@@ -18,6 +18,7 @@
18#include <net/netfilter/ipv6/nf_reject.h> 18#include <net/netfilter/ipv6/nf_reject.h>
19#include <linux/ip.h> 19#include <linux/ip.h>
20#include <net/ip.h> 20#include <net/ip.h>
21#include <net/ip6_checksum.h>
21#include <linux/netfilter_bridge.h> 22#include <linux/netfilter_bridge.h>
22#include "../br_private.h" 23#include "../br_private.h"
23 24
diff --git a/net/ceph/crypto.c b/net/ceph/crypto.c
index 62fc5e7a9acf..790fe89d90c0 100644
--- a/net/ceph/crypto.c
+++ b/net/ceph/crypto.c
@@ -90,11 +90,82 @@ static struct crypto_blkcipher *ceph_crypto_alloc_cipher(void)
90 90
91static const u8 *aes_iv = (u8 *)CEPH_AES_IV; 91static const u8 *aes_iv = (u8 *)CEPH_AES_IV;
92 92
93/*
94 * Should be used for buffers allocated with ceph_kvmalloc().
95 * Currently these are encrypt out-buffer (ceph_buffer) and decrypt
96 * in-buffer (msg front).
97 *
98 * Dispose of @sgt with teardown_sgtable().
99 *
100 * @prealloc_sg is to avoid memory allocation inside sg_alloc_table()
101 * in cases where a single sg is sufficient. No attempt to reduce the
102 * number of sgs by squeezing physically contiguous pages together is
103 * made though, for simplicity.
104 */
105static int setup_sgtable(struct sg_table *sgt, struct scatterlist *prealloc_sg,
106 const void *buf, unsigned int buf_len)
107{
108 struct scatterlist *sg;
109 const bool is_vmalloc = is_vmalloc_addr(buf);
110 unsigned int off = offset_in_page(buf);
111 unsigned int chunk_cnt = 1;
112 unsigned int chunk_len = PAGE_ALIGN(off + buf_len);
113 int i;
114 int ret;
115
116 if (buf_len == 0) {
117 memset(sgt, 0, sizeof(*sgt));
118 return -EINVAL;
119 }
120
121 if (is_vmalloc) {
122 chunk_cnt = chunk_len >> PAGE_SHIFT;
123 chunk_len = PAGE_SIZE;
124 }
125
126 if (chunk_cnt > 1) {
127 ret = sg_alloc_table(sgt, chunk_cnt, GFP_NOFS);
128 if (ret)
129 return ret;
130 } else {
131 WARN_ON(chunk_cnt != 1);
132 sg_init_table(prealloc_sg, 1);
133 sgt->sgl = prealloc_sg;
134 sgt->nents = sgt->orig_nents = 1;
135 }
136
137 for_each_sg(sgt->sgl, sg, sgt->orig_nents, i) {
138 struct page *page;
139 unsigned int len = min(chunk_len - off, buf_len);
140
141 if (is_vmalloc)
142 page = vmalloc_to_page(buf);
143 else
144 page = virt_to_page(buf);
145
146 sg_set_page(sg, page, len, off);
147
148 off = 0;
149 buf += len;
150 buf_len -= len;
151 }
152 WARN_ON(buf_len != 0);
153
154 return 0;
155}
156
157static void teardown_sgtable(struct sg_table *sgt)
158{
159 if (sgt->orig_nents > 1)
160 sg_free_table(sgt);
161}
162
93static int ceph_aes_encrypt(const void *key, int key_len, 163static int ceph_aes_encrypt(const void *key, int key_len,
94 void *dst, size_t *dst_len, 164 void *dst, size_t *dst_len,
95 const void *src, size_t src_len) 165 const void *src, size_t src_len)
96{ 166{
97 struct scatterlist sg_in[2], sg_out[1]; 167 struct scatterlist sg_in[2], prealloc_sg;
168 struct sg_table sg_out;
98 struct crypto_blkcipher *tfm = ceph_crypto_alloc_cipher(); 169 struct crypto_blkcipher *tfm = ceph_crypto_alloc_cipher();
99 struct blkcipher_desc desc = { .tfm = tfm, .flags = 0 }; 170 struct blkcipher_desc desc = { .tfm = tfm, .flags = 0 };
100 int ret; 171 int ret;
@@ -110,16 +181,18 @@ static int ceph_aes_encrypt(const void *key, int key_len,
110 181
111 *dst_len = src_len + zero_padding; 182 *dst_len = src_len + zero_padding;
112 183
113 crypto_blkcipher_setkey((void *)tfm, key, key_len);
114 sg_init_table(sg_in, 2); 184 sg_init_table(sg_in, 2);
115 sg_set_buf(&sg_in[0], src, src_len); 185 sg_set_buf(&sg_in[0], src, src_len);
116 sg_set_buf(&sg_in[1], pad, zero_padding); 186 sg_set_buf(&sg_in[1], pad, zero_padding);
117 sg_init_table(sg_out, 1); 187 ret = setup_sgtable(&sg_out, &prealloc_sg, dst, *dst_len);
118 sg_set_buf(sg_out, dst, *dst_len); 188 if (ret)
189 goto out_tfm;
190
191 crypto_blkcipher_setkey((void *)tfm, key, key_len);
119 iv = crypto_blkcipher_crt(tfm)->iv; 192 iv = crypto_blkcipher_crt(tfm)->iv;
120 ivsize = crypto_blkcipher_ivsize(tfm); 193 ivsize = crypto_blkcipher_ivsize(tfm);
121
122 memcpy(iv, aes_iv, ivsize); 194 memcpy(iv, aes_iv, ivsize);
195
123 /* 196 /*
124 print_hex_dump(KERN_ERR, "enc key: ", DUMP_PREFIX_NONE, 16, 1, 197 print_hex_dump(KERN_ERR, "enc key: ", DUMP_PREFIX_NONE, 16, 1,
125 key, key_len, 1); 198 key, key_len, 1);
@@ -128,16 +201,22 @@ static int ceph_aes_encrypt(const void *key, int key_len,
128 print_hex_dump(KERN_ERR, "enc pad: ", DUMP_PREFIX_NONE, 16, 1, 201 print_hex_dump(KERN_ERR, "enc pad: ", DUMP_PREFIX_NONE, 16, 1,
129 pad, zero_padding, 1); 202 pad, zero_padding, 1);
130 */ 203 */
131 ret = crypto_blkcipher_encrypt(&desc, sg_out, sg_in, 204 ret = crypto_blkcipher_encrypt(&desc, sg_out.sgl, sg_in,
132 src_len + zero_padding); 205 src_len + zero_padding);
133 crypto_free_blkcipher(tfm); 206 if (ret < 0) {
134 if (ret < 0)
135 pr_err("ceph_aes_crypt failed %d\n", ret); 207 pr_err("ceph_aes_crypt failed %d\n", ret);
208 goto out_sg;
209 }
136 /* 210 /*
137 print_hex_dump(KERN_ERR, "enc out: ", DUMP_PREFIX_NONE, 16, 1, 211 print_hex_dump(KERN_ERR, "enc out: ", DUMP_PREFIX_NONE, 16, 1,
138 dst, *dst_len, 1); 212 dst, *dst_len, 1);
139 */ 213 */
140 return 0; 214
215out_sg:
216 teardown_sgtable(&sg_out);
217out_tfm:
218 crypto_free_blkcipher(tfm);
219 return ret;
141} 220}
142 221
143static int ceph_aes_encrypt2(const void *key, int key_len, void *dst, 222static int ceph_aes_encrypt2(const void *key, int key_len, void *dst,
@@ -145,7 +224,8 @@ static int ceph_aes_encrypt2(const void *key, int key_len, void *dst,
145 const void *src1, size_t src1_len, 224 const void *src1, size_t src1_len,
146 const void *src2, size_t src2_len) 225 const void *src2, size_t src2_len)
147{ 226{
148 struct scatterlist sg_in[3], sg_out[1]; 227 struct scatterlist sg_in[3], prealloc_sg;
228 struct sg_table sg_out;
149 struct crypto_blkcipher *tfm = ceph_crypto_alloc_cipher(); 229 struct crypto_blkcipher *tfm = ceph_crypto_alloc_cipher();
150 struct blkcipher_desc desc = { .tfm = tfm, .flags = 0 }; 230 struct blkcipher_desc desc = { .tfm = tfm, .flags = 0 };
151 int ret; 231 int ret;
@@ -161,17 +241,19 @@ static int ceph_aes_encrypt2(const void *key, int key_len, void *dst,
161 241
162 *dst_len = src1_len + src2_len + zero_padding; 242 *dst_len = src1_len + src2_len + zero_padding;
163 243
164 crypto_blkcipher_setkey((void *)tfm, key, key_len);
165 sg_init_table(sg_in, 3); 244 sg_init_table(sg_in, 3);
166 sg_set_buf(&sg_in[0], src1, src1_len); 245 sg_set_buf(&sg_in[0], src1, src1_len);
167 sg_set_buf(&sg_in[1], src2, src2_len); 246 sg_set_buf(&sg_in[1], src2, src2_len);
168 sg_set_buf(&sg_in[2], pad, zero_padding); 247 sg_set_buf(&sg_in[2], pad, zero_padding);
169 sg_init_table(sg_out, 1); 248 ret = setup_sgtable(&sg_out, &prealloc_sg, dst, *dst_len);
170 sg_set_buf(sg_out, dst, *dst_len); 249 if (ret)
250 goto out_tfm;
251
252 crypto_blkcipher_setkey((void *)tfm, key, key_len);
171 iv = crypto_blkcipher_crt(tfm)->iv; 253 iv = crypto_blkcipher_crt(tfm)->iv;
172 ivsize = crypto_blkcipher_ivsize(tfm); 254 ivsize = crypto_blkcipher_ivsize(tfm);
173
174 memcpy(iv, aes_iv, ivsize); 255 memcpy(iv, aes_iv, ivsize);
256
175 /* 257 /*
176 print_hex_dump(KERN_ERR, "enc key: ", DUMP_PREFIX_NONE, 16, 1, 258 print_hex_dump(KERN_ERR, "enc key: ", DUMP_PREFIX_NONE, 16, 1,
177 key, key_len, 1); 259 key, key_len, 1);
@@ -182,23 +264,30 @@ static int ceph_aes_encrypt2(const void *key, int key_len, void *dst,
182 print_hex_dump(KERN_ERR, "enc pad: ", DUMP_PREFIX_NONE, 16, 1, 264 print_hex_dump(KERN_ERR, "enc pad: ", DUMP_PREFIX_NONE, 16, 1,
183 pad, zero_padding, 1); 265 pad, zero_padding, 1);
184 */ 266 */
185 ret = crypto_blkcipher_encrypt(&desc, sg_out, sg_in, 267 ret = crypto_blkcipher_encrypt(&desc, sg_out.sgl, sg_in,
186 src1_len + src2_len + zero_padding); 268 src1_len + src2_len + zero_padding);
187 crypto_free_blkcipher(tfm); 269 if (ret < 0) {
188 if (ret < 0)
189 pr_err("ceph_aes_crypt2 failed %d\n", ret); 270 pr_err("ceph_aes_crypt2 failed %d\n", ret);
271 goto out_sg;
272 }
190 /* 273 /*
191 print_hex_dump(KERN_ERR, "enc out: ", DUMP_PREFIX_NONE, 16, 1, 274 print_hex_dump(KERN_ERR, "enc out: ", DUMP_PREFIX_NONE, 16, 1,
192 dst, *dst_len, 1); 275 dst, *dst_len, 1);
193 */ 276 */
194 return 0; 277
278out_sg:
279 teardown_sgtable(&sg_out);
280out_tfm:
281 crypto_free_blkcipher(tfm);
282 return ret;
195} 283}
196 284
197static int ceph_aes_decrypt(const void *key, int key_len, 285static int ceph_aes_decrypt(const void *key, int key_len,
198 void *dst, size_t *dst_len, 286 void *dst, size_t *dst_len,
199 const void *src, size_t src_len) 287 const void *src, size_t src_len)
200{ 288{
201 struct scatterlist sg_in[1], sg_out[2]; 289 struct sg_table sg_in;
290 struct scatterlist sg_out[2], prealloc_sg;
202 struct crypto_blkcipher *tfm = ceph_crypto_alloc_cipher(); 291 struct crypto_blkcipher *tfm = ceph_crypto_alloc_cipher();
203 struct blkcipher_desc desc = { .tfm = tfm }; 292 struct blkcipher_desc desc = { .tfm = tfm };
204 char pad[16]; 293 char pad[16];
@@ -210,16 +299,16 @@ static int ceph_aes_decrypt(const void *key, int key_len,
210 if (IS_ERR(tfm)) 299 if (IS_ERR(tfm))
211 return PTR_ERR(tfm); 300 return PTR_ERR(tfm);
212 301
213 crypto_blkcipher_setkey((void *)tfm, key, key_len);
214 sg_init_table(sg_in, 1);
215 sg_init_table(sg_out, 2); 302 sg_init_table(sg_out, 2);
216 sg_set_buf(sg_in, src, src_len);
217 sg_set_buf(&sg_out[0], dst, *dst_len); 303 sg_set_buf(&sg_out[0], dst, *dst_len);
218 sg_set_buf(&sg_out[1], pad, sizeof(pad)); 304 sg_set_buf(&sg_out[1], pad, sizeof(pad));
305 ret = setup_sgtable(&sg_in, &prealloc_sg, src, src_len);
306 if (ret)
307 goto out_tfm;
219 308
309 crypto_blkcipher_setkey((void *)tfm, key, key_len);
220 iv = crypto_blkcipher_crt(tfm)->iv; 310 iv = crypto_blkcipher_crt(tfm)->iv;
221 ivsize = crypto_blkcipher_ivsize(tfm); 311 ivsize = crypto_blkcipher_ivsize(tfm);
222
223 memcpy(iv, aes_iv, ivsize); 312 memcpy(iv, aes_iv, ivsize);
224 313
225 /* 314 /*
@@ -228,12 +317,10 @@ static int ceph_aes_decrypt(const void *key, int key_len,
228 print_hex_dump(KERN_ERR, "dec in: ", DUMP_PREFIX_NONE, 16, 1, 317 print_hex_dump(KERN_ERR, "dec in: ", DUMP_PREFIX_NONE, 16, 1,
229 src, src_len, 1); 318 src, src_len, 1);
230 */ 319 */
231 320 ret = crypto_blkcipher_decrypt(&desc, sg_out, sg_in.sgl, src_len);
232 ret = crypto_blkcipher_decrypt(&desc, sg_out, sg_in, src_len);
233 crypto_free_blkcipher(tfm);
234 if (ret < 0) { 321 if (ret < 0) {
235 pr_err("ceph_aes_decrypt failed %d\n", ret); 322 pr_err("ceph_aes_decrypt failed %d\n", ret);
236 return ret; 323 goto out_sg;
237 } 324 }
238 325
239 if (src_len <= *dst_len) 326 if (src_len <= *dst_len)
@@ -251,7 +338,12 @@ static int ceph_aes_decrypt(const void *key, int key_len,
251 print_hex_dump(KERN_ERR, "dec out: ", DUMP_PREFIX_NONE, 16, 1, 338 print_hex_dump(KERN_ERR, "dec out: ", DUMP_PREFIX_NONE, 16, 1,
252 dst, *dst_len, 1); 339 dst, *dst_len, 1);
253 */ 340 */
254 return 0; 341
342out_sg:
343 teardown_sgtable(&sg_in);
344out_tfm:
345 crypto_free_blkcipher(tfm);
346 return ret;
255} 347}
256 348
257static int ceph_aes_decrypt2(const void *key, int key_len, 349static int ceph_aes_decrypt2(const void *key, int key_len,
@@ -259,7 +351,8 @@ static int ceph_aes_decrypt2(const void *key, int key_len,
259 void *dst2, size_t *dst2_len, 351 void *dst2, size_t *dst2_len,
260 const void *src, size_t src_len) 352 const void *src, size_t src_len)
261{ 353{
262 struct scatterlist sg_in[1], sg_out[3]; 354 struct sg_table sg_in;
355 struct scatterlist sg_out[3], prealloc_sg;
263 struct crypto_blkcipher *tfm = ceph_crypto_alloc_cipher(); 356 struct crypto_blkcipher *tfm = ceph_crypto_alloc_cipher();
264 struct blkcipher_desc desc = { .tfm = tfm }; 357 struct blkcipher_desc desc = { .tfm = tfm };
265 char pad[16]; 358 char pad[16];
@@ -271,17 +364,17 @@ static int ceph_aes_decrypt2(const void *key, int key_len,
271 if (IS_ERR(tfm)) 364 if (IS_ERR(tfm))
272 return PTR_ERR(tfm); 365 return PTR_ERR(tfm);
273 366
274 sg_init_table(sg_in, 1);
275 sg_set_buf(sg_in, src, src_len);
276 sg_init_table(sg_out, 3); 367 sg_init_table(sg_out, 3);
277 sg_set_buf(&sg_out[0], dst1, *dst1_len); 368 sg_set_buf(&sg_out[0], dst1, *dst1_len);
278 sg_set_buf(&sg_out[1], dst2, *dst2_len); 369 sg_set_buf(&sg_out[1], dst2, *dst2_len);
279 sg_set_buf(&sg_out[2], pad, sizeof(pad)); 370 sg_set_buf(&sg_out[2], pad, sizeof(pad));
371 ret = setup_sgtable(&sg_in, &prealloc_sg, src, src_len);
372 if (ret)
373 goto out_tfm;
280 374
281 crypto_blkcipher_setkey((void *)tfm, key, key_len); 375 crypto_blkcipher_setkey((void *)tfm, key, key_len);
282 iv = crypto_blkcipher_crt(tfm)->iv; 376 iv = crypto_blkcipher_crt(tfm)->iv;
283 ivsize = crypto_blkcipher_ivsize(tfm); 377 ivsize = crypto_blkcipher_ivsize(tfm);
284
285 memcpy(iv, aes_iv, ivsize); 378 memcpy(iv, aes_iv, ivsize);
286 379
287 /* 380 /*
@@ -290,12 +383,10 @@ static int ceph_aes_decrypt2(const void *key, int key_len,
290 print_hex_dump(KERN_ERR, "dec in: ", DUMP_PREFIX_NONE, 16, 1, 383 print_hex_dump(KERN_ERR, "dec in: ", DUMP_PREFIX_NONE, 16, 1,
291 src, src_len, 1); 384 src, src_len, 1);
292 */ 385 */
293 386 ret = crypto_blkcipher_decrypt(&desc, sg_out, sg_in.sgl, src_len);
294 ret = crypto_blkcipher_decrypt(&desc, sg_out, sg_in, src_len);
295 crypto_free_blkcipher(tfm);
296 if (ret < 0) { 387 if (ret < 0) {
297 pr_err("ceph_aes_decrypt failed %d\n", ret); 388 pr_err("ceph_aes_decrypt failed %d\n", ret);
298 return ret; 389 goto out_sg;
299 } 390 }
300 391
301 if (src_len <= *dst1_len) 392 if (src_len <= *dst1_len)
@@ -325,7 +416,11 @@ static int ceph_aes_decrypt2(const void *key, int key_len,
325 dst2, *dst2_len, 1); 416 dst2, *dst2_len, 1);
326 */ 417 */
327 418
328 return 0; 419out_sg:
420 teardown_sgtable(&sg_in);
421out_tfm:
422 crypto_free_blkcipher(tfm);
423 return ret;
329} 424}
330 425
331 426
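setup_sgtable() above distinguishes linear buffers, which can be covered by one scatterlist entry, from vmalloc buffers, which are only virtually contiguous and therefore need one entry per touched page. A sketch of the chunk accounting, assuming 4 KiB pages:

    #include <stdbool.h>
    #include <stdio.h>

    #define PAGE_SHIFT 12
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)
    #define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

    /* Chunk accounting from setup_sgtable(): linear buffers need one
     * sg entry; vmalloc buffers need one entry per touched page. */
    static unsigned int sg_chunks(unsigned long off, unsigned long len, bool is_vmalloc)
    {
            if (!is_vmalloc)
                    return 1;
            return PAGE_ALIGN(off + len) >> PAGE_SHIFT;
    }

    int main(void)
    {
            /* 6000 bytes starting 100 bytes into a page touch two pages
             * when only virtually contiguous... */
            printf("vmalloc chunks: %u\n", sg_chunks(100, 6000, true));  /* 2 */
            /* ...but remain one physically contiguous chunk otherwise. */
            printf("linear chunks:  %u\n", sg_chunks(100, 6000, false)); /* 1 */
            return 0;
    }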
diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c
index f3fc54eac09d..6f164289bde8 100644
--- a/net/ceph/osd_client.c
+++ b/net/ceph/osd_client.c
@@ -1007,8 +1007,8 @@ static void put_osd(struct ceph_osd *osd)
1007static void __remove_osd(struct ceph_osd_client *osdc, struct ceph_osd *osd) 1007static void __remove_osd(struct ceph_osd_client *osdc, struct ceph_osd *osd)
1008{ 1008{
1009 dout("__remove_osd %p\n", osd); 1009 dout("__remove_osd %p\n", osd);
1010 BUG_ON(!list_empty(&osd->o_requests)); 1010 WARN_ON(!list_empty(&osd->o_requests));
1011 BUG_ON(!list_empty(&osd->o_linger_requests)); 1011 WARN_ON(!list_empty(&osd->o_linger_requests));
1012 1012
1013 rb_erase(&osd->o_node, &osdc->osds); 1013 rb_erase(&osd->o_node, &osdc->osds);
1014 list_del_init(&osd->o_osd_lru); 1014 list_del_init(&osd->o_osd_lru);
@@ -1254,6 +1254,8 @@ static void __unregister_linger_request(struct ceph_osd_client *osdc,
1254 if (list_empty(&req->r_osd_item)) 1254 if (list_empty(&req->r_osd_item))
1255 req->r_osd = NULL; 1255 req->r_osd = NULL;
1256 } 1256 }
1257
1258 list_del_init(&req->r_req_lru_item); /* can be on notarget */
1257 ceph_osdc_put_request(req); 1259 ceph_osdc_put_request(req);
1258} 1260}
1259 1261
@@ -1395,6 +1397,7 @@ static int __map_request(struct ceph_osd_client *osdc,
1395 if (req->r_osd) { 1397 if (req->r_osd) {
1396 __cancel_request(req); 1398 __cancel_request(req);
1397 list_del_init(&req->r_osd_item); 1399 list_del_init(&req->r_osd_item);
1400 list_del_init(&req->r_linger_osd_item);
1398 req->r_osd = NULL; 1401 req->r_osd = NULL;
1399 } 1402 }
1400 1403
diff --git a/net/dsa/slave.c b/net/dsa/slave.c
index 6d1817449c36..ab03e00ffe8f 100644
--- a/net/dsa/slave.c
+++ b/net/dsa/slave.c
@@ -489,11 +489,14 @@ static void dsa_slave_phy_setup(struct dsa_slave_priv *p,
489 /* We could not connect to a designated PHY, so use the switch internal 489 /* We could not connect to a designated PHY, so use the switch internal
490 * MDIO bus instead 490 * MDIO bus instead
491 */ 491 */
492 if (!p->phy) 492 if (!p->phy) {
493 p->phy = ds->slave_mii_bus->phy_map[p->port]; 493 p->phy = ds->slave_mii_bus->phy_map[p->port];
494 else 494 phy_connect_direct(slave_dev, p->phy, dsa_slave_adjust_link,
495 p->phy_interface);
496 } else {
495 pr_info("attached PHY at address %d [%s]\n", 497 pr_info("attached PHY at address %d [%s]\n",
496 p->phy->addr, p->phy->drv->name); 498 p->phy->addr, p->phy->drv->name);
499 }
497} 500}
498 501
499int dsa_slave_suspend(struct net_device *slave_dev) 502int dsa_slave_suspend(struct net_device *slave_dev)
diff --git a/net/ipv4/fou.c b/net/ipv4/fou.c
index 32e78924e246..606c520ffd5a 100644
--- a/net/ipv4/fou.c
+++ b/net/ipv4/fou.c
@@ -133,6 +133,8 @@ static int fou_gro_complete(struct sk_buff *skb, int nhoff)
133 int err = -ENOSYS; 133 int err = -ENOSYS;
134 const struct net_offload **offloads; 134 const struct net_offload **offloads;
135 135
136 udp_tunnel_gro_complete(skb, nhoff);
137
136 rcu_read_lock(); 138 rcu_read_lock();
137 offloads = NAPI_GRO_CB(skb)->is_ipv6 ? inet6_offloads : inet_offloads; 139 offloads = NAPI_GRO_CB(skb)->is_ipv6 ? inet6_offloads : inet_offloads;
138 ops = rcu_dereference(offloads[proto]); 140 ops = rcu_dereference(offloads[proto]);
diff --git a/net/ipv4/geneve.c b/net/ipv4/geneve.c
index 065cd94c640c..dedb21e99914 100644
--- a/net/ipv4/geneve.c
+++ b/net/ipv4/geneve.c
@@ -144,6 +144,8 @@ int geneve_xmit_skb(struct geneve_sock *gs, struct rtable *rt,
144 gnvh = (struct genevehdr *)__skb_push(skb, sizeof(*gnvh) + opt_len); 144 gnvh = (struct genevehdr *)__skb_push(skb, sizeof(*gnvh) + opt_len);
145 geneve_build_header(gnvh, tun_flags, vni, opt_len, opt); 145 geneve_build_header(gnvh, tun_flags, vni, opt_len, opt);
146 146
147 skb_set_inner_protocol(skb, htons(ETH_P_TEB));
148
147 return udp_tunnel_xmit_skb(gs->sock, rt, skb, src, dst, 149 return udp_tunnel_xmit_skb(gs->sock, rt, skb, src, dst,
148 tos, ttl, df, src_port, dst_port, xnet); 150 tos, ttl, df, src_port, dst_port, xnet);
149} 151}
@@ -364,6 +366,7 @@ late_initcall(geneve_init_module);
364static void __exit geneve_cleanup_module(void) 366static void __exit geneve_cleanup_module(void)
365{ 367{
366 destroy_workqueue(geneve_wq); 368 destroy_workqueue(geneve_wq);
369 unregister_pernet_subsys(&geneve_net_ops);
367} 370}
368module_exit(geneve_cleanup_module); 371module_exit(geneve_cleanup_module);
369 372
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
index c373a9ad4555..9daf2177dc00 100644
--- a/net/ipv4/ip_sockglue.c
+++ b/net/ipv4/ip_sockglue.c
@@ -195,7 +195,7 @@ int ip_cmsg_send(struct net *net, struct msghdr *msg, struct ipcm_cookie *ipc,
195 for (cmsg = CMSG_FIRSTHDR(msg); cmsg; cmsg = CMSG_NXTHDR(msg, cmsg)) { 195 for (cmsg = CMSG_FIRSTHDR(msg); cmsg; cmsg = CMSG_NXTHDR(msg, cmsg)) {
196 if (!CMSG_OK(msg, cmsg)) 196 if (!CMSG_OK(msg, cmsg))
197 return -EINVAL; 197 return -EINVAL;
198#if defined(CONFIG_IPV6) 198#if IS_ENABLED(CONFIG_IPV6)
199 if (allow_ipv6 && 199 if (allow_ipv6 &&
200 cmsg->cmsg_level == SOL_IPV6 && 200 cmsg->cmsg_level == SOL_IPV6 &&
201 cmsg->cmsg_type == IPV6_PKTINFO) { 201 cmsg->cmsg_type == IPV6_PKTINFO) {
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index a12b455928e5..88fa2d160685 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -2315,6 +2315,35 @@ static inline bool tcp_packet_delayed(const struct tcp_sock *tp)
2315 2315
2316/* Undo procedures. */ 2316/* Undo procedures. */
2317 2317
2318/* We can clear retrans_stamp when there are no retransmissions in the
2319 * window. It would seem that it is trivially available for us in
 2320 * tp->retrans_out; however, that kind of assumption doesn't consider
 2321 * what will happen if errors occur when sending retransmission for the
 2322 * second time. ...It could be that such a segment has only
2323 * TCPCB_EVER_RETRANS set at the present time. It seems that checking
2324 * the head skb is enough except for some reneging corner cases that
2325 * are not worth the effort.
2326 *
2327 * Main reason for all this complexity is the fact that connection dying
2328 * time now depends on the validity of the retrans_stamp, in particular,
2329 * that successive retransmissions of a segment must not advance
2330 * retrans_stamp under any conditions.
2331 */
2332static bool tcp_any_retrans_done(const struct sock *sk)
2333{
2334 const struct tcp_sock *tp = tcp_sk(sk);
2335 struct sk_buff *skb;
2336
2337 if (tp->retrans_out)
2338 return true;
2339
2340 skb = tcp_write_queue_head(sk);
2341 if (unlikely(skb && TCP_SKB_CB(skb)->sacked & TCPCB_EVER_RETRANS))
2342 return true;
2343
2344 return false;
2345}
2346
2318#if FASTRETRANS_DEBUG > 1 2347#if FASTRETRANS_DEBUG > 1
2319static void DBGUNDO(struct sock *sk, const char *msg) 2348static void DBGUNDO(struct sock *sk, const char *msg)
2320{ 2349{
@@ -2410,6 +2439,8 @@ static bool tcp_try_undo_recovery(struct sock *sk)
2410 * is ACKed. For Reno it is MUST to prevent false 2439 * is ACKed. For Reno it is MUST to prevent false
2411 * fast retransmits (RFC2582). SACK TCP is safe. */ 2440 * fast retransmits (RFC2582). SACK TCP is safe. */
2412 tcp_moderate_cwnd(tp); 2441 tcp_moderate_cwnd(tp);
2442 if (!tcp_any_retrans_done(sk))
2443 tp->retrans_stamp = 0;
2413 return true; 2444 return true;
2414 } 2445 }
2415 tcp_set_ca_state(sk, TCP_CA_Open); 2446 tcp_set_ca_state(sk, TCP_CA_Open);
@@ -2430,35 +2461,6 @@ static bool tcp_try_undo_dsack(struct sock *sk)
2430 return false; 2461 return false;
2431} 2462}
2432 2463
2433/* We can clear retrans_stamp when there are no retransmissions in the
2434 * window. It would seem that it is trivially available for us in
 2435 * tp->retrans_out; however, that kind of assumption doesn't consider
 2436 * what will happen if errors occur when sending retransmission for the
 2437 * second time. ...It could be that such a segment has only
2438 * TCPCB_EVER_RETRANS set at the present time. It seems that checking
2439 * the head skb is enough except for some reneging corner cases that
2440 * are not worth the effort.
2441 *
2442 * Main reason for all this complexity is the fact that connection dying
2443 * time now depends on the validity of the retrans_stamp, in particular,
2444 * that successive retransmissions of a segment must not advance
2445 * retrans_stamp under any conditions.
2446 */
2447static bool tcp_any_retrans_done(const struct sock *sk)
2448{
2449 const struct tcp_sock *tp = tcp_sk(sk);
2450 struct sk_buff *skb;
2451
2452 if (tp->retrans_out)
2453 return true;
2454
2455 skb = tcp_write_queue_head(sk);
2456 if (unlikely(skb && TCP_SKB_CB(skb)->sacked & TCPCB_EVER_RETRANS))
2457 return true;
2458
2459 return false;
2460}
2461
2462/* Undo during loss recovery after partial ACK or using F-RTO. */ 2464/* Undo during loss recovery after partial ACK or using F-RTO. */
2463static bool tcp_try_undo_loss(struct sock *sk, bool frto_undo) 2465static bool tcp_try_undo_loss(struct sock *sk, bool frto_undo)
2464{ 2466{
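tcp_any_retrans_done(), moved above its new callers, is a two-tier check: the retrans_out counter handles the common case cheaply, and inspecting the head skb catches segments that only carry TCPCB_EVER_RETRANS after a failed second retransmission. A sketch of that pattern with simplified stand-in structures:

    #include <stdbool.h>
    #include <stdio.h>

    #define EVER_RETRANS 0x1   /* stand-in for TCPCB_EVER_RETRANS */

    struct fake_skb { unsigned char sacked; };
    struct fake_tp  { unsigned int retrans_out; struct fake_skb *head; };

    /* Two-tier check mirroring tcp_any_retrans_done(): the counter
     * covers the common case; the head skb covers segments that only
     * carry EVER_RETRANS after a failed second retransmission. */
    static bool any_retrans_done(const struct fake_tp *tp)
    {
            if (tp->retrans_out)
                    return true;
            if (tp->head && (tp->head->sacked & EVER_RETRANS))
                    return true;
            return false;
    }

    int main(void)
    {
            struct fake_skb head = { .sacked = EVER_RETRANS };
            struct fake_tp tp = { .retrans_out = 0, .head = &head };

            /* The counter is zero, but the head skb still proves a
             * retransmit happened, so retrans_stamp must not be cleared. */
            printf("%d\n", any_retrans_done(&tp)); /* 1 */
            return 0;
    }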
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
index 12c3c8ef3849..4564e1fca3eb 100644
--- a/net/ipv6/ip6_gre.c
+++ b/net/ipv6/ip6_gre.c
@@ -961,8 +961,6 @@ static void ip6gre_tnl_link_config(struct ip6_tnl *t, int set_mtu)
961 else 961 else
962 dev->flags &= ~IFF_POINTOPOINT; 962 dev->flags &= ~IFF_POINTOPOINT;
963 963
964 dev->iflink = p->link;
965
966 /* Precalculate GRE options length */ 964 /* Precalculate GRE options length */
967 if (t->parms.o_flags&(GRE_CSUM|GRE_KEY|GRE_SEQ)) { 965 if (t->parms.o_flags&(GRE_CSUM|GRE_KEY|GRE_SEQ)) {
968 if (t->parms.o_flags&GRE_CSUM) 966 if (t->parms.o_flags&GRE_CSUM)
@@ -1272,6 +1270,7 @@ static int ip6gre_tunnel_init(struct net_device *dev)
1272 u64_stats_init(&ip6gre_tunnel_stats->syncp); 1270 u64_stats_init(&ip6gre_tunnel_stats->syncp);
1273 } 1271 }
1274 1272
1273 dev->iflink = tunnel->parms.link;
1275 1274
1276 return 0; 1275 return 0;
1277} 1276}
@@ -1481,6 +1480,8 @@ static int ip6gre_tap_init(struct net_device *dev)
1481 if (!dev->tstats) 1480 if (!dev->tstats)
1482 return -ENOMEM; 1481 return -ENOMEM;
1483 1482
1483 dev->iflink = tunnel->parms.link;
1484
1484 return 0; 1485 return 0;
1485} 1486}
1486 1487
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index 9409887fb664..9cb94cfa0ae7 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -272,9 +272,6 @@ static int ip6_tnl_create2(struct net_device *dev)
272 int err; 272 int err;
273 273
274 t = netdev_priv(dev); 274 t = netdev_priv(dev);
275 err = ip6_tnl_dev_init(dev);
276 if (err < 0)
277 goto out;
278 275
279 err = register_netdevice(dev); 276 err = register_netdevice(dev);
280 if (err < 0) 277 if (err < 0)
@@ -1462,6 +1459,7 @@ ip6_tnl_change_mtu(struct net_device *dev, int new_mtu)
1462 1459
1463 1460
1464static const struct net_device_ops ip6_tnl_netdev_ops = { 1461static const struct net_device_ops ip6_tnl_netdev_ops = {
1462 .ndo_init = ip6_tnl_dev_init,
1465 .ndo_uninit = ip6_tnl_dev_uninit, 1463 .ndo_uninit = ip6_tnl_dev_uninit,
1466 .ndo_start_xmit = ip6_tnl_xmit, 1464 .ndo_start_xmit = ip6_tnl_xmit,
1467 .ndo_do_ioctl = ip6_tnl_ioctl, 1465 .ndo_do_ioctl = ip6_tnl_ioctl,
@@ -1546,16 +1544,10 @@ static int __net_init ip6_fb_tnl_dev_init(struct net_device *dev)
1546 struct ip6_tnl *t = netdev_priv(dev); 1544 struct ip6_tnl *t = netdev_priv(dev);
1547 struct net *net = dev_net(dev); 1545 struct net *net = dev_net(dev);
1548 struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id); 1546 struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
1549 int err = ip6_tnl_dev_init_gen(dev);
1550
1551 if (err)
1552 return err;
1553 1547
1554 t->parms.proto = IPPROTO_IPV6; 1548 t->parms.proto = IPPROTO_IPV6;
1555 dev_hold(dev); 1549 dev_hold(dev);
1556 1550
1557 ip6_tnl_link_config(t);
1558
1559 rcu_assign_pointer(ip6n->tnls_wc[0], t); 1551 rcu_assign_pointer(ip6n->tnls_wc[0], t);
1560 return 0; 1552 return 0;
1561} 1553}
diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c
index d440bb585524..31089d153fd3 100644
--- a/net/ipv6/ip6_vti.c
+++ b/net/ipv6/ip6_vti.c
@@ -172,10 +172,6 @@ static int vti6_tnl_create2(struct net_device *dev)
172 struct vti6_net *ip6n = net_generic(net, vti6_net_id); 172 struct vti6_net *ip6n = net_generic(net, vti6_net_id);
173 int err; 173 int err;
174 174
175 err = vti6_dev_init(dev);
176 if (err < 0)
177 goto out;
178
179 err = register_netdevice(dev); 175 err = register_netdevice(dev);
180 if (err < 0) 176 if (err < 0)
181 goto out; 177 goto out;
@@ -783,6 +779,7 @@ static int vti6_change_mtu(struct net_device *dev, int new_mtu)
783} 779}
784 780
785static const struct net_device_ops vti6_netdev_ops = { 781static const struct net_device_ops vti6_netdev_ops = {
782 .ndo_init = vti6_dev_init,
786 .ndo_uninit = vti6_dev_uninit, 783 .ndo_uninit = vti6_dev_uninit,
787 .ndo_start_xmit = vti6_tnl_xmit, 784 .ndo_start_xmit = vti6_tnl_xmit,
788 .ndo_do_ioctl = vti6_ioctl, 785 .ndo_do_ioctl = vti6_ioctl,
@@ -852,16 +849,10 @@ static int __net_init vti6_fb_tnl_dev_init(struct net_device *dev)
852 struct ip6_tnl *t = netdev_priv(dev); 849 struct ip6_tnl *t = netdev_priv(dev);
853 struct net *net = dev_net(dev); 850 struct net *net = dev_net(dev);
854 struct vti6_net *ip6n = net_generic(net, vti6_net_id); 851 struct vti6_net *ip6n = net_generic(net, vti6_net_id);
855 int err = vti6_dev_init_gen(dev);
856
857 if (err)
858 return err;
859 852
860 t->parms.proto = IPPROTO_IPV6; 853 t->parms.proto = IPPROTO_IPV6;
861 dev_hold(dev); 854 dev_hold(dev);
862 855
863 vti6_link_config(t);
864
865 rcu_assign_pointer(ip6n->tnls_wc[0], t); 856 rcu_assign_pointer(ip6n->tnls_wc[0], t);
866 return 0; 857 return 0;
867} 858}
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
index 58e5b4710127..a24557a1c1d8 100644
--- a/net/ipv6/sit.c
+++ b/net/ipv6/sit.c
@@ -195,10 +195,8 @@ static int ipip6_tunnel_create(struct net_device *dev)
195 struct sit_net *sitn = net_generic(net, sit_net_id); 195 struct sit_net *sitn = net_generic(net, sit_net_id);
196 int err; 196 int err;
197 197
198 err = ipip6_tunnel_init(dev); 198 memcpy(dev->dev_addr, &t->parms.iph.saddr, 4);
199 if (err < 0) 199 memcpy(dev->broadcast, &t->parms.iph.daddr, 4);
200 goto out;
201 ipip6_tunnel_clone_6rd(dev, sitn);
202 200
203 if ((__force u16)t->parms.i_flags & SIT_ISATAP) 201 if ((__force u16)t->parms.i_flags & SIT_ISATAP)
204 dev->priv_flags |= IFF_ISATAP; 202 dev->priv_flags |= IFF_ISATAP;
@@ -207,7 +205,8 @@ static int ipip6_tunnel_create(struct net_device *dev)
207 if (err < 0) 205 if (err < 0)
208 goto out; 206 goto out;
209 207
210 strcpy(t->parms.name, dev->name); 208 ipip6_tunnel_clone_6rd(dev, sitn);
209
211 dev->rtnl_link_ops = &sit_link_ops; 210 dev->rtnl_link_ops = &sit_link_ops;
212 211
213 dev_hold(dev); 212 dev_hold(dev);
@@ -1330,6 +1329,7 @@ static int ipip6_tunnel_change_mtu(struct net_device *dev, int new_mtu)
1330} 1329}
1331 1330
1332static const struct net_device_ops ipip6_netdev_ops = { 1331static const struct net_device_ops ipip6_netdev_ops = {
1332 .ndo_init = ipip6_tunnel_init,
1333 .ndo_uninit = ipip6_tunnel_uninit, 1333 .ndo_uninit = ipip6_tunnel_uninit,
1334 .ndo_start_xmit = sit_tunnel_xmit, 1334 .ndo_start_xmit = sit_tunnel_xmit,
1335 .ndo_do_ioctl = ipip6_tunnel_ioctl, 1335 .ndo_do_ioctl = ipip6_tunnel_ioctl,
@@ -1378,9 +1378,7 @@ static int ipip6_tunnel_init(struct net_device *dev)
1378 1378
1379 tunnel->dev = dev; 1379 tunnel->dev = dev;
1380 tunnel->net = dev_net(dev); 1380 tunnel->net = dev_net(dev);
1381 1381 strcpy(tunnel->parms.name, dev->name);
1382 memcpy(dev->dev_addr, &tunnel->parms.iph.saddr, 4);
1383 memcpy(dev->broadcast, &tunnel->parms.iph.daddr, 4);
1384 1382
1385 ipip6_tunnel_bind_dev(dev); 1383 ipip6_tunnel_bind_dev(dev);
1386 dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats); 1384 dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
@@ -1405,7 +1403,6 @@ static int __net_init ipip6_fb_tunnel_init(struct net_device *dev)
1405 1403
1406 tunnel->dev = dev; 1404 tunnel->dev = dev;
1407 tunnel->net = dev_net(dev); 1405 tunnel->net = dev_net(dev);
1408 strcpy(tunnel->parms.name, dev->name);
1409 1406
1410 iph->version = 4; 1407 iph->version = 4;
1411 iph->protocol = IPPROTO_IPV6; 1408 iph->protocol = IPPROTO_IPV6;
diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c
index 56b53571c807..509bc157ce55 100644
--- a/net/mac80211/ibss.c
+++ b/net/mac80211/ibss.c
@@ -805,7 +805,7 @@ ieee80211_ibss_process_chanswitch(struct ieee80211_sub_if_data *sdata,
805 805
806 memset(&params, 0, sizeof(params)); 806 memset(&params, 0, sizeof(params));
807 memset(&csa_ie, 0, sizeof(csa_ie)); 807 memset(&csa_ie, 0, sizeof(csa_ie));
808 err = ieee80211_parse_ch_switch_ie(sdata, elems, beacon, 808 err = ieee80211_parse_ch_switch_ie(sdata, elems,
809 ifibss->chandef.chan->band, 809 ifibss->chandef.chan->band,
810 sta_flags, ifibss->bssid, &csa_ie); 810 sta_flags, ifibss->bssid, &csa_ie);
811 /* can't switch to destination channel, fail */ 811 /* can't switch to destination channel, fail */
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index c2aaec4dfcf0..8c68da30595d 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -1642,7 +1642,6 @@ void ieee80211_process_measurement_req(struct ieee80211_sub_if_data *sdata,
1642 * ieee80211_parse_ch_switch_ie - parses channel switch IEs 1642 * ieee80211_parse_ch_switch_ie - parses channel switch IEs
1643 * @sdata: the sdata of the interface which has received the frame 1643 * @sdata: the sdata of the interface which has received the frame
1644 * @elems: parsed 802.11 elements received with the frame 1644 * @elems: parsed 802.11 elements received with the frame
1645 * @beacon: indicates if the frame was a beacon or probe response
1646 * @current_band: indicates the current band 1645 * @current_band: indicates the current band
1647 * @sta_flags: contains information about own capabilities and restrictions 1646 * @sta_flags: contains information about own capabilities and restrictions
1648 * to decide which channel switch announcements can be accepted. Only the 1647 * to decide which channel switch announcements can be accepted. Only the
@@ -1656,7 +1655,7 @@ void ieee80211_process_measurement_req(struct ieee80211_sub_if_data *sdata,
1656 * Return: 0 on success, <0 on error and >0 if there is nothing to parse. 1655 * Return: 0 on success, <0 on error and >0 if there is nothing to parse.
1657 */ 1656 */
1658int ieee80211_parse_ch_switch_ie(struct ieee80211_sub_if_data *sdata, 1657int ieee80211_parse_ch_switch_ie(struct ieee80211_sub_if_data *sdata,
1659 struct ieee802_11_elems *elems, bool beacon, 1658 struct ieee802_11_elems *elems,
1660 enum ieee80211_band current_band, 1659 enum ieee80211_band current_band,
1661 u32 sta_flags, u8 *bssid, 1660 u32 sta_flags, u8 *bssid,
1662 struct ieee80211_csa_ie *csa_ie); 1661 struct ieee80211_csa_ie *csa_ie);
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index af237223a8cd..653f5eb07a27 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -766,10 +766,12 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
766 int i, flushed; 766 int i, flushed;
767 struct ps_data *ps; 767 struct ps_data *ps;
768 struct cfg80211_chan_def chandef; 768 struct cfg80211_chan_def chandef;
769 bool cancel_scan;
769 770
770 clear_bit(SDATA_STATE_RUNNING, &sdata->state); 771 clear_bit(SDATA_STATE_RUNNING, &sdata->state);
771 772
772 if (rcu_access_pointer(local->scan_sdata) == sdata) 773 cancel_scan = rcu_access_pointer(local->scan_sdata) == sdata;
774 if (cancel_scan)
773 ieee80211_scan_cancel(local); 775 ieee80211_scan_cancel(local);
774 776
775 /* 777 /*
@@ -898,6 +900,8 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
898 list_del(&sdata->u.vlan.list); 900 list_del(&sdata->u.vlan.list);
899 mutex_unlock(&local->mtx); 901 mutex_unlock(&local->mtx);
900 RCU_INIT_POINTER(sdata->vif.chanctx_conf, NULL); 902 RCU_INIT_POINTER(sdata->vif.chanctx_conf, NULL);
903 /* see comment in the default case below */
904 ieee80211_free_keys(sdata, true);
901 /* no need to tell driver */ 905 /* no need to tell driver */
902 break; 906 break;
903 case NL80211_IFTYPE_MONITOR: 907 case NL80211_IFTYPE_MONITOR:
@@ -923,17 +927,16 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
923 /* 927 /*
924 * When we get here, the interface is marked down. 928 * When we get here, the interface is marked down.
925 * Free the remaining keys, if there are any 929 * Free the remaining keys, if there are any
926 * (shouldn't be, except maybe in WDS mode?) 930 * (which can happen in AP mode if userspace sets
931 * keys before the interface is operating, and maybe
932 * also in WDS mode)
927 * 933 *
928 * Force the key freeing to always synchronize_net() 934 * Force the key freeing to always synchronize_net()
929 * to wait for the RX path in case it is using this 935 * to wait for the RX path in case it is using this
930 * interface enqueuing frames * at this very time on 936 * interface enqueuing frames at this very time on
931 * another CPU. 937 * another CPU.
932 */ 938 */
933 ieee80211_free_keys(sdata, true); 939 ieee80211_free_keys(sdata, true);
934
935 /* fall through */
936 case NL80211_IFTYPE_AP:
937 skb_queue_purge(&sdata->skb_queue); 940 skb_queue_purge(&sdata->skb_queue);
938 } 941 }
939 942
@@ -991,6 +994,9 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
991 994
992 ieee80211_recalc_ps(local, -1); 995 ieee80211_recalc_ps(local, -1);
993 996
997 if (cancel_scan)
998 flush_delayed_work(&local->scan_work);
999
994 if (local->open_count == 0) { 1000 if (local->open_count == 0) {
995 ieee80211_stop_device(local); 1001 ieee80211_stop_device(local);
996 1002
diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c
index e9f99c1e3fad..0c8b2a77d312 100644
--- a/net/mac80211/mesh.c
+++ b/net/mac80211/mesh.c
@@ -874,7 +874,7 @@ ieee80211_mesh_process_chnswitch(struct ieee80211_sub_if_data *sdata,
874 874
875 memset(&params, 0, sizeof(params)); 875 memset(&params, 0, sizeof(params));
876 memset(&csa_ie, 0, sizeof(csa_ie)); 876 memset(&csa_ie, 0, sizeof(csa_ie));
877 err = ieee80211_parse_ch_switch_ie(sdata, elems, beacon, band, 877 err = ieee80211_parse_ch_switch_ie(sdata, elems, band,
878 sta_flags, sdata->vif.addr, 878 sta_flags, sdata->vif.addr,
879 &csa_ie); 879 &csa_ie);
880 if (err < 0) 880 if (err < 0)
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index 2de88704278b..93af0f1c9d99 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -1072,7 +1072,7 @@ ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata,
1072 1072
1073 current_band = cbss->channel->band; 1073 current_band = cbss->channel->band;
1074 memset(&csa_ie, 0, sizeof(csa_ie)); 1074 memset(&csa_ie, 0, sizeof(csa_ie));
1075 res = ieee80211_parse_ch_switch_ie(sdata, elems, beacon, current_band, 1075 res = ieee80211_parse_ch_switch_ie(sdata, elems, current_band,
1076 ifmgd->flags, 1076 ifmgd->flags,
1077 ifmgd->associated->bssid, &csa_ie); 1077 ifmgd->associated->bssid, &csa_ie);
1078 if (res < 0) 1078 if (res < 0)
@@ -1168,7 +1168,8 @@ ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata,
1168 ieee80211_queue_work(&local->hw, &ifmgd->chswitch_work); 1168 ieee80211_queue_work(&local->hw, &ifmgd->chswitch_work);
1169 else 1169 else
1170 mod_timer(&ifmgd->chswitch_timer, 1170 mod_timer(&ifmgd->chswitch_timer,
1171 TU_TO_EXP_TIME(csa_ie.count * cbss->beacon_interval)); 1171 TU_TO_EXP_TIME((csa_ie.count - 1) *
1172 cbss->beacon_interval));
1172} 1173}
1173 1174
1174static bool 1175static bool
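The timer fix above accounts for the beacon that delivered the CSA element already representing one tick of the countdown, so only (count - 1) beacon intervals remain before the switch. A tiny sketch of the arithmetic, assuming the 802.11 definition of 1 TU = 1024 microseconds:

    #include <stdio.h>

    /* TU_TO_EXP_TIME converts 802.11 time units into a timer expiry;
     * here we just compute the remaining microseconds, assuming the
     * CSA count is at least 1. */
    static unsigned long csa_remaining_us(unsigned int csa_count,
                                          unsigned int beacon_interval_tu)
    {
            /* The beacon carrying the CSA element already represents one
             * tick of the countdown, hence (count - 1) intervals remain. */
            return (unsigned long)(csa_count - 1) * beacon_interval_tu * 1024;
    }

    int main(void)
    {
            /* count = 5, beacon interval = 100 TU: 4 intervals remain. */
            printf("%lu us\n", csa_remaining_us(5, 100)); /* 409600 */
            return 0;
    }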
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index b04ca4049c95..a37f9af634cb 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -1678,11 +1678,14 @@ ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx)
1678 sc = le16_to_cpu(hdr->seq_ctrl); 1678 sc = le16_to_cpu(hdr->seq_ctrl);
1679 frag = sc & IEEE80211_SCTL_FRAG; 1679 frag = sc & IEEE80211_SCTL_FRAG;
1680 1680
1681 if (likely((!ieee80211_has_morefrags(fc) && frag == 0) || 1681 if (likely(!ieee80211_has_morefrags(fc) && frag == 0))
1682 is_multicast_ether_addr(hdr->addr1))) { 1682 goto out;
1683 /* not fragmented */ 1683
1684 if (is_multicast_ether_addr(hdr->addr1)) {
1685 rx->local->dot11MulticastReceivedFrameCount++;
1684 goto out; 1686 goto out;
1685 } 1687 }
1688
1686 I802_DEBUG_INC(rx->local->rx_handlers_fragments); 1689 I802_DEBUG_INC(rx->local->rx_handlers_fragments);
1687 1690
1688 if (skb_linearize(rx->skb)) 1691 if (skb_linearize(rx->skb))
@@ -1775,10 +1778,7 @@ ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx)
1775 out: 1778 out:
1776 if (rx->sta) 1779 if (rx->sta)
1777 rx->sta->rx_packets++; 1780 rx->sta->rx_packets++;
1778 if (is_multicast_ether_addr(hdr->addr1)) 1781 ieee80211_led_rx(rx->local);
1779 rx->local->dot11MulticastReceivedFrameCount++;
1780 else
1781 ieee80211_led_rx(rx->local);
1782 return RX_CONTINUE; 1782 return RX_CONTINUE;
1783} 1783}
1784 1784
diff --git a/net/mac80211/spectmgmt.c b/net/mac80211/spectmgmt.c
index 6ab009070084..efeba56c913b 100644
--- a/net/mac80211/spectmgmt.c
+++ b/net/mac80211/spectmgmt.c
@@ -22,7 +22,7 @@
22#include "wme.h" 22#include "wme.h"
23 23
24int ieee80211_parse_ch_switch_ie(struct ieee80211_sub_if_data *sdata, 24int ieee80211_parse_ch_switch_ie(struct ieee80211_sub_if_data *sdata,
25 struct ieee802_11_elems *elems, bool beacon, 25 struct ieee802_11_elems *elems,
26 enum ieee80211_band current_band, 26 enum ieee80211_band current_band,
27 u32 sta_flags, u8 *bssid, 27 u32 sta_flags, u8 *bssid,
28 struct ieee80211_csa_ie *csa_ie) 28 struct ieee80211_csa_ie *csa_ie)
@@ -91,19 +91,13 @@ int ieee80211_parse_ch_switch_ie(struct ieee80211_sub_if_data *sdata,
91 return -EINVAL; 91 return -EINVAL;
92 } 92 }
93 93
94 if (!beacon && sec_chan_offs) { 94 if (sec_chan_offs) {
95 secondary_channel_offset = sec_chan_offs->sec_chan_offs; 95 secondary_channel_offset = sec_chan_offs->sec_chan_offs;
96 } else if (beacon && ht_oper) {
97 secondary_channel_offset =
98 ht_oper->ht_param & IEEE80211_HT_PARAM_CHA_SEC_OFFSET;
99 } else if (!(sta_flags & IEEE80211_STA_DISABLE_HT)) { 96 } else if (!(sta_flags & IEEE80211_STA_DISABLE_HT)) {
100 /* If it's not a beacon, HT is enabled and the IE not present, 97 /* If the secondary channel offset IE is not present,
101 * it's 20 MHz, 802.11-2012 8.5.2.6: 98 * we can't know what's the post-CSA offset, so the
102 * This element [the Secondary Channel Offset Element] is 99 * best we can do is use 20MHz.
103 * present when switching to a 40 MHz channel. It may be 100 */
104 * present when switching to a 20 MHz channel (in which
105 * case the secondary channel offset is set to SCN).
106 */
107 secondary_channel_offset = IEEE80211_HT_PARAM_CHA_SEC_NONE; 101 secondary_channel_offset = IEEE80211_HT_PARAM_CHA_SEC_NONE;
108 } 102 }
109 103
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index f1de72de273e..0007b8180397 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -1440,7 +1440,7 @@ static void netlink_unbind(int group, long unsigned int groups,
1440 return; 1440 return;
1441 1441
1442 for (undo = 0; undo < group; undo++) 1442 for (undo = 0; undo < group; undo++)
1443 if (test_bit(group, &groups)) 1443 if (test_bit(undo, &groups))
1444 nlk->netlink_unbind(undo); 1444 nlk->netlink_unbind(undo);
1445} 1445}
1446 1446
@@ -1492,7 +1492,7 @@ static int netlink_bind(struct socket *sock, struct sockaddr *addr,
1492 netlink_insert(sk, net, nladdr->nl_pid) : 1492 netlink_insert(sk, net, nladdr->nl_pid) :
1493 netlink_autobind(sock); 1493 netlink_autobind(sock);
1494 if (err) { 1494 if (err) {
1495 netlink_unbind(nlk->ngroups - 1, groups, nlk); 1495 netlink_unbind(nlk->ngroups, groups, nlk);
1496 return err; 1496 return err;
1497 } 1497 }
1498 } 1498 }
@@ -2509,6 +2509,7 @@ __netlink_kernel_create(struct net *net, int unit, struct module *module,
2509 nl_table[unit].module = module; 2509 nl_table[unit].module = module;
2510 if (cfg) { 2510 if (cfg) {
2511 nl_table[unit].bind = cfg->bind; 2511 nl_table[unit].bind = cfg->bind;
2512 nl_table[unit].unbind = cfg->unbind;
2512 nl_table[unit].flags = cfg->flags; 2513 nl_table[unit].flags = cfg->flags;
2513 if (cfg->compare) 2514 if (cfg->compare)
2514 nl_table[unit].compare = cfg->compare; 2515 nl_table[unit].compare = cfg->compare;
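Two related bugs are fixed above: the undo loop tested the bit of the loop bound (group) instead of the index being undone (undo), and the error path passed ngroups - 1, leaving the highest group bound on failure. A sketch of the corrected undo loop over a plain bitmask:

    #include <stdio.h>

    static void unbind_one(int group)
    {
            printf("unbind group %d\n", group);
    }

    /* Corrected undo loop: walk groups [0, group) and test the bit of
     * the index being undone (`undo`), not the loop bound (`group`). */
    static void netlink_undo(int group, unsigned long groups)
    {
            int undo;

            for (undo = 0; undo < group; undo++)
                    if (groups & (1UL << undo))
                            unbind_one(undo);
    }

    int main(void)
    {
            /* Groups 0 and 2 were bound before bind failed; passing the
             * full group count (not count - 1) undoes both of them. */
            int ngroups = 3;
            unsigned long groups = (1UL << 0) | (1UL << 2);

            netlink_undo(ngroups, groups);
            return 0;
    }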
diff --git a/net/sctp/auth.c b/net/sctp/auth.c
index 0e8529113dc5..fb7976aee61c 100644
--- a/net/sctp/auth.c
+++ b/net/sctp/auth.c
@@ -862,8 +862,6 @@ int sctp_auth_set_key(struct sctp_endpoint *ep,
862 list_add(&cur_key->key_list, sh_keys); 862 list_add(&cur_key->key_list, sh_keys);
863 863
864 cur_key->key = key; 864 cur_key->key = key;
865 sctp_auth_key_hold(key);
866
867 return 0; 865 return 0;
868nomem: 866nomem:
869 if (!replace) 867 if (!replace)
diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
index ab734be8cb20..9f32741abb1c 100644
--- a/net/sctp/sm_make_chunk.c
+++ b/net/sctp/sm_make_chunk.c
@@ -2609,6 +2609,9 @@ do_addr_param:
2609 addr_param = param.v + sizeof(sctp_addip_param_t); 2609 addr_param = param.v + sizeof(sctp_addip_param_t);
2610 2610
2611 af = sctp_get_af_specific(param_type2af(param.p->type)); 2611 af = sctp_get_af_specific(param_type2af(param.p->type));
2612 if (af == NULL)
2613 break;
2614
2612 af->from_addr_param(&addr, addr_param, 2615 af->from_addr_param(&addr, addr_param,
2613 htons(asoc->peer.port), 0); 2616 htons(asoc->peer.port), 0);
2614 2617
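The added NULL check covers address parameters whose type maps to no known address family, where sctp_get_af_specific() returns NULL and the old code dereferenced it. A minimal sketch of the lookup-then-guard pattern, with hypothetical stand-ins for the SCTP types:

    #include <stddef.h>
    #include <stdio.h>

    struct af_ops { const char *name; };

    static struct af_ops af_inet = { "ipv4" };

    /* Stand-in for sctp_get_af_specific(): unknown families yield NULL. */
    static struct af_ops *get_af(int family)
    {
            return family == 1 ? &af_inet : NULL;
    }

    int main(void)
    {
            int family;

            for (family = 0; family <= 2; family++) {
                    struct af_ops *af = get_af(family);

                    /* The fix: bail out instead of dereferencing a NULL ops
                     * table when the parameter carries an unknown family. */
                    if (af == NULL)
                            continue;
                    printf("family %d handled by %s\n", family, af->name);
            }
            return 0;
    }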
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
index e66314138b38..c603b20356ad 100644
--- a/security/selinux/hooks.c
+++ b/security/selinux/hooks.c
@@ -4725,9 +4725,10 @@ static int selinux_nlmsg_perm(struct sock *sk, struct sk_buff *skb)
4725 err = selinux_nlmsg_lookup(sksec->sclass, nlh->nlmsg_type, &perm); 4725 err = selinux_nlmsg_lookup(sksec->sclass, nlh->nlmsg_type, &perm);
4726 if (err) { 4726 if (err) {
4727 if (err == -EINVAL) { 4727 if (err == -EINVAL) {
4728 WARN_ONCE(1, "selinux_nlmsg_perm: unrecognized netlink message:" 4728 printk(KERN_WARNING
4729 " protocol=%hu nlmsg_type=%hu sclass=%hu\n", 4729 "SELinux: unrecognized netlink message:"
4730 sk->sk_protocol, nlh->nlmsg_type, sksec->sclass); 4730 " protocol=%hu nlmsg_type=%hu sclass=%hu\n",
4731 sk->sk_protocol, nlh->nlmsg_type, sksec->sclass);
4731 if (!selinux_enforcing || security_get_allow_unknown()) 4732 if (!selinux_enforcing || security_get_allow_unknown())
4732 err = 0; 4733 err = 0;
4733 } 4734 }
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index 9ab1e631cb32..16660f312043 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -219,6 +219,7 @@ MODULE_SUPPORTED_DEVICE("{{Intel, ICH6},"
219 "{Intel, LPT_LP}," 219 "{Intel, LPT_LP},"
220 "{Intel, WPT_LP}," 220 "{Intel, WPT_LP},"
221 "{Intel, SPT}," 221 "{Intel, SPT},"
222 "{Intel, SPT_LP},"
222 "{Intel, HPT}," 223 "{Intel, HPT},"
223 "{Intel, PBG}," 224 "{Intel, PBG},"
224 "{Intel, SCH}," 225 "{Intel, SCH},"
@@ -2004,6 +2005,9 @@ static const struct pci_device_id azx_ids[] = {
2004 /* Sunrise Point */ 2005 /* Sunrise Point */
2005 { PCI_DEVICE(0x8086, 0xa170), 2006 { PCI_DEVICE(0x8086, 0xa170),
2006 .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH }, 2007 .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH },
2008 /* Sunrise Point-LP */
2009 { PCI_DEVICE(0x8086, 0x9d70),
2010 .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH },
2007 /* Haswell */ 2011 /* Haswell */
2008 { PCI_DEVICE(0x8086, 0x0a0c), 2012 { PCI_DEVICE(0x8086, 0x0a0c),
2009 .driver_data = AZX_DRIVER_HDMI | AZX_DCAPS_INTEL_HASWELL }, 2013 .driver_data = AZX_DRIVER_HDMI | AZX_DCAPS_INTEL_HASWELL },
diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
index 71e4bad06345..e9ebc7bd752c 100644
--- a/sound/pci/hda/patch_conexant.c
+++ b/sound/pci/hda/patch_conexant.c
@@ -43,6 +43,7 @@ struct conexant_spec {
43 unsigned int num_eapds; 43 unsigned int num_eapds;
44 hda_nid_t eapds[4]; 44 hda_nid_t eapds[4];
45 bool dynamic_eapd; 45 bool dynamic_eapd;
46 hda_nid_t mute_led_eapd;
46 47
47 unsigned int parse_flags; /* flag for snd_hda_parse_pin_defcfg() */ 48 unsigned int parse_flags; /* flag for snd_hda_parse_pin_defcfg() */
48 49
@@ -163,6 +164,17 @@ static void cx_auto_vmaster_hook(void *private_data, int enabled)
163 cx_auto_turn_eapd(codec, spec->num_eapds, spec->eapds, enabled); 164 cx_auto_turn_eapd(codec, spec->num_eapds, spec->eapds, enabled);
164} 165}
165 166
167/* turn on/off EAPD according to Master switch (inversely!) for mute LED */
168static void cx_auto_vmaster_hook_mute_led(void *private_data, int enabled)
169{
170 struct hda_codec *codec = private_data;
171 struct conexant_spec *spec = codec->spec;
172
173 snd_hda_codec_write(codec, spec->mute_led_eapd, 0,
174 AC_VERB_SET_EAPD_BTLENABLE,
175 enabled ? 0x00 : 0x02);
176}
177
166static int cx_auto_build_controls(struct hda_codec *codec) 178static int cx_auto_build_controls(struct hda_codec *codec)
167{ 179{
168 int err; 180 int err;
@@ -223,6 +235,7 @@ enum {
223 CXT_FIXUP_TOSHIBA_P105, 235 CXT_FIXUP_TOSHIBA_P105,
224 CXT_FIXUP_HP_530, 236 CXT_FIXUP_HP_530,
225 CXT_FIXUP_CAP_MIX_AMP_5047, 237 CXT_FIXUP_CAP_MIX_AMP_5047,
238 CXT_FIXUP_MUTE_LED_EAPD,
226}; 239};
227 240
228/* for hda_fixup_thinkpad_acpi() */ 241/* for hda_fixup_thinkpad_acpi() */
@@ -557,6 +570,18 @@ static void cxt_fixup_olpc_xo(struct hda_codec *codec,
 	}
 }
 
+static void cxt_fixup_mute_led_eapd(struct hda_codec *codec,
+				    const struct hda_fixup *fix, int action)
+{
+	struct conexant_spec *spec = codec->spec;
+
+	if (action == HDA_FIXUP_ACT_PRE_PROBE) {
+		spec->mute_led_eapd = 0x1b;
+		spec->dynamic_eapd = 1;
+		spec->gen.vmaster_mute.hook = cx_auto_vmaster_hook_mute_led;
+	}
+}
+
 /*
  * Fix max input level on mixer widget to 0dB
  * (originally it has 0x2b steps with 0dB offset 0x14)
@@ -705,6 +730,10 @@ static const struct hda_fixup cxt_fixups[] = {
 		.type = HDA_FIXUP_FUNC,
 		.v.func = cxt_fixup_cap_mix_amp_5047,
 	},
+	[CXT_FIXUP_MUTE_LED_EAPD] = {
+		.type = HDA_FIXUP_FUNC,
+		.v.func = cxt_fixup_mute_led_eapd,
+	},
 };
 
 static const struct snd_pci_quirk cxt5045_fixups[] = {
@@ -762,6 +791,7 @@ static const struct snd_pci_quirk cxt5066_fixups[] = {
 	SND_PCI_QUIRK(0x17aa, 0x21cf, "Lenovo T520", CXT_PINCFG_LENOVO_TP410),
 	SND_PCI_QUIRK(0x17aa, 0x21da, "Lenovo X220", CXT_PINCFG_LENOVO_TP410),
 	SND_PCI_QUIRK(0x17aa, 0x21db, "Lenovo X220-tablet", CXT_PINCFG_LENOVO_TP410),
+	SND_PCI_QUIRK(0x17aa, 0x38af, "Lenovo IdeaPad Z560", CXT_FIXUP_MUTE_LED_EAPD),
 	SND_PCI_QUIRK(0x17aa, 0x3975, "Lenovo U300s", CXT_FIXUP_STEREO_DMIC),
 	SND_PCI_QUIRK(0x17aa, 0x3977, "Lenovo IdeaPad U310", CXT_FIXUP_STEREO_DMIC),
 	SND_PCI_QUIRK(0x17aa, 0x397b, "Lenovo S205", CXT_FIXUP_STEREO_DMIC),
@@ -780,6 +810,7 @@ static const struct hda_model_fixup cxt5066_fixup_models[] = {
 	{ .id = CXT_PINCFG_LEMOTE_A1004, .name = "lemote-a1004" },
 	{ .id = CXT_PINCFG_LEMOTE_A1205, .name = "lemote-a1205" },
 	{ .id = CXT_FIXUP_OLPC_XO, .name = "olpc-xo" },
+	{ .id = CXT_FIXUP_MUTE_LED_EAPD, .name = "mute-led-eapd" },
 	{}
 };
 
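The Conexant fixup above drives a mute LED through an EAPD pin, so the Master switch state is translated to the EAPD verb payload inversely (muted -> EAPD on). A minimal sketch of that mapping; codec_write() is a stand-in and the assumption that the LED lights when muted follows from the "(inversely!)" comment in the patch:

#include <stdio.h>
#include <stdbool.h>

#define AC_VERB_SET_EAPD_BTLENABLE	0x70c	/* real HDA verb number */

static void codec_write(unsigned nid, unsigned verb, unsigned payload)
{
	printf("nid 0x%02x verb 0x%03x payload 0x%02x\n", nid, verb, payload);
}

static void vmaster_hook_mute_led(bool enabled)
{
	/* enabled (unmuted) -> EAPD off (0x00); muted -> EAPD on (0x02),
	 * which is what lights the LED on this hardware. */
	codec_write(0x1b, AC_VERB_SET_EAPD_BTLENABLE, enabled ? 0x00 : 0x02);
}

int main(void)
{
	vmaster_hook_mute_led(true);	/* unmuted -> payload 0x00 */
	vmaster_hook_mute_led(false);	/* muted   -> payload 0x02 */
	return 0;
}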
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index da03693099eb..172395465e8a 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -288,6 +288,80 @@ static void alc880_unsol_event(struct hda_codec *codec, unsigned int res)
 	snd_hda_jack_unsol_event(codec, res >> 2);
 }
 
+/* Change EAPD to verb control */
+static void alc_fill_eapd_coef(struct hda_codec *codec)
+{
+	int coef;
+
+	coef = alc_get_coef0(codec);
+
+	switch (codec->vendor_id) {
+	case 0x10ec0262:
+		alc_update_coef_idx(codec, 0x7, 0, 1<<5);
+		break;
+	case 0x10ec0267:
+	case 0x10ec0268:
+		alc_update_coef_idx(codec, 0x7, 0, 1<<13);
+		break;
+	case 0x10ec0269:
+		if ((coef & 0x00f0) == 0x0010)
+			alc_update_coef_idx(codec, 0xd, 0, 1<<14);
+		if ((coef & 0x00f0) == 0x0020)
+			alc_update_coef_idx(codec, 0x4, 1<<15, 0);
+		if ((coef & 0x00f0) == 0x0030)
+			alc_update_coef_idx(codec, 0x10, 1<<9, 0);
+		break;
+	case 0x10ec0280:
+	case 0x10ec0284:
+	case 0x10ec0290:
+	case 0x10ec0292:
+		alc_update_coef_idx(codec, 0x4, 1<<15, 0);
+		break;
+	case 0x10ec0233:
+	case 0x10ec0255:
+	case 0x10ec0282:
+	case 0x10ec0283:
+	case 0x10ec0286:
+	case 0x10ec0288:
+		alc_update_coef_idx(codec, 0x10, 1<<9, 0);
+		break;
+	case 0x10ec0285:
+	case 0x10ec0293:
+		alc_update_coef_idx(codec, 0xa, 1<<13, 0);
+		break;
+	case 0x10ec0662:
+		if ((coef & 0x00f0) == 0x0030)
+			alc_update_coef_idx(codec, 0x4, 1<<10, 0); /* EAPD Ctrl */
+		break;
+	case 0x10ec0272:
+	case 0x10ec0273:
+	case 0x10ec0663:
+	case 0x10ec0665:
+	case 0x10ec0670:
+	case 0x10ec0671:
+	case 0x10ec0672:
+		alc_update_coef_idx(codec, 0xd, 0, 1<<14); /* EAPD Ctrl */
+		break;
+	case 0x10ec0668:
+		alc_update_coef_idx(codec, 0x7, 3<<13, 0);
+		break;
+	case 0x10ec0867:
+		alc_update_coef_idx(codec, 0x4, 1<<10, 0);
+		break;
+	case 0x10ec0888:
+		if ((coef & 0x00f0) == 0x0020 || (coef & 0x00f0) == 0x0030)
+			alc_update_coef_idx(codec, 0x7, 1<<5, 0);
+		break;
+	case 0x10ec0892:
+		alc_update_coef_idx(codec, 0x7, 1<<5, 0);
+		break;
+	case 0x10ec0899:
+	case 0x10ec0900:
+		alc_update_coef_idx(codec, 0x7, 1<<1, 0);
+		break;
+	}
+}
+
 /* additional initialization for ALC888 variants */
 static void alc888_coef_init(struct hda_codec *codec)
 {
@@ -339,6 +413,7 @@ static void alc_eapd_shutup(struct hda_codec *codec)
 /* generic EAPD initialization */
 static void alc_auto_init_amp(struct hda_codec *codec, int type)
 {
+	alc_fill_eapd_coef(codec);
 	alc_auto_setup_eapd(codec, true);
 	switch (type) {
 	case ALC_INIT_GPIO1:
@@ -5212,9 +5287,6 @@ static void alc269_fill_coef(struct hda_codec *codec)
 		}
 	}
 
-	/* Class D */
-	alc_update_coef_idx(codec, 0xd, 0, 1<<14);
-
 	/* HP */
 	alc_update_coef_idx(codec, 0x4, 0, 1<<11);
 }
@@ -6124,29 +6196,6 @@ static const struct snd_hda_pin_quirk alc662_pin_fixup_tbl[] = {
 	{}
 };
 
-static void alc662_fill_coef(struct hda_codec *codec)
-{
-	int coef;
-
-	coef = alc_get_coef0(codec);
-
-	switch (codec->vendor_id) {
-	case 0x10ec0662:
-		if ((coef & 0x00f0) == 0x0030)
-			alc_update_coef_idx(codec, 0x4, 1<<10, 0); /* EAPD Ctrl */
-		break;
-	case 0x10ec0272:
-	case 0x10ec0273:
-	case 0x10ec0663:
-	case 0x10ec0665:
-	case 0x10ec0670:
-	case 0x10ec0671:
-	case 0x10ec0672:
-		alc_update_coef_idx(codec, 0xd, 0, 1<<14); /* EAPD Ctrl */
-		break;
-	}
-}
-
 /*
  */
 static int patch_alc662(struct hda_codec *codec)
@@ -6169,10 +6218,6 @@ static int patch_alc662(struct hda_codec *codec)
 	case 0x10ec0668:
 		spec->init_hook = alc668_restore_default_value;
 		break;
-	default:
-		spec->init_hook = alc662_fill_coef;
-		alc662_fill_coef(codec);
-		break;
 	}
 
 	snd_hda_pick_fixup(codec, alc662_fixup_models,
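The Realtek diff consolidates the per-codec EAPD COEF setup (previously duplicated in alc269_fill_coef() and alc662_fill_coef()) into one alc_fill_eapd_coef() called from generic init. A minimal sketch of the read-modify-write helper it is built on; the array-backed register file and function names here are illustrative:

#include <stdio.h>

static unsigned int coef[32];	/* stand-in for the codec's COEF registers */

/* clear `mask`, then set `bits` -- the alc_update_coef_idx() contract */
static void update_coef_idx(unsigned idx, unsigned mask, unsigned bits)
{
	coef[idx] = (coef[idx] & ~mask) | bits;
}

static void fill_eapd_coef(unsigned vendor_id)
{
	switch (vendor_id) {
	case 0x10ec0262:
		update_coef_idx(0x7, 0, 1 << 5);	/* set bit 5 */
		break;
	case 0x10ec0892:
		update_coef_idx(0x7, 1 << 5, 0);	/* clear bit 5 */
		break;
	}
}

int main(void)
{
	coef[0x7] = 1 << 5;
	fill_eapd_coef(0x10ec0892);
	printf("coef[0x7] = 0x%x\n", coef[0x7]);	/* bit 5 cleared -> 0x0 */
	return 0;
}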
diff --git a/sound/usb/mixer_quirks.c b/sound/usb/mixer_quirks.c
index f119a41ed9a9..7c83bab69dee 100644
--- a/sound/usb/mixer_quirks.c
+++ b/sound/usb/mixer_quirks.c
@@ -885,6 +885,11 @@ static int snd_ftu_eff_switch_put(struct snd_kcontrol *kctl,
 	return changed;
 }
 
+static void kctl_private_value_free(struct snd_kcontrol *kctl)
+{
+	kfree((void *)kctl->private_value);
+}
+
 static int snd_ftu_create_effect_switch(struct usb_mixer_interface *mixer,
 					int validx, int bUnitID)
 {
@@ -919,6 +924,7 @@ static int snd_ftu_create_effect_switch(struct usb_mixer_interface *mixer,
 		return -ENOMEM;
 	}
 
+	kctl->private_free = kctl_private_value_free;
 	err = snd_ctl_add(mixer->chip->card, kctl);
 	if (err < 0)
 		return err;
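The mixer_quirks change plugs a leak: an allocation stashed in the control's integer-typed private_value was never freed, so a private_free destructor is wired up. A minimal userspace sketch of that pattern; struct kcontrol and destroy() are stand-ins:

#include <stdlib.h>
#include <stdint.h>

struct kcontrol {
	uintptr_t private_value;		/* holds a heap pointer */
	void (*private_free)(struct kcontrol *);
};

static void private_value_free(struct kcontrol *k)
{
	free((void *)k->private_value);		/* cast back and release */
}

static void destroy(struct kcontrol *k)
{
	if (k->private_free)
		k->private_free(k);	/* without this hook, the value leaks */
	free(k);
}

int main(void)
{
	struct kcontrol *k = calloc(1, sizeof(*k));

	k->private_value = (uintptr_t)malloc(16);
	k->private_free = private_value_free;	/* the added hookup */
	destroy(k);
	return 0;
}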
diff --git a/tools/testing/selftests/net/psock_fanout.c b/tools/testing/selftests/net/psock_fanout.c
index 57b9c2b7c4ff..6f6733331d95 100644
--- a/tools/testing/selftests/net/psock_fanout.c
+++ b/tools/testing/selftests/net/psock_fanout.c
@@ -128,7 +128,7 @@ static int sock_fanout_read_ring(int fd, void *ring)
 	struct tpacket2_hdr *header = ring;
 	int count = 0;
 
-	while (header->tp_status & TP_STATUS_USER && count < RING_NUM_FRAMES) {
+	while (count < RING_NUM_FRAMES && header->tp_status & TP_STATUS_USER) {
 		count++;
 		header = ring + (count * getpagesize());
 	}
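The selftest fix reorders the loop condition so that, via &&'s short-circuit evaluation, the index bound is tested before the frame header is dereferenced; the old order read one frame past the ring when every frame was ready. A minimal sketch of the safe ordering; NFRAMES and the status array are illustrative:

#include <stdio.h>

#define NFRAMES 4

int main(void)
{
	unsigned int status[NFRAMES] = { 1, 1, 1, 1 };	/* all frames ready */
	int count = 0;

	/* Bound first: status[count] is only read when count < NFRAMES,
	 * so status[NFRAMES] is never touched even when every frame is set. */
	while (count < NFRAMES && status[count] & 0x1)
		count++;

	printf("consumed %d frames\n", count);	/* prints 4 */
	return 0;
}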