author    Ingo Molnar <mingo@kernel.org>  2016-09-30 04:44:27 -0400
committer Ingo Molnar <mingo@kernel.org>  2016-09-30 04:44:27 -0400
commit    536e0e81e0b04305ce40f6cc4299d29dc9bbc673 (patch)
tree      1cb7f695bc220bb9cdc5cb9ed1ef005f7771b1fd
parent    c1fad9ef7ed14aad464972e6444e7a3bd5670f26 (diff)
parent    53061afee43bc5041b67a45b6d793e7afdcf9ca7 (diff)
Merge branch 'linus' into sched/core, to pick up fixes
Signed-off-by: Ingo Molnar <mingo@kernel.org>
-rw-r--r--  Documentation/devicetree/bindings/input/touchscreen/silead_gsl1680.txt | 1
-rw-r--r--  Documentation/media/uapi/cec/cec-ioc-adap-g-log-addrs.rst | 21
-rw-r--r--  Documentation/media/uapi/cec/cec-ioc-dqevent.rst | 8
-rw-r--r--  MAINTAINERS | 13
-rw-r--r--  Makefile | 2
-rw-r--r--  arch/arm64/include/asm/debug-monitors.h | 2
-rw-r--r--  arch/arm64/kernel/kgdb.c | 36
-rw-r--r--  arch/arm64/kernel/smp.c | 14
-rw-r--r--  arch/mips/Kconfig | 1
-rw-r--r--  arch/mips/Kconfig.debug | 36
-rw-r--r--  arch/mips/Makefile | 4
-rw-r--r--  arch/mips/ath79/clock.c | 2
-rw-r--r--  arch/mips/cavium-octeon/octeon-platform.c | 2
-rw-r--r--  arch/mips/include/asm/asmmacro.h | 1
-rw-r--r--  arch/mips/include/asm/mach-cavium-octeon/mangle-port.h | 4
-rw-r--r--  arch/mips/include/asm/mach-paravirt/kernel-entry-init.h | 2
-rw-r--r--  arch/mips/kernel/mips-r2-to-r6-emul.c | 2
-rw-r--r--  arch/mips/kernel/process.c | 8
-rw-r--r--  arch/mips/kernel/setup.c | 7
-rw-r--r--  arch/mips/kernel/smp.c | 7
-rw-r--r--  arch/mips/kernel/uprobes.c | 2
-rw-r--r--  arch/mips/kernel/vdso.c | 8
-rw-r--r--  arch/mips/math-emu/dsemul.c | 1
-rw-r--r--  arch/mips/mm/c-r4k.c | 2
-rw-r--r--  arch/mips/mm/init.c | 13
-rw-r--r--  arch/powerpc/platforms/powernv/pci-ioda.c | 11
-rw-r--r--  arch/sh/include/asm/atomic-llsc.h | 2
-rw-r--r--  arch/x86/events/intel/bts.c | 5
-rw-r--r--  arch/x86/mm/pageattr.c | 21
-rw-r--r--  arch/x86/platform/efi/efi_64.c | 2
-rw-r--r--  block/blk-mq.c | 16
-rw-r--r--  block/blk-throttle.c | 6
-rw-r--r--  crypto/rsa-pkcs1pad.c | 41
-rw-r--r--  drivers/base/regmap/regmap.c | 6
-rw-r--r--  drivers/i2c/busses/i2c-eg20t.c | 18
-rw-r--r--  drivers/i2c/busses/i2c-qup.c | 3
-rw-r--r--  drivers/i2c/muxes/i2c-mux-pca954x.c | 2
-rw-r--r--  drivers/input/touchscreen/silead.c | 16
-rw-r--r--  drivers/irqchip/irq-gic-v3.c | 7
-rw-r--r--  drivers/irqchip/irq-mips-gic.c | 105
-rw-r--r--  drivers/media/cec-edid.c | 5
-rw-r--r--  drivers/media/pci/cx23885/cx23885-417.c | 1
-rw-r--r--  drivers/media/pci/saa7134/saa7134-dvb.c | 1
-rw-r--r--  drivers/media/pci/saa7134/saa7134-empress.c | 1
-rw-r--r--  drivers/media/platform/Kconfig | 2
-rw-r--r--  drivers/media/platform/mtk-vcodec/mtk_vcodec_drv.h | 1
-rw-r--r--  drivers/media/platform/mtk-vcodec/mtk_vcodec_enc.c | 42
-rw-r--r--  drivers/media/platform/mtk-vcodec/mtk_vcodec_enc_drv.c | 6
-rw-r--r--  drivers/media/platform/mtk-vcodec/mtk_vcodec_intr.h | 1
-rw-r--r--  drivers/media/platform/mtk-vcodec/venc/venc_h264_if.c | 16
-rw-r--r--  drivers/media/platform/mtk-vcodec/venc/venc_vp8_if.c | 16
-rw-r--r--  drivers/media/platform/rcar-fcp.c | 8
-rw-r--r--  drivers/mmc/host/dw_mmc.c | 14
-rw-r--r--  drivers/mmc/host/dw_mmc.h | 3
-rw-r--r--  drivers/mtd/nand/davinci_nand.c | 3
-rw-r--r--  drivers/mtd/nand/mtk_ecc.c | 12
-rw-r--r--  drivers/mtd/nand/mtk_nand.c | 7
-rw-r--r--  drivers/mtd/nand/mxc_nand.c | 2
-rw-r--r--  drivers/mtd/nand/omap2.c | 2
-rw-r--r--  drivers/net/can/flexcan.c | 13
-rw-r--r--  drivers/net/can/ifi_canfd/ifi_canfd.c | 11
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2.c | 12
-rw-r--r--  drivers/net/ethernet/brocade/bna/bnad_ethtool.c | 57
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4.h | 4
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c | 15
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4_hw.c | 7
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h | 6
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h | 15
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c | 9
-rw-r--r--  drivers/net/ethernet/ibm/emac/core.c | 34
-rw-r--r--  drivers/net/ethernet/mediatek/mtk_eth_soc.c | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/eq.c | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/main.c | 3
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eswitch.c | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c | 20
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c | 4
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_net_common.c | 8
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_mcp.c | 4
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c | 2
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c | 2
-rw-r--r--  drivers/net/phy/mdio-xgene.c | 6
-rw-r--r--  drivers/net/usb/r8152.c | 281
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/tx.c | 19
-rw-r--r--  drivers/net/xen-netback/xenbus.c | 46
-rw-r--r--  drivers/nvme/host/rdma.c | 2
-rw-r--r--  drivers/s390/net/qeth_core.h | 1
-rw-r--r--  drivers/s390/net/qeth_core_main.c | 32
-rw-r--r--  drivers/s390/net/qeth_l2_main.c | 6
-rw-r--r--  drivers/s390/net/qeth_l3_main.c | 29
-rw-r--r--  drivers/s390/net/qeth_l3_sys.c | 5
-rw-r--r--  drivers/staging/media/cec/TODO | 1
-rw-r--r--  drivers/staging/media/cec/cec-adap.c | 26
-rw-r--r--  drivers/staging/media/cec/cec-api.c | 12
-rw-r--r--  drivers/staging/media/cec/cec-core.c | 27
-rw-r--r--  drivers/staging/media/pulse8-cec/pulse8-cec.c | 10
-rw-r--r--  fs/btrfs/extent-tree.c | 9
-rw-r--r--  fs/btrfs/ioctl.c | 12
-rw-r--r--  fs/configfs/file.c | 1
-rw-r--r--  include/linux/cec-funcs.h | 78
-rw-r--r--  include/linux/cec.h | 5
-rw-r--r--  include/linux/dma-mapping.h | 2
-rw-r--r--  include/linux/pagemap.h | 1
-rw-r--r--  include/media/cec.h | 2
-rw-r--r--  include/net/netfilter/nf_conntrack_synproxy.h | 14
-rw-r--r--  include/net/sctp/sm.h | 2
-rw-r--r--  include/net/sock.h | 10
-rw-r--r--  include/net/xfrm.h | 4
-rw-r--r--  kernel/cgroup.c | 29
-rw-r--r--  kernel/cpuset.c | 19
-rw-r--r--  kernel/events/core.c | 2
-rw-r--r--  kernel/irq/chip.c | 8
-rw-r--r--  kernel/trace/trace.c | 29
-rw-r--r--  lib/Kconfig.debug | 2
-rw-r--r--  lib/radix-tree.c | 8
-rw-r--r--  mm/huge_memory.c | 3
-rw-r--r--  mm/ksm.c | 3
-rw-r--r--  mm/memory.c | 12
-rw-r--r--  mm/memory_hotplug.c | 10
-rw-r--r--  mm/shmem.c | 5
-rw-r--r--  mm/vmscan.c | 19
-rw-r--r--  net/batman-adv/bat_v_elp.c | 2
-rw-r--r--  net/batman-adv/routing.c | 28
-rw-r--r--  net/ipv4/ip_input.c | 5
-rw-r--r--  net/ipv4/ip_vti.c | 15
-rw-r--r--  net/ipv4/ipmr.c | 7
-rw-r--r--  net/ipv4/netfilter/nft_chain_route_ipv4.c | 11
-rw-r--r--  net/ipv4/route.c | 10
-rw-r--r--  net/ipv4/tcp_input.c | 2
-rw-r--r--  net/ipv4/tcp_output.c | 7
-rw-r--r--  net/ipv4/tcp_timer.c | 1
-rw-r--r--  net/ipv6/ip6_vti.c | 19
-rw-r--r--  net/ipv6/ip6mr.c | 7
-rw-r--r--  net/ipv6/netfilter/nft_chain_route_ipv6.c | 10
-rw-r--r--  net/ipv6/route.c | 11
-rw-r--r--  net/ipv6/xfrm6_input.c | 16
-rw-r--r--  net/ipv6/xfrm6_tunnel.c | 2
-rw-r--r--  net/irda/af_irda.c | 5
-rw-r--r--  net/mac80211/agg-rx.c | 8
-rw-r--r--  net/mac80211/agg-tx.c | 3
-rw-r--r--  net/mac80211/mesh_hwmp.c | 3
-rw-r--r--  net/mac80211/mesh_pathtbl.c | 2
-rw-r--r--  net/mac80211/sta_info.c | 4
-rw-r--r--  net/mac80211/tx.c | 73
-rw-r--r--  net/netfilter/nf_conntrack_core.c | 6
-rw-r--r--  net/netfilter/nf_nat_core.c | 5
-rw-r--r--  net/netfilter/nf_tables_trace.c | 2
-rw-r--r--  net/sctp/input.c | 27
-rw-r--r--  net/wireless/nl80211.c | 2
-rw-r--r--  net/xfrm/xfrm_state.c | 1
-rw-r--r--  net/xfrm/xfrm_user.c | 9
-rw-r--r--  scripts/recordmcount.c | 1
-rwxr-xr-x  scripts/recordmcount.pl | 1
-rw-r--r--  security/keys/encrypted-keys/encrypted.c | 11
-rw-r--r--  tools/testing/radix-tree/Makefile | 2
-rw-r--r--  tools/testing/radix-tree/multiorder.c | 16
155 files changed, 1245 insertions, 716 deletions
diff --git a/Documentation/devicetree/bindings/input/touchscreen/silead_gsl1680.txt b/Documentation/devicetree/bindings/input/touchscreen/silead_gsl1680.txt
index 1112e0d794e1..820fee4b77b6 100644
--- a/Documentation/devicetree/bindings/input/touchscreen/silead_gsl1680.txt
+++ b/Documentation/devicetree/bindings/input/touchscreen/silead_gsl1680.txt
@@ -13,6 +13,7 @@ Required properties:
13- touchscreen-size-y : See touchscreen.txt 13- touchscreen-size-y : See touchscreen.txt
14 14
15Optional properties: 15Optional properties:
16- firmware-name : File basename (string) for board specific firmware
16- touchscreen-inverted-x : See touchscreen.txt 17- touchscreen-inverted-x : See touchscreen.txt
17- touchscreen-inverted-y : See touchscreen.txt 18- touchscreen-inverted-y : See touchscreen.txt
18- touchscreen-swapped-x-y : See touchscreen.txt 19- touchscreen-swapped-x-y : See touchscreen.txt
diff --git a/Documentation/media/uapi/cec/cec-ioc-adap-g-log-addrs.rst b/Documentation/media/uapi/cec/cec-ioc-adap-g-log-addrs.rst
index 04ee90099676..201d4839931c 100644
--- a/Documentation/media/uapi/cec/cec-ioc-adap-g-log-addrs.rst
+++ b/Documentation/media/uapi/cec/cec-ioc-adap-g-log-addrs.rst
@@ -144,7 +144,7 @@ logical address types are already defined will return with error ``EBUSY``.
144 144
145 - ``flags`` 145 - ``flags``
146 146
147 - Flags. No flags are defined yet, so set this to 0. 147 - Flags. See :ref:`cec-log-addrs-flags` for a list of available flags.
148 148
149 - .. row 7 149 - .. row 7
150 150
@@ -201,6 +201,25 @@ logical address types are already defined will return with error ``EBUSY``.
201 give the CEC framework more information about the device type, even 201 give the CEC framework more information about the device type, even
202 though the framework won't use it directly in the CEC message. 202 though the framework won't use it directly in the CEC message.
203 203
204.. _cec-log-addrs-flags:
205
206.. flat-table:: Flags for struct cec_log_addrs
207 :header-rows: 0
208 :stub-columns: 0
209 :widths: 3 1 4
210
211
212 - .. _`CEC-LOG-ADDRS-FL-ALLOW-UNREG-FALLBACK`:
213
214 - ``CEC_LOG_ADDRS_FL_ALLOW_UNREG_FALLBACK``
215
216 - 1
217
218 - By default if no logical address of the requested type can be claimed, then
219 it will go back to the unconfigured state. If this flag is set, then it will
220 fallback to the Unregistered logical address. Note that if the Unregistered
221 logical address was explicitly requested, then this flag has no effect.
222
204.. _cec-versions: 223.. _cec-versions:
205 224
206.. flat-table:: CEC Versions 225.. flat-table:: CEC Versions
diff --git a/Documentation/media/uapi/cec/cec-ioc-dqevent.rst b/Documentation/media/uapi/cec/cec-ioc-dqevent.rst
index 7a6d6d00ce19..2e1e73928396 100644
--- a/Documentation/media/uapi/cec/cec-ioc-dqevent.rst
+++ b/Documentation/media/uapi/cec/cec-ioc-dqevent.rst
@@ -64,7 +64,8 @@ it is guaranteed that the state did change in between the two events.
64 64
65 - ``phys_addr`` 65 - ``phys_addr``
66 66
67 - The current physical address. 67 - The current physical address. This is ``CEC_PHYS_ADDR_INVALID`` if no
68 valid physical address is set.
68 69
69 - .. row 2 70 - .. row 2
70 71
@@ -72,7 +73,10 @@ it is guaranteed that the state did change in between the two events.
72 73
73 - ``log_addr_mask`` 74 - ``log_addr_mask``
74 75
75 - The current set of claimed logical addresses. 76 - The current set of claimed logical addresses. This is 0 if no logical
77 addresses are claimed or if ``phys_addr`` is ``CEC_PHYS_ADDR_INVALID``.
78 If bit 15 is set (``1 << CEC_LOG_ADDR_UNREGISTERED``) then this device
79 has the unregistered logical address. In that case all other bits are 0.
76 80
77 81
78 82
diff --git a/MAINTAINERS b/MAINTAINERS
index a0ce40f4c66c..b003d0ca6238 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -2501,7 +2501,7 @@ S: Supported
2501F: kernel/bpf/ 2501F: kernel/bpf/
2502 2502
2503BROADCOM B44 10/100 ETHERNET DRIVER 2503BROADCOM B44 10/100 ETHERNET DRIVER
2504M: Gary Zambrano <zambrano@broadcom.com> 2504M: Michael Chan <michael.chan@broadcom.com>
2505L: netdev@vger.kernel.org 2505L: netdev@vger.kernel.org
2506S: Supported 2506S: Supported
2507F: drivers/net/ethernet/broadcom/b44.* 2507F: drivers/net/ethernet/broadcom/b44.*
@@ -8161,6 +8161,15 @@ S: Maintained
8161W: https://fedorahosted.org/dropwatch/ 8161W: https://fedorahosted.org/dropwatch/
8162F: net/core/drop_monitor.c 8162F: net/core/drop_monitor.c
8163 8163
8164NETWORKING [DSA]
8165M: Andrew Lunn <andrew@lunn.ch>
8166M: Vivien Didelot <vivien.didelot@savoirfairelinux.com>
8167M: Florian Fainelli <f.fainelli@gmail.com>
8168S: Maintained
8169F: net/dsa/
8170F: include/net/dsa.h
8171F: drivers/net/dsa/
8172
8164NETWORKING [GENERAL] 8173NETWORKING [GENERAL]
8165M: "David S. Miller" <davem@davemloft.net> 8174M: "David S. Miller" <davem@davemloft.net>
8166L: netdev@vger.kernel.org 8175L: netdev@vger.kernel.org
@@ -8736,7 +8745,7 @@ F: drivers/oprofile/
8736F: include/linux/oprofile.h 8745F: include/linux/oprofile.h
8737 8746
8738ORACLE CLUSTER FILESYSTEM 2 (OCFS2) 8747ORACLE CLUSTER FILESYSTEM 2 (OCFS2)
8739M: Mark Fasheh <mfasheh@suse.com> 8748M: Mark Fasheh <mfasheh@versity.com>
8740M: Joel Becker <jlbec@evilplan.org> 8749M: Joel Becker <jlbec@evilplan.org>
8741L: ocfs2-devel@oss.oracle.com (moderated for non-subscribers) 8750L: ocfs2-devel@oss.oracle.com (moderated for non-subscribers)
8742W: http://ocfs2.wiki.kernel.org 8751W: http://ocfs2.wiki.kernel.org
diff --git a/Makefile b/Makefile
index 74e22c2f408b..ce2ddb3fec98 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
1VERSION = 4 1VERSION = 4
2PATCHLEVEL = 8 2PATCHLEVEL = 8
3SUBLEVEL = 0 3SUBLEVEL = 0
4EXTRAVERSION = -rc7 4EXTRAVERSION = -rc8
5NAME = Psychotic Stoned Sheep 5NAME = Psychotic Stoned Sheep
6 6
7# *DOCUMENTATION* 7# *DOCUMENTATION*
diff --git a/arch/arm64/include/asm/debug-monitors.h b/arch/arm64/include/asm/debug-monitors.h
index 4b6b3f72a215..b71420a12f26 100644
--- a/arch/arm64/include/asm/debug-monitors.h
+++ b/arch/arm64/include/asm/debug-monitors.h
@@ -61,8 +61,6 @@
61 61
62#define AARCH64_BREAK_KGDB_DYN_DBG \ 62#define AARCH64_BREAK_KGDB_DYN_DBG \
63 (AARCH64_BREAK_MON | (KGDB_DYN_DBG_BRK_IMM << 5)) 63 (AARCH64_BREAK_MON | (KGDB_DYN_DBG_BRK_IMM << 5))
64#define KGDB_DYN_BRK_INS_BYTE(x) \
65 ((AARCH64_BREAK_KGDB_DYN_DBG >> (8 * (x))) & 0xff)
66 64
67#define CACHE_FLUSH_IS_SAFE 1 65#define CACHE_FLUSH_IS_SAFE 1
68 66
diff --git a/arch/arm64/kernel/kgdb.c b/arch/arm64/kernel/kgdb.c
index 8c57f6496e56..e017a9493b92 100644
--- a/arch/arm64/kernel/kgdb.c
+++ b/arch/arm64/kernel/kgdb.c
@@ -19,10 +19,13 @@
19 * along with this program. If not, see <http://www.gnu.org/licenses/>. 19 * along with this program. If not, see <http://www.gnu.org/licenses/>.
20 */ 20 */
21 21
22#include <linux/bug.h>
22#include <linux/irq.h> 23#include <linux/irq.h>
23#include <linux/kdebug.h> 24#include <linux/kdebug.h>
24#include <linux/kgdb.h> 25#include <linux/kgdb.h>
25#include <linux/kprobes.h> 26#include <linux/kprobes.h>
27#include <asm/debug-monitors.h>
28#include <asm/insn.h>
26#include <asm/traps.h> 29#include <asm/traps.h>
27 30
28struct dbg_reg_def_t dbg_reg_def[DBG_MAX_REG_NUM] = { 31struct dbg_reg_def_t dbg_reg_def[DBG_MAX_REG_NUM] = {
@@ -338,15 +341,24 @@ void kgdb_arch_exit(void)
338 unregister_die_notifier(&kgdb_notifier); 341 unregister_die_notifier(&kgdb_notifier);
339} 342}
340 343
341/* 344struct kgdb_arch arch_kgdb_ops;
342 * ARM instructions are always in LE. 345
343 * Break instruction is encoded in LE format 346int kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt)
344 */ 347{
345struct kgdb_arch arch_kgdb_ops = { 348 int err;
346 .gdb_bpt_instr = { 349
347 KGDB_DYN_BRK_INS_BYTE(0), 350 BUILD_BUG_ON(AARCH64_INSN_SIZE != BREAK_INSTR_SIZE);
348 KGDB_DYN_BRK_INS_BYTE(1), 351
349 KGDB_DYN_BRK_INS_BYTE(2), 352 err = aarch64_insn_read((void *)bpt->bpt_addr, (u32 *)bpt->saved_instr);
350 KGDB_DYN_BRK_INS_BYTE(3), 353 if (err)
351 } 354 return err;
352}; 355
356 return aarch64_insn_write((void *)bpt->bpt_addr,
357 (u32)AARCH64_BREAK_KGDB_DYN_DBG);
358}
359
360int kgdb_arch_remove_breakpoint(struct kgdb_bkpt *bpt)
361{
362 return aarch64_insn_write((void *)bpt->bpt_addr,
363 *(u32 *)bpt->saved_instr);
364}
diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
index d93d43352504..3ff173e92582 100644
--- a/arch/arm64/kernel/smp.c
+++ b/arch/arm64/kernel/smp.c
@@ -201,12 +201,6 @@ int __cpu_up(unsigned int cpu, struct task_struct *idle)
201 return ret; 201 return ret;
202} 202}
203 203
204static void smp_store_cpu_info(unsigned int cpuid)
205{
206 store_cpu_topology(cpuid);
207 numa_store_cpu_info(cpuid);
208}
209
210/* 204/*
211 * This is the secondary CPU boot entry. We're using this CPUs 205 * This is the secondary CPU boot entry. We're using this CPUs
212 * idle thread stack, but a set of temporary page tables. 206 * idle thread stack, but a set of temporary page tables.
@@ -254,7 +248,7 @@ asmlinkage void secondary_start_kernel(void)
254 */ 248 */
255 notify_cpu_starting(cpu); 249 notify_cpu_starting(cpu);
256 250
257 smp_store_cpu_info(cpu); 251 store_cpu_topology(cpu);
258 252
259 /* 253 /*
260 * OK, now it's safe to let the boot CPU continue. Wait for 254 * OK, now it's safe to let the boot CPU continue. Wait for
@@ -689,10 +683,13 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
689{ 683{
690 int err; 684 int err;
691 unsigned int cpu; 685 unsigned int cpu;
686 unsigned int this_cpu;
692 687
693 init_cpu_topology(); 688 init_cpu_topology();
694 689
695 smp_store_cpu_info(smp_processor_id()); 690 this_cpu = smp_processor_id();
691 store_cpu_topology(this_cpu);
692 numa_store_cpu_info(this_cpu);
696 693
697 /* 694 /*
698 * If UP is mandated by "nosmp" (which implies "maxcpus=0"), don't set 695 * If UP is mandated by "nosmp" (which implies "maxcpus=0"), don't set
@@ -719,6 +716,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
719 continue; 716 continue;
720 717
721 set_cpu_present(cpu, true); 718 set_cpu_present(cpu, true);
719 numa_store_cpu_info(cpu);
722 } 720 }
723} 721}
724 722
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index 26388562e300..212ff92920d2 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -65,6 +65,7 @@ config MIPS
65 select ARCH_CLOCKSOURCE_DATA 65 select ARCH_CLOCKSOURCE_DATA
66 select HANDLE_DOMAIN_IRQ 66 select HANDLE_DOMAIN_IRQ
67 select HAVE_EXIT_THREAD 67 select HAVE_EXIT_THREAD
68 select HAVE_REGS_AND_STACK_ACCESS_API
68 69
69menu "Machine selection" 70menu "Machine selection"
70 71
diff --git a/arch/mips/Kconfig.debug b/arch/mips/Kconfig.debug
index f0e314ceb8ba..7f975b20b20c 100644
--- a/arch/mips/Kconfig.debug
+++ b/arch/mips/Kconfig.debug
@@ -113,42 +113,6 @@ config SPINLOCK_TEST
113 help 113 help
114 Add several files to the debugfs to test spinlock speed. 114 Add several files to the debugfs to test spinlock speed.
115 115
116if CPU_MIPSR6
117
118choice
119 prompt "Compact branch policy"
120 default MIPS_COMPACT_BRANCHES_OPTIMAL
121
122config MIPS_COMPACT_BRANCHES_NEVER
123 bool "Never (force delay slot branches)"
124 help
125 Pass the -mcompact-branches=never flag to the compiler in order to
126 force it to always emit branches with delay slots, and make no use
127 of the compact branch instructions introduced by MIPSr6. This is
128 useful if you suspect there may be an issue with compact branches in
129 either the compiler or the CPU.
130
131config MIPS_COMPACT_BRANCHES_OPTIMAL
132 bool "Optimal (use where beneficial)"
133 help
134 Pass the -mcompact-branches=optimal flag to the compiler in order for
135 it to make use of compact branch instructions where it deems them
136 beneficial, and use branches with delay slots elsewhere. This is the
137 default compiler behaviour, and should be used unless you have a
138 reason to choose otherwise.
139
140config MIPS_COMPACT_BRANCHES_ALWAYS
141 bool "Always (force compact branches)"
142 help
143 Pass the -mcompact-branches=always flag to the compiler in order to
144 force it to always emit compact branches, making no use of branch
145 instructions with delay slots. This can result in more compact code
146 which may be beneficial in some scenarios.
147
148endchoice
149
150endif # CPU_MIPSR6
151
152config SCACHE_DEBUGFS 116config SCACHE_DEBUGFS
153 bool "L2 cache debugfs entries" 117 bool "L2 cache debugfs entries"
154 depends on DEBUG_FS 118 depends on DEBUG_FS
diff --git a/arch/mips/Makefile b/arch/mips/Makefile
index efd7a9dc93c4..598ab2930fce 100644
--- a/arch/mips/Makefile
+++ b/arch/mips/Makefile
@@ -203,10 +203,6 @@ endif
203toolchain-virt := $(call cc-option-yn,$(mips-cflags) -mvirt) 203toolchain-virt := $(call cc-option-yn,$(mips-cflags) -mvirt)
204cflags-$(toolchain-virt) += -DTOOLCHAIN_SUPPORTS_VIRT 204cflags-$(toolchain-virt) += -DTOOLCHAIN_SUPPORTS_VIRT
205 205
206cflags-$(CONFIG_MIPS_COMPACT_BRANCHES_NEVER) += -mcompact-branches=never
207cflags-$(CONFIG_MIPS_COMPACT_BRANCHES_OPTIMAL) += -mcompact-branches=optimal
208cflags-$(CONFIG_MIPS_COMPACT_BRANCHES_ALWAYS) += -mcompact-branches=always
209
210# 206#
211# Firmware support 207# Firmware support
212# 208#
diff --git a/arch/mips/ath79/clock.c b/arch/mips/ath79/clock.c
index 2e7378467c5c..cc3a1e33a600 100644
--- a/arch/mips/ath79/clock.c
+++ b/arch/mips/ath79/clock.c
@@ -96,7 +96,7 @@ static struct clk * __init ath79_reg_ffclk(const char *name,
96 struct clk *clk; 96 struct clk *clk;
97 97
98 clk = clk_register_fixed_factor(NULL, name, parent_name, 0, mult, div); 98 clk = clk_register_fixed_factor(NULL, name, parent_name, 0, mult, div);
99 if (!clk) 99 if (IS_ERR(clk))
100 panic("failed to allocate %s clock structure", name); 100 panic("failed to allocate %s clock structure", name);
101 101
102 return clk; 102 return clk;
diff --git a/arch/mips/cavium-octeon/octeon-platform.c b/arch/mips/cavium-octeon/octeon-platform.c
index b31fbc9d6eae..37a932d9148c 100644
--- a/arch/mips/cavium-octeon/octeon-platform.c
+++ b/arch/mips/cavium-octeon/octeon-platform.c
@@ -1059,7 +1059,7 @@ static int __init octeon_publish_devices(void)
1059{ 1059{
1060 return of_platform_bus_probe(NULL, octeon_ids, NULL); 1060 return of_platform_bus_probe(NULL, octeon_ids, NULL);
1061} 1061}
1062device_initcall(octeon_publish_devices); 1062arch_initcall(octeon_publish_devices);
1063 1063
1064MODULE_AUTHOR("David Daney <ddaney@caviumnetworks.com>"); 1064MODULE_AUTHOR("David Daney <ddaney@caviumnetworks.com>");
1065MODULE_LICENSE("GPL"); 1065MODULE_LICENSE("GPL");
diff --git a/arch/mips/include/asm/asmmacro.h b/arch/mips/include/asm/asmmacro.h
index 56584a659183..83054f79f72a 100644
--- a/arch/mips/include/asm/asmmacro.h
+++ b/arch/mips/include/asm/asmmacro.h
@@ -157,6 +157,7 @@
157 ldc1 $f28, THREAD_FPR28(\thread) 157 ldc1 $f28, THREAD_FPR28(\thread)
158 ldc1 $f30, THREAD_FPR30(\thread) 158 ldc1 $f30, THREAD_FPR30(\thread)
159 ctc1 \tmp, fcr31 159 ctc1 \tmp, fcr31
160 .set pop
160 .endm 161 .endm
161 162
162 .macro fpu_restore_16odd thread 163 .macro fpu_restore_16odd thread
diff --git a/arch/mips/include/asm/mach-cavium-octeon/mangle-port.h b/arch/mips/include/asm/mach-cavium-octeon/mangle-port.h
index 0cf5ac1f7245..8ff2cbdf2c3e 100644
--- a/arch/mips/include/asm/mach-cavium-octeon/mangle-port.h
+++ b/arch/mips/include/asm/mach-cavium-octeon/mangle-port.h
@@ -15,8 +15,8 @@
15static inline bool __should_swizzle_bits(volatile void *a) 15static inline bool __should_swizzle_bits(volatile void *a)
16{ 16{
17 extern const bool octeon_should_swizzle_table[]; 17 extern const bool octeon_should_swizzle_table[];
18 u64 did = ((u64)(uintptr_t)a >> 40) & 0xff;
18 19
19 unsigned long did = ((unsigned long)a >> 40) & 0xff;
20 return octeon_should_swizzle_table[did]; 20 return octeon_should_swizzle_table[did];
21} 21}
22 22
@@ -29,7 +29,7 @@ static inline bool __should_swizzle_bits(volatile void *a)
29 29
30#define __should_swizzle_bits(a) false 30#define __should_swizzle_bits(a) false
31 31
32static inline bool __should_swizzle_addr(unsigned long p) 32static inline bool __should_swizzle_addr(u64 p)
33{ 33{
34 /* boot bus? */ 34 /* boot bus? */
35 return ((p >> 40) & 0xff) == 0; 35 return ((p >> 40) & 0xff) == 0;
diff --git a/arch/mips/include/asm/mach-paravirt/kernel-entry-init.h b/arch/mips/include/asm/mach-paravirt/kernel-entry-init.h
index 2f82bfa3a773..c9f5769dfc8f 100644
--- a/arch/mips/include/asm/mach-paravirt/kernel-entry-init.h
+++ b/arch/mips/include/asm/mach-paravirt/kernel-entry-init.h
@@ -11,11 +11,13 @@
11#define CP0_EBASE $15, 1 11#define CP0_EBASE $15, 1
12 12
13 .macro kernel_entry_setup 13 .macro kernel_entry_setup
14#ifdef CONFIG_SMP
14 mfc0 t0, CP0_EBASE 15 mfc0 t0, CP0_EBASE
15 andi t0, t0, 0x3ff # CPUNum 16 andi t0, t0, 0x3ff # CPUNum
16 beqz t0, 1f 17 beqz t0, 1f
17 # CPUs other than zero goto smp_bootstrap 18 # CPUs other than zero goto smp_bootstrap
18 j smp_bootstrap 19 j smp_bootstrap
20#endif /* CONFIG_SMP */
19 21
201: 221:
21 .endm 23 .endm
diff --git a/arch/mips/kernel/mips-r2-to-r6-emul.c b/arch/mips/kernel/mips-r2-to-r6-emul.c
index c3372cac6db2..0a7e10b5f9e3 100644
--- a/arch/mips/kernel/mips-r2-to-r6-emul.c
+++ b/arch/mips/kernel/mips-r2-to-r6-emul.c
@@ -1164,7 +1164,9 @@ fpu_emul:
1164 regs->regs[31] = r31; 1164 regs->regs[31] = r31;
1165 regs->cp0_epc = epc; 1165 regs->cp0_epc = epc;
1166 if (!used_math()) { /* First time FPU user. */ 1166 if (!used_math()) { /* First time FPU user. */
1167 preempt_disable();
1167 err = init_fpu(); 1168 err = init_fpu();
1169 preempt_enable();
1168 set_used_math(); 1170 set_used_math();
1169 } 1171 }
1170 lose_fpu(1); /* Save FPU state for the emulator. */ 1172 lose_fpu(1); /* Save FPU state for the emulator. */
diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
index 7429ad09fbe3..d2d061520a23 100644
--- a/arch/mips/kernel/process.c
+++ b/arch/mips/kernel/process.c
@@ -605,14 +605,14 @@ int mips_set_process_fp_mode(struct task_struct *task, unsigned int value)
605 return -EOPNOTSUPP; 605 return -EOPNOTSUPP;
606 606
607 /* Avoid inadvertently triggering emulation */ 607 /* Avoid inadvertently triggering emulation */
608 if ((value & PR_FP_MODE_FR) && cpu_has_fpu && 608 if ((value & PR_FP_MODE_FR) && raw_cpu_has_fpu &&
609 !(current_cpu_data.fpu_id & MIPS_FPIR_F64)) 609 !(raw_current_cpu_data.fpu_id & MIPS_FPIR_F64))
610 return -EOPNOTSUPP; 610 return -EOPNOTSUPP;
611 if ((value & PR_FP_MODE_FRE) && cpu_has_fpu && !cpu_has_fre) 611 if ((value & PR_FP_MODE_FRE) && raw_cpu_has_fpu && !cpu_has_fre)
612 return -EOPNOTSUPP; 612 return -EOPNOTSUPP;
613 613
614 /* FR = 0 not supported in MIPS R6 */ 614 /* FR = 0 not supported in MIPS R6 */
615 if (!(value & PR_FP_MODE_FR) && cpu_has_fpu && cpu_has_mips_r6) 615 if (!(value & PR_FP_MODE_FR) && raw_cpu_has_fpu && cpu_has_mips_r6)
616 return -EOPNOTSUPP; 616 return -EOPNOTSUPP;
617 617
618 /* Proceed with the mode switch */ 618 /* Proceed with the mode switch */
diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c
index 36cf8d65c47d..3be0e6ba2797 100644
--- a/arch/mips/kernel/setup.c
+++ b/arch/mips/kernel/setup.c
@@ -87,6 +87,13 @@ void __init add_memory_region(phys_addr_t start, phys_addr_t size, long type)
87 int x = boot_mem_map.nr_map; 87 int x = boot_mem_map.nr_map;
88 int i; 88 int i;
89 89
90 /*
91 * If the region reaches the top of the physical address space, adjust
92 * the size slightly so that (start + size) doesn't overflow
93 */
94 if (start + size - 1 == (phys_addr_t)ULLONG_MAX)
95 --size;
96
90 /* Sanity check */ 97 /* Sanity check */
91 if (start + size < start) { 98 if (start + size < start) {
92 pr_warn("Trying to add an invalid memory region, skipped\n"); 99 pr_warn("Trying to add an invalid memory region, skipped\n");
diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c
index f95f094f36e4..b0baf48951fa 100644
--- a/arch/mips/kernel/smp.c
+++ b/arch/mips/kernel/smp.c
@@ -322,6 +322,9 @@ asmlinkage void start_secondary(void)
322 cpumask_set_cpu(cpu, &cpu_coherent_mask); 322 cpumask_set_cpu(cpu, &cpu_coherent_mask);
323 notify_cpu_starting(cpu); 323 notify_cpu_starting(cpu);
324 324
325 cpumask_set_cpu(cpu, &cpu_callin_map);
326 synchronise_count_slave(cpu);
327
325 set_cpu_online(cpu, true); 328 set_cpu_online(cpu, true);
326 329
327 set_cpu_sibling_map(cpu); 330 set_cpu_sibling_map(cpu);
@@ -329,10 +332,6 @@ asmlinkage void start_secondary(void)
329 332
330 calculate_cpu_foreign_map(); 333 calculate_cpu_foreign_map();
331 334
332 cpumask_set_cpu(cpu, &cpu_callin_map);
333
334 synchronise_count_slave(cpu);
335
336 /* 335 /*
337 * irq will be enabled in ->smp_finish(), enabling it too early 336 * irq will be enabled in ->smp_finish(), enabling it too early
338 * is dangerous. 337 * is dangerous.
diff --git a/arch/mips/kernel/uprobes.c b/arch/mips/kernel/uprobes.c
index 8452d933a645..1149b30c9aeb 100644
--- a/arch/mips/kernel/uprobes.c
+++ b/arch/mips/kernel/uprobes.c
@@ -222,7 +222,7 @@ int arch_uprobe_exception_notify(struct notifier_block *self,
222 return NOTIFY_DONE; 222 return NOTIFY_DONE;
223 223
224 switch (val) { 224 switch (val) {
225 case DIE_BREAK: 225 case DIE_UPROBE:
226 if (uprobe_pre_sstep_notifier(regs)) 226 if (uprobe_pre_sstep_notifier(regs))
227 return NOTIFY_STOP; 227 return NOTIFY_STOP;
228 break; 228 break;
diff --git a/arch/mips/kernel/vdso.c b/arch/mips/kernel/vdso.c
index 9abe447a4b48..f9dbfb14af33 100644
--- a/arch/mips/kernel/vdso.c
+++ b/arch/mips/kernel/vdso.c
@@ -39,16 +39,16 @@ static struct vm_special_mapping vdso_vvar_mapping = {
39static void __init init_vdso_image(struct mips_vdso_image *image) 39static void __init init_vdso_image(struct mips_vdso_image *image)
40{ 40{
41 unsigned long num_pages, i; 41 unsigned long num_pages, i;
42 unsigned long data_pfn;
42 43
43 BUG_ON(!PAGE_ALIGNED(image->data)); 44 BUG_ON(!PAGE_ALIGNED(image->data));
44 BUG_ON(!PAGE_ALIGNED(image->size)); 45 BUG_ON(!PAGE_ALIGNED(image->size));
45 46
46 num_pages = image->size / PAGE_SIZE; 47 num_pages = image->size / PAGE_SIZE;
47 48
48 for (i = 0; i < num_pages; i++) { 49 data_pfn = __phys_to_pfn(__pa_symbol(image->data));
49 image->mapping.pages[i] = 50 for (i = 0; i < num_pages; i++)
50 virt_to_page(image->data + (i * PAGE_SIZE)); 51 image->mapping.pages[i] = pfn_to_page(data_pfn + i);
51 }
52} 52}
53 53
54static int __init init_vdso(void) 54static int __init init_vdso(void)
diff --git a/arch/mips/math-emu/dsemul.c b/arch/mips/math-emu/dsemul.c
index 72a4642eee2c..4a094f7acb3d 100644
--- a/arch/mips/math-emu/dsemul.c
+++ b/arch/mips/math-emu/dsemul.c
@@ -298,5 +298,6 @@ bool do_dsemulret(struct pt_regs *xcp)
298 /* Set EPC to return to post-branch instruction */ 298 /* Set EPC to return to post-branch instruction */
299 xcp->cp0_epc = current->thread.bd_emu_cont_pc; 299 xcp->cp0_epc = current->thread.bd_emu_cont_pc;
300 pr_debug("dsemulret to 0x%08lx\n", xcp->cp0_epc); 300 pr_debug("dsemulret to 0x%08lx\n", xcp->cp0_epc);
301 MIPS_FPU_EMU_INC_STATS(ds_emul);
301 return true; 302 return true;
302} 303}
diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c
index cd72805b64a7..fa7d8d3790bf 100644
--- a/arch/mips/mm/c-r4k.c
+++ b/arch/mips/mm/c-r4k.c
@@ -800,7 +800,7 @@ static void r4k_flush_icache_range(unsigned long start, unsigned long end)
800 * If address-based cache ops don't require an SMP call, then 800 * If address-based cache ops don't require an SMP call, then
801 * use them exclusively for small flushes. 801 * use them exclusively for small flushes.
802 */ 802 */
803 size = start - end; 803 size = end - start;
804 cache_size = icache_size; 804 cache_size = icache_size;
805 if (!cpu_has_ic_fills_f_dc) { 805 if (!cpu_has_ic_fills_f_dc) {
806 size *= 2; 806 size *= 2;
diff --git a/arch/mips/mm/init.c b/arch/mips/mm/init.c
index a5509e7dcad2..2c3749d98f04 100644
--- a/arch/mips/mm/init.c
+++ b/arch/mips/mm/init.c
@@ -261,7 +261,6 @@ unsigned __weak platform_maar_init(unsigned num_pairs)
261{ 261{
262 struct maar_config cfg[BOOT_MEM_MAP_MAX]; 262 struct maar_config cfg[BOOT_MEM_MAP_MAX];
263 unsigned i, num_configured, num_cfg = 0; 263 unsigned i, num_configured, num_cfg = 0;
264 phys_addr_t skip;
265 264
266 for (i = 0; i < boot_mem_map.nr_map; i++) { 265 for (i = 0; i < boot_mem_map.nr_map; i++) {
267 switch (boot_mem_map.map[i].type) { 266 switch (boot_mem_map.map[i].type) {
@@ -272,14 +271,14 @@ unsigned __weak platform_maar_init(unsigned num_pairs)
272 continue; 271 continue;
273 } 272 }
274 273
275 skip = 0x10000 - (boot_mem_map.map[i].addr & 0xffff); 274 /* Round lower up */
276
277 cfg[num_cfg].lower = boot_mem_map.map[i].addr; 275 cfg[num_cfg].lower = boot_mem_map.map[i].addr;
278 cfg[num_cfg].lower += skip; 276 cfg[num_cfg].lower = (cfg[num_cfg].lower + 0xffff) & ~0xffff;
279 277
280 cfg[num_cfg].upper = cfg[num_cfg].lower; 278 /* Round upper down */
281 cfg[num_cfg].upper += boot_mem_map.map[i].size - 1; 279 cfg[num_cfg].upper = boot_mem_map.map[i].addr +
282 cfg[num_cfg].upper -= skip; 280 boot_mem_map.map[i].size;
281 cfg[num_cfg].upper = (cfg[num_cfg].upper & ~0xffff) - 1;
283 282
284 cfg[num_cfg].attrs = MIPS_MAAR_S; 283 cfg[num_cfg].attrs = MIPS_MAAR_S;
285 num_cfg++; 284 num_cfg++;
diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c
index bc0c91e84ca0..38a5c657ffd3 100644
--- a/arch/powerpc/platforms/powernv/pci-ioda.c
+++ b/arch/powerpc/platforms/powernv/pci-ioda.c
@@ -124,6 +124,13 @@ static inline bool pnv_pci_is_m64(struct pnv_phb *phb, struct resource *r)
124 r->start < (phb->ioda.m64_base + phb->ioda.m64_size)); 124 r->start < (phb->ioda.m64_base + phb->ioda.m64_size));
125} 125}
126 126
127static inline bool pnv_pci_is_m64_flags(unsigned long resource_flags)
128{
129 unsigned long flags = (IORESOURCE_MEM_64 | IORESOURCE_PREFETCH);
130
131 return (resource_flags & flags) == flags;
132}
133
127static struct pnv_ioda_pe *pnv_ioda_init_pe(struct pnv_phb *phb, int pe_no) 134static struct pnv_ioda_pe *pnv_ioda_init_pe(struct pnv_phb *phb, int pe_no)
128{ 135{
129 phb->ioda.pe_array[pe_no].phb = phb; 136 phb->ioda.pe_array[pe_no].phb = phb;
@@ -2871,7 +2878,7 @@ static void pnv_pci_ioda_fixup_iov_resources(struct pci_dev *pdev)
2871 res = &pdev->resource[i + PCI_IOV_RESOURCES]; 2878 res = &pdev->resource[i + PCI_IOV_RESOURCES];
2872 if (!res->flags || res->parent) 2879 if (!res->flags || res->parent)
2873 continue; 2880 continue;
2874 if (!pnv_pci_is_m64(phb, res)) { 2881 if (!pnv_pci_is_m64_flags(res->flags)) {
2875 dev_warn(&pdev->dev, "Don't support SR-IOV with" 2882 dev_warn(&pdev->dev, "Don't support SR-IOV with"
2876 " non M64 VF BAR%d: %pR. \n", 2883 " non M64 VF BAR%d: %pR. \n",
2877 i, res); 2884 i, res);
@@ -3096,7 +3103,7 @@ static resource_size_t pnv_pci_window_alignment(struct pci_bus *bus,
3096 * alignment for any 64-bit resource, PCIe doesn't care and 3103 * alignment for any 64-bit resource, PCIe doesn't care and
3097 * bridges only do 64-bit prefetchable anyway. 3104 * bridges only do 64-bit prefetchable anyway.
3098 */ 3105 */
3099 if (phb->ioda.m64_segsize && (type & IORESOURCE_MEM_64)) 3106 if (phb->ioda.m64_segsize && pnv_pci_is_m64_flags(type))
3100 return phb->ioda.m64_segsize; 3107 return phb->ioda.m64_segsize;
3101 if (type & IORESOURCE_MEM) 3108 if (type & IORESOURCE_MEM)
3102 return phb->ioda.m32_segsize; 3109 return phb->ioda.m32_segsize;
diff --git a/arch/sh/include/asm/atomic-llsc.h b/arch/sh/include/asm/atomic-llsc.h
index caea2c45f6c2..1d159ce50f5a 100644
--- a/arch/sh/include/asm/atomic-llsc.h
+++ b/arch/sh/include/asm/atomic-llsc.h
@@ -60,7 +60,7 @@ static inline int atomic_fetch_##op(int i, atomic_t *v) \
60" movco.l %0, @%3 \n" \ 60" movco.l %0, @%3 \n" \
61" bf 1b \n" \ 61" bf 1b \n" \
62" synco \n" \ 62" synco \n" \
63 : "=&z" (temp), "=&z" (res) \ 63 : "=&z" (temp), "=&r" (res) \
64 : "r" (i), "r" (&v->counter) \ 64 : "r" (i), "r" (&v->counter) \
65 : "t"); \ 65 : "t"); \
66 \ 66 \
diff --git a/arch/x86/events/intel/bts.c b/arch/x86/events/intel/bts.c
index bdcd6510992c..982c9e31daca 100644
--- a/arch/x86/events/intel/bts.c
+++ b/arch/x86/events/intel/bts.c
@@ -455,7 +455,7 @@ int intel_bts_interrupt(void)
455 * The only surefire way of knowing if this NMI is ours is by checking 455 * The only surefire way of knowing if this NMI is ours is by checking
456 * the write ptr against the PMI threshold. 456 * the write ptr against the PMI threshold.
457 */ 457 */
458 if (ds->bts_index >= ds->bts_interrupt_threshold) 458 if (ds && (ds->bts_index >= ds->bts_interrupt_threshold))
459 handled = 1; 459 handled = 1;
460 460
461 /* 461 /*
@@ -584,7 +584,8 @@ static __init int bts_init(void)
584 if (!boot_cpu_has(X86_FEATURE_DTES64) || !x86_pmu.bts) 584 if (!boot_cpu_has(X86_FEATURE_DTES64) || !x86_pmu.bts)
585 return -ENODEV; 585 return -ENODEV;
586 586
587 bts_pmu.capabilities = PERF_PMU_CAP_AUX_NO_SG | PERF_PMU_CAP_ITRACE; 587 bts_pmu.capabilities = PERF_PMU_CAP_AUX_NO_SG | PERF_PMU_CAP_ITRACE |
588 PERF_PMU_CAP_EXCLUSIVE;
588 bts_pmu.task_ctx_nr = perf_sw_context; 589 bts_pmu.task_ctx_nr = perf_sw_context;
589 bts_pmu.event_init = bts_event_init; 590 bts_pmu.event_init = bts_event_init;
590 bts_pmu.add = bts_event_add; 591 bts_pmu.add = bts_event_add;
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index 849dc09fa4f0..e3353c97d086 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -917,11 +917,11 @@ static void populate_pte(struct cpa_data *cpa,
917 } 917 }
918} 918}
919 919
920static int populate_pmd(struct cpa_data *cpa, 920static long populate_pmd(struct cpa_data *cpa,
921 unsigned long start, unsigned long end, 921 unsigned long start, unsigned long end,
922 unsigned num_pages, pud_t *pud, pgprot_t pgprot) 922 unsigned num_pages, pud_t *pud, pgprot_t pgprot)
923{ 923{
924 unsigned int cur_pages = 0; 924 long cur_pages = 0;
925 pmd_t *pmd; 925 pmd_t *pmd;
926 pgprot_t pmd_pgprot; 926 pgprot_t pmd_pgprot;
927 927
@@ -991,12 +991,12 @@ static int populate_pmd(struct cpa_data *cpa,
991 return num_pages; 991 return num_pages;
992} 992}
993 993
994static int populate_pud(struct cpa_data *cpa, unsigned long start, pgd_t *pgd, 994static long populate_pud(struct cpa_data *cpa, unsigned long start, pgd_t *pgd,
995 pgprot_t pgprot) 995 pgprot_t pgprot)
996{ 996{
997 pud_t *pud; 997 pud_t *pud;
998 unsigned long end; 998 unsigned long end;
999 int cur_pages = 0; 999 long cur_pages = 0;
1000 pgprot_t pud_pgprot; 1000 pgprot_t pud_pgprot;
1001 1001
1002 end = start + (cpa->numpages << PAGE_SHIFT); 1002 end = start + (cpa->numpages << PAGE_SHIFT);
@@ -1052,7 +1052,7 @@ static int populate_pud(struct cpa_data *cpa, unsigned long start, pgd_t *pgd,
1052 1052
1053 /* Map trailing leftover */ 1053 /* Map trailing leftover */
1054 if (start < end) { 1054 if (start < end) {
1055 int tmp; 1055 long tmp;
1056 1056
1057 pud = pud_offset(pgd, start); 1057 pud = pud_offset(pgd, start);
1058 if (pud_none(*pud)) 1058 if (pud_none(*pud))
@@ -1078,7 +1078,7 @@ static int populate_pgd(struct cpa_data *cpa, unsigned long addr)
1078 pgprot_t pgprot = __pgprot(_KERNPG_TABLE); 1078 pgprot_t pgprot = __pgprot(_KERNPG_TABLE);
1079 pud_t *pud = NULL; /* shut up gcc */ 1079 pud_t *pud = NULL; /* shut up gcc */
1080 pgd_t *pgd_entry; 1080 pgd_t *pgd_entry;
1081 int ret; 1081 long ret;
1082 1082
1083 pgd_entry = cpa->pgd + pgd_index(addr); 1083 pgd_entry = cpa->pgd + pgd_index(addr);
1084 1084
@@ -1327,7 +1327,8 @@ static int cpa_process_alias(struct cpa_data *cpa)
1327 1327
1328static int __change_page_attr_set_clr(struct cpa_data *cpa, int checkalias) 1328static int __change_page_attr_set_clr(struct cpa_data *cpa, int checkalias)
1329{ 1329{
1330 int ret, numpages = cpa->numpages; 1330 unsigned long numpages = cpa->numpages;
1331 int ret;
1331 1332
1332 while (numpages) { 1333 while (numpages) {
1333 /* 1334 /*
diff --git a/arch/x86/platform/efi/efi_64.c b/arch/x86/platform/efi/efi_64.c
index 677e29e29473..8dd3784eb075 100644
--- a/arch/x86/platform/efi/efi_64.c
+++ b/arch/x86/platform/efi/efi_64.c
@@ -245,7 +245,7 @@ int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages)
245 * text and allocate a new stack because we can't rely on the 245 * text and allocate a new stack because we can't rely on the
246 * stack pointer being < 4GB. 246 * stack pointer being < 4GB.
247 */ 247 */
248 if (!IS_ENABLED(CONFIG_EFI_MIXED)) 248 if (!IS_ENABLED(CONFIG_EFI_MIXED) || efi_is_native())
249 return 0; 249 return 0;
250 250
251 /* 251 /*
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 13f5a6c1de76..c207fa9870eb 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -296,17 +296,29 @@ struct request *blk_mq_alloc_request_hctx(struct request_queue *q, int rw,
296 if (ret) 296 if (ret)
297 return ERR_PTR(ret); 297 return ERR_PTR(ret);
298 298
299 /*
300 * Check if the hardware context is actually mapped to anything.
301 * If not tell the caller that it should skip this queue.
302 */
299 hctx = q->queue_hw_ctx[hctx_idx]; 303 hctx = q->queue_hw_ctx[hctx_idx];
304 if (!blk_mq_hw_queue_mapped(hctx)) {
305 ret = -EXDEV;
306 goto out_queue_exit;
307 }
300 ctx = __blk_mq_get_ctx(q, cpumask_first(hctx->cpumask)); 308 ctx = __blk_mq_get_ctx(q, cpumask_first(hctx->cpumask));
301 309
302 blk_mq_set_alloc_data(&alloc_data, q, flags, ctx, hctx); 310 blk_mq_set_alloc_data(&alloc_data, q, flags, ctx, hctx);
303 rq = __blk_mq_alloc_request(&alloc_data, rw, 0); 311 rq = __blk_mq_alloc_request(&alloc_data, rw, 0);
304 if (!rq) { 312 if (!rq) {
305 blk_queue_exit(q); 313 ret = -EWOULDBLOCK;
306 return ERR_PTR(-EWOULDBLOCK); 314 goto out_queue_exit;
307 } 315 }
308 316
309 return rq; 317 return rq;
318
319out_queue_exit:
320 blk_queue_exit(q);
321 return ERR_PTR(ret);
310} 322}
311EXPORT_SYMBOL_GPL(blk_mq_alloc_request_hctx); 323EXPORT_SYMBOL_GPL(blk_mq_alloc_request_hctx);
312 324
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index f1aba26f4719..a3ea8260c94c 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -780,9 +780,11 @@ static bool tg_may_dispatch(struct throtl_grp *tg, struct bio *bio,
780 /* 780 /*
781 * If previous slice expired, start a new one otherwise renew/extend 781 * If previous slice expired, start a new one otherwise renew/extend
782 * existing slice to make sure it is at least throtl_slice interval 782 * existing slice to make sure it is at least throtl_slice interval
783 * long since now. 783 * long since now. New slice is started only for empty throttle group.
784 * If there is queued bio, that means there should be an active
785 * slice and it should be extended instead.
784 */ 786 */
785 if (throtl_slice_used(tg, rw)) 787 if (throtl_slice_used(tg, rw) && !(tg->service_queue.nr_queued[rw]))
786 throtl_start_new_slice(tg, rw); 788 throtl_start_new_slice(tg, rw);
787 else { 789 else {
788 if (time_before(tg->slice_end[rw], jiffies + throtl_slice)) 790 if (time_before(tg->slice_end[rw], jiffies + throtl_slice))
diff --git a/crypto/rsa-pkcs1pad.c b/crypto/rsa-pkcs1pad.c
index 877019a6d3ea..8baab4307f7b 100644
--- a/crypto/rsa-pkcs1pad.c
+++ b/crypto/rsa-pkcs1pad.c
@@ -298,41 +298,48 @@ static int pkcs1pad_decrypt_complete(struct akcipher_request *req, int err)
298 struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); 298 struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
299 struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm); 299 struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm);
300 struct pkcs1pad_request *req_ctx = akcipher_request_ctx(req); 300 struct pkcs1pad_request *req_ctx = akcipher_request_ctx(req);
301 unsigned int dst_len;
301 unsigned int pos; 302 unsigned int pos;
302 303 u8 *out_buf;
303 if (err == -EOVERFLOW)
304 /* Decrypted value had no leading 0 byte */
305 err = -EINVAL;
306 304
307 if (err) 305 if (err)
308 goto done; 306 goto done;
309 307
310 if (req_ctx->child_req.dst_len != ctx->key_size - 1) { 308 err = -EINVAL;
311 err = -EINVAL; 309 dst_len = req_ctx->child_req.dst_len;
310 if (dst_len < ctx->key_size - 1)
312 goto done; 311 goto done;
312
313 out_buf = req_ctx->out_buf;
314 if (dst_len == ctx->key_size) {
315 if (out_buf[0] != 0x00)
316 /* Decrypted value had no leading 0 byte */
317 goto done;
318
319 dst_len--;
320 out_buf++;
313 } 321 }
314 322
315 if (req_ctx->out_buf[0] != 0x02) { 323 if (out_buf[0] != 0x02)
316 err = -EINVAL;
317 goto done; 324 goto done;
318 } 325
319 for (pos = 1; pos < req_ctx->child_req.dst_len; pos++) 326 for (pos = 1; pos < dst_len; pos++)
320 if (req_ctx->out_buf[pos] == 0x00) 327 if (out_buf[pos] == 0x00)
321 break; 328 break;
322 if (pos < 9 || pos == req_ctx->child_req.dst_len) { 329 if (pos < 9 || pos == dst_len)
323 err = -EINVAL;
324 goto done; 330 goto done;
325 }
326 pos++; 331 pos++;
327 332
328 if (req->dst_len < req_ctx->child_req.dst_len - pos) 333 err = 0;
334
335 if (req->dst_len < dst_len - pos)
329 err = -EOVERFLOW; 336 err = -EOVERFLOW;
330 req->dst_len = req_ctx->child_req.dst_len - pos; 337 req->dst_len = dst_len - pos;
331 338
332 if (!err) 339 if (!err)
333 sg_copy_from_buffer(req->dst, 340 sg_copy_from_buffer(req->dst,
334 sg_nents_for_len(req->dst, req->dst_len), 341 sg_nents_for_len(req->dst, req->dst_len),
335 req_ctx->out_buf + pos, req->dst_len); 342 out_buf + pos, req->dst_len);
336 343
337done: 344done:
338 kzfree(req_ctx->out_buf); 345 kzfree(req_ctx->out_buf);
diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c
index 25d26bb18970..e964d068874d 100644
--- a/drivers/base/regmap/regmap.c
+++ b/drivers/base/regmap/regmap.c
@@ -1475,7 +1475,11 @@ int _regmap_raw_write(struct regmap *map, unsigned int reg,
1475 1475
1476 kfree(buf); 1476 kfree(buf);
1477 } else if (ret != 0 && !map->cache_bypass && map->format.parse_val) { 1477 } else if (ret != 0 && !map->cache_bypass && map->format.parse_val) {
1478 regcache_drop_region(map, reg, reg + 1); 1478 /* regcache_drop_region() takes lock that we already have,
1479 * thus call map->cache_ops->drop() directly
1480 */
1481 if (map->cache_ops && map->cache_ops->drop)
1482 map->cache_ops->drop(map, reg, reg + 1);
1479 } 1483 }
1480 1484
1481 trace_regmap_hw_write_done(map, reg, val_len / map->format.val_bytes); 1485 trace_regmap_hw_write_done(map, reg, val_len / map->format.val_bytes);
diff --git a/drivers/i2c/busses/i2c-eg20t.c b/drivers/i2c/busses/i2c-eg20t.c
index 137125b5eae7..5ce71ce7b6c4 100644
--- a/drivers/i2c/busses/i2c-eg20t.c
+++ b/drivers/i2c/busses/i2c-eg20t.c
@@ -773,13 +773,6 @@ static int pch_i2c_probe(struct pci_dev *pdev,
773 /* Set the number of I2C channel instance */ 773 /* Set the number of I2C channel instance */
774 adap_info->ch_num = id->driver_data; 774 adap_info->ch_num = id->driver_data;
775 775
776 ret = request_irq(pdev->irq, pch_i2c_handler, IRQF_SHARED,
777 KBUILD_MODNAME, adap_info);
778 if (ret) {
779 pch_pci_err(pdev, "request_irq FAILED\n");
780 goto err_request_irq;
781 }
782
783 for (i = 0; i < adap_info->ch_num; i++) { 776 for (i = 0; i < adap_info->ch_num; i++) {
784 pch_adap = &adap_info->pch_data[i].pch_adapter; 777 pch_adap = &adap_info->pch_data[i].pch_adapter;
785 adap_info->pch_i2c_suspended = false; 778 adap_info->pch_i2c_suspended = false;
@@ -797,6 +790,17 @@ static int pch_i2c_probe(struct pci_dev *pdev,
797 790
798 pch_adap->dev.of_node = pdev->dev.of_node; 791 pch_adap->dev.of_node = pdev->dev.of_node;
799 pch_adap->dev.parent = &pdev->dev; 792 pch_adap->dev.parent = &pdev->dev;
793 }
794
795 ret = request_irq(pdev->irq, pch_i2c_handler, IRQF_SHARED,
796 KBUILD_MODNAME, adap_info);
797 if (ret) {
798 pch_pci_err(pdev, "request_irq FAILED\n");
799 goto err_request_irq;
800 }
801
802 for (i = 0; i < adap_info->ch_num; i++) {
803 pch_adap = &adap_info->pch_data[i].pch_adapter;
800 804
801 pch_i2c_init(&adap_info->pch_data[i]); 805 pch_i2c_init(&adap_info->pch_data[i]);
802 806
diff --git a/drivers/i2c/busses/i2c-qup.c b/drivers/i2c/busses/i2c-qup.c
index 501bd15cb78e..a8497cfdae6f 100644
--- a/drivers/i2c/busses/i2c-qup.c
+++ b/drivers/i2c/busses/i2c-qup.c
@@ -1599,7 +1599,8 @@ static int qup_i2c_pm_resume_runtime(struct device *device)
1599#ifdef CONFIG_PM_SLEEP 1599#ifdef CONFIG_PM_SLEEP
1600static int qup_i2c_suspend(struct device *device) 1600static int qup_i2c_suspend(struct device *device)
1601{ 1601{
1602 qup_i2c_pm_suspend_runtime(device); 1602 if (!pm_runtime_suspended(device))
1603 return qup_i2c_pm_suspend_runtime(device);
1603 return 0; 1604 return 0;
1604} 1605}
1605 1606
diff --git a/drivers/i2c/muxes/i2c-mux-pca954x.c b/drivers/i2c/muxes/i2c-mux-pca954x.c
index 528e755c468f..3278ebf1cc5c 100644
--- a/drivers/i2c/muxes/i2c-mux-pca954x.c
+++ b/drivers/i2c/muxes/i2c-mux-pca954x.c
@@ -164,7 +164,7 @@ static int pca954x_select_chan(struct i2c_mux_core *muxc, u32 chan)
164 /* Only select the channel if its different from the last channel */ 164 /* Only select the channel if its different from the last channel */
165 if (data->last_chan != regval) { 165 if (data->last_chan != regval) {
166 ret = pca954x_reg_write(muxc->parent, client, regval); 166 ret = pca954x_reg_write(muxc->parent, client, regval);
167 data->last_chan = regval; 167 data->last_chan = ret ? 0 : regval;
168 } 168 }
169 169
170 return ret; 170 return ret;
diff --git a/drivers/input/touchscreen/silead.c b/drivers/input/touchscreen/silead.c
index b2744a64e933..f502c8488be8 100644
--- a/drivers/input/touchscreen/silead.c
+++ b/drivers/input/touchscreen/silead.c
@@ -390,9 +390,10 @@ static void silead_ts_read_props(struct i2c_client *client)
390 data->max_fingers = 5; /* Most devices handle up-to 5 fingers */ 390 data->max_fingers = 5; /* Most devices handle up-to 5 fingers */
391 } 391 }
392 392
393 error = device_property_read_string(dev, "touchscreen-fw-name", &str); 393 error = device_property_read_string(dev, "firmware-name", &str);
394 if (!error) 394 if (!error)
395 snprintf(data->fw_name, sizeof(data->fw_name), "%s", str); 395 snprintf(data->fw_name, sizeof(data->fw_name),
396 "silead/%s", str);
396 else 397 else
397 dev_dbg(dev, "Firmware file name read error. Using default."); 398 dev_dbg(dev, "Firmware file name read error. Using default.");
398} 399}
@@ -410,14 +411,14 @@ static int silead_ts_set_default_fw_name(struct silead_ts_data *data,
410 if (!acpi_id) 411 if (!acpi_id)
411 return -ENODEV; 412 return -ENODEV;
412 413
413 snprintf(data->fw_name, sizeof(data->fw_name), "%s.fw", 414 snprintf(data->fw_name, sizeof(data->fw_name),
414 acpi_id->id); 415 "silead/%s.fw", acpi_id->id);
415 416
416 for (i = 0; i < strlen(data->fw_name); i++) 417 for (i = 0; i < strlen(data->fw_name); i++)
417 data->fw_name[i] = tolower(data->fw_name[i]); 418 data->fw_name[i] = tolower(data->fw_name[i]);
418 } else { 419 } else {
419 snprintf(data->fw_name, sizeof(data->fw_name), "%s.fw", 420 snprintf(data->fw_name, sizeof(data->fw_name),
420 id->name); 421 "silead/%s.fw", id->name);
421 } 422 }
422 423
423 return 0; 424 return 0;
@@ -426,7 +427,8 @@ static int silead_ts_set_default_fw_name(struct silead_ts_data *data,
426static int silead_ts_set_default_fw_name(struct silead_ts_data *data, 427static int silead_ts_set_default_fw_name(struct silead_ts_data *data,
427 const struct i2c_device_id *id) 428 const struct i2c_device_id *id)
428{ 429{
429 snprintf(data->fw_name, sizeof(data->fw_name), "%s.fw", id->name); 430 snprintf(data->fw_name, sizeof(data->fw_name),
431 "silead/%s.fw", id->name);
430 return 0; 432 return 0;
431} 433}
432#endif 434#endif
diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
index ede5672ab34d..da6c0ba61d4f 100644
--- a/drivers/irqchip/irq-gic-v3.c
+++ b/drivers/irqchip/irq-gic-v3.c
@@ -548,7 +548,7 @@ static int gic_starting_cpu(unsigned int cpu)
548static u16 gic_compute_target_list(int *base_cpu, const struct cpumask *mask, 548static u16 gic_compute_target_list(int *base_cpu, const struct cpumask *mask,
549 unsigned long cluster_id) 549 unsigned long cluster_id)
550{ 550{
551 int cpu = *base_cpu; 551 int next_cpu, cpu = *base_cpu;
552 unsigned long mpidr = cpu_logical_map(cpu); 552 unsigned long mpidr = cpu_logical_map(cpu);
553 u16 tlist = 0; 553 u16 tlist = 0;
554 554
@@ -562,9 +562,10 @@ static u16 gic_compute_target_list(int *base_cpu, const struct cpumask *mask,
562 562
563 tlist |= 1 << (mpidr & 0xf); 563 tlist |= 1 << (mpidr & 0xf);
564 564
565 cpu = cpumask_next(cpu, mask); 565 next_cpu = cpumask_next(cpu, mask);
566 if (cpu >= nr_cpu_ids) 566 if (next_cpu >= nr_cpu_ids)
567 goto out; 567 goto out;
568 cpu = next_cpu;
568 569
569 mpidr = cpu_logical_map(cpu); 570 mpidr = cpu_logical_map(cpu);
570 571
diff --git a/drivers/irqchip/irq-mips-gic.c b/drivers/irqchip/irq-mips-gic.c
index 83f498393a7f..6185696405d5 100644
--- a/drivers/irqchip/irq-mips-gic.c
+++ b/drivers/irqchip/irq-mips-gic.c
@@ -638,27 +638,6 @@ static int gic_local_irq_domain_map(struct irq_domain *d, unsigned int virq,
638 if (!gic_local_irq_is_routable(intr)) 638 if (!gic_local_irq_is_routable(intr))
639 return -EPERM; 639 return -EPERM;
640 640
641 /*
642 * HACK: These are all really percpu interrupts, but the rest
643 * of the MIPS kernel code does not use the percpu IRQ API for
644 * the CP0 timer and performance counter interrupts.
645 */
646 switch (intr) {
647 case GIC_LOCAL_INT_TIMER:
648 case GIC_LOCAL_INT_PERFCTR:
649 case GIC_LOCAL_INT_FDC:
650 irq_set_chip_and_handler(virq,
651 &gic_all_vpes_local_irq_controller,
652 handle_percpu_irq);
653 break;
654 default:
655 irq_set_chip_and_handler(virq,
656 &gic_local_irq_controller,
657 handle_percpu_devid_irq);
658 irq_set_percpu_devid(virq);
659 break;
660 }
661
662 spin_lock_irqsave(&gic_lock, flags); 641 spin_lock_irqsave(&gic_lock, flags);
663 for (i = 0; i < gic_vpes; i++) { 642 for (i = 0; i < gic_vpes; i++) {
664 u32 val = GIC_MAP_TO_PIN_MSK | gic_cpu_pin; 643 u32 val = GIC_MAP_TO_PIN_MSK | gic_cpu_pin;
@@ -724,16 +703,42 @@ static int gic_shared_irq_domain_map(struct irq_domain *d, unsigned int virq,
724 return 0; 703 return 0;
725} 704}
726 705
727static int gic_irq_domain_map(struct irq_domain *d, unsigned int virq, 706static int gic_setup_dev_chip(struct irq_domain *d, unsigned int virq,
728 irq_hw_number_t hw) 707 unsigned int hwirq)
729{ 708{
730 if (GIC_HWIRQ_TO_LOCAL(hw) < GIC_NUM_LOCAL_INTRS) 709 struct irq_chip *chip;
731 return gic_local_irq_domain_map(d, virq, hw); 710 int err;
711
712 if (hwirq >= GIC_SHARED_HWIRQ_BASE) {
713 err = irq_domain_set_hwirq_and_chip(d, virq, hwirq,
714 &gic_level_irq_controller,
715 NULL);
716 } else {
717 switch (GIC_HWIRQ_TO_LOCAL(hwirq)) {
718 case GIC_LOCAL_INT_TIMER:
719 case GIC_LOCAL_INT_PERFCTR:
720 case GIC_LOCAL_INT_FDC:
721 /*
722 * HACK: These are all really percpu interrupts, but
723 * the rest of the MIPS kernel code does not use the
724 * percpu IRQ API for them.
725 */
726 chip = &gic_all_vpes_local_irq_controller;
727 irq_set_handler(virq, handle_percpu_irq);
728 break;
729
730 default:
731 chip = &gic_local_irq_controller;
732 irq_set_handler(virq, handle_percpu_devid_irq);
733 irq_set_percpu_devid(virq);
734 break;
735 }
732 736
733 irq_set_chip_and_handler(virq, &gic_level_irq_controller, 737 err = irq_domain_set_hwirq_and_chip(d, virq, hwirq,
734 handle_level_irq); 738 chip, NULL);
739 }
735 740
736 return gic_shared_irq_domain_map(d, virq, hw, 0); 741 return err;
737} 742}
738 743
739static int gic_irq_domain_alloc(struct irq_domain *d, unsigned int virq, 744static int gic_irq_domain_alloc(struct irq_domain *d, unsigned int virq,
@@ -744,15 +749,12 @@ static int gic_irq_domain_alloc(struct irq_domain *d, unsigned int virq,
744 int cpu, ret, i; 749 int cpu, ret, i;
745 750
746 if (spec->type == GIC_DEVICE) { 751 if (spec->type == GIC_DEVICE) {
747 /* verify that it doesn't conflict with an IPI irq */ 752 /* verify that shared irqs don't conflict with an IPI irq */
748 if (test_bit(spec->hwirq, ipi_resrv)) 753 if ((spec->hwirq >= GIC_SHARED_HWIRQ_BASE) &&
754 test_bit(GIC_HWIRQ_TO_SHARED(spec->hwirq), ipi_resrv))
749 return -EBUSY; 755 return -EBUSY;
750 756
751 hwirq = GIC_SHARED_TO_HWIRQ(spec->hwirq); 757 return gic_setup_dev_chip(d, virq, spec->hwirq);
752
753 return irq_domain_set_hwirq_and_chip(d, virq, hwirq,
754 &gic_level_irq_controller,
755 NULL);
756 } else { 758 } else {
757 base_hwirq = find_first_bit(ipi_resrv, gic_shared_intrs); 759 base_hwirq = find_first_bit(ipi_resrv, gic_shared_intrs);
758 if (base_hwirq == gic_shared_intrs) { 760 if (base_hwirq == gic_shared_intrs) {
@@ -821,7 +823,6 @@ int gic_irq_domain_match(struct irq_domain *d, struct device_node *node,
821} 823}
822 824
823static const struct irq_domain_ops gic_irq_domain_ops = { 825static const struct irq_domain_ops gic_irq_domain_ops = {
824 .map = gic_irq_domain_map,
825 .alloc = gic_irq_domain_alloc, 826 .alloc = gic_irq_domain_alloc,
826 .free = gic_irq_domain_free, 827 .free = gic_irq_domain_free,
827 .match = gic_irq_domain_match, 828 .match = gic_irq_domain_match,
@@ -852,29 +853,20 @@ static int gic_dev_domain_alloc(struct irq_domain *d, unsigned int virq,
852 struct irq_fwspec *fwspec = arg; 853 struct irq_fwspec *fwspec = arg;
853 struct gic_irq_spec spec = { 854 struct gic_irq_spec spec = {
854 .type = GIC_DEVICE, 855 .type = GIC_DEVICE,
855 .hwirq = fwspec->param[1],
856 }; 856 };
857 int i, ret; 857 int i, ret;
858 bool is_shared = fwspec->param[0] == GIC_SHARED;
859 858
860 if (is_shared) { 859 if (fwspec->param[0] == GIC_SHARED)
861 ret = irq_domain_alloc_irqs_parent(d, virq, nr_irqs, &spec); 860 spec.hwirq = GIC_SHARED_TO_HWIRQ(fwspec->param[1]);
862 if (ret) 861 else
863 return ret; 862 spec.hwirq = GIC_LOCAL_TO_HWIRQ(fwspec->param[1]);
864 }
865
866 for (i = 0; i < nr_irqs; i++) {
867 irq_hw_number_t hwirq;
868 863
869 if (is_shared) 864 ret = irq_domain_alloc_irqs_parent(d, virq, nr_irqs, &spec);
870 hwirq = GIC_SHARED_TO_HWIRQ(spec.hwirq + i); 865 if (ret)
871 else 866 return ret;
872 hwirq = GIC_LOCAL_TO_HWIRQ(spec.hwirq + i);
873 867
874 ret = irq_domain_set_hwirq_and_chip(d, virq + i, 868 for (i = 0; i < nr_irqs; i++) {
875 hwirq, 869 ret = gic_setup_dev_chip(d, virq + i, spec.hwirq + i);
876 &gic_level_irq_controller,
877 NULL);
878 if (ret) 870 if (ret)
879 goto error; 871 goto error;
880 } 872 }
@@ -896,7 +888,10 @@ void gic_dev_domain_free(struct irq_domain *d, unsigned int virq,
896static void gic_dev_domain_activate(struct irq_domain *domain, 888static void gic_dev_domain_activate(struct irq_domain *domain,
897 struct irq_data *d) 889 struct irq_data *d)
898{ 890{
899 gic_shared_irq_domain_map(domain, d->irq, d->hwirq, 0); 891 if (GIC_HWIRQ_TO_LOCAL(d->hwirq) < GIC_NUM_LOCAL_INTRS)
892 gic_local_irq_domain_map(domain, d->irq, d->hwirq);
893 else
894 gic_shared_irq_domain_map(domain, d->irq, d->hwirq, 0);
900} 895}
901 896
902static struct irq_domain_ops gic_dev_domain_ops = { 897static struct irq_domain_ops gic_dev_domain_ops = {
diff --git a/drivers/media/cec-edid.c b/drivers/media/cec-edid.c
index 70018247bdda..5719b991e340 100644
--- a/drivers/media/cec-edid.c
+++ b/drivers/media/cec-edid.c
@@ -70,7 +70,10 @@ static unsigned int cec_get_edid_spa_location(const u8 *edid, unsigned int size)
70 u8 tag = edid[i] >> 5; 70 u8 tag = edid[i] >> 5;
71 u8 len = edid[i] & 0x1f; 71 u8 len = edid[i] & 0x1f;
72 72
73 if (tag == 3 && len >= 5 && i + len <= end) 73 if (tag == 3 && len >= 5 && i + len <= end &&
74 edid[i + 1] == 0x03 &&
75 edid[i + 2] == 0x0c &&
76 edid[i + 3] == 0x00)
74 return i + 4; 77 return i + 4;
75 i += len + 1; 78 i += len + 1;
76 } while (i < end); 79 } while (i < end);
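
The hunk above tightens the CEA data-block scan: a tag value of 3 only marks a vendor-specific block, and only the block whose IEEE OUI is 0x000C03 (stored little-endian as 0x03, 0x0c, 0x00) is the HDMI VSDB that carries the source physical address. A minimal userspace sketch of the same scan; the names (find_spa, ext, start, end) are illustrative, not from the kernel tree:

#include <stdint.h>

/* Return the offset of the HDMI source physical address inside a CEA-861
 * extension's data block collection, or 0 if no HDMI VSDB is present.
 * 'start' is the offset of the first data block, 'end' the offset where
 * the detailed timing descriptors begin. */
static unsigned int find_spa(const uint8_t *ext, unsigned int start,
			     unsigned int end)
{
	unsigned int i = start;

	do {
		uint8_t tag = ext[i] >> 5;
		uint8_t len = ext[i] & 0x1f;

		/* tag 3 alone is any vendor block; the OUI bytes pick out
		 * the HDMI VSDB specifically */
		if (tag == 3 && len >= 5 && i + len <= end &&
		    ext[i + 1] == 0x03 && ext[i + 2] == 0x0c &&
		    ext[i + 3] == 0x00)
			return i + 4;	/* two bytes of physical address */
		i += len + 1;
	} while (i < end);

	return 0;
}
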
diff --git a/drivers/media/pci/cx23885/cx23885-417.c b/drivers/media/pci/cx23885/cx23885-417.c
index efec2d1a7afd..4d080da7afaf 100644
--- a/drivers/media/pci/cx23885/cx23885-417.c
+++ b/drivers/media/pci/cx23885/cx23885-417.c
@@ -1552,6 +1552,7 @@ int cx23885_417_register(struct cx23885_dev *dev)
1552 q->mem_ops = &vb2_dma_sg_memops; 1552 q->mem_ops = &vb2_dma_sg_memops;
1553 q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC; 1553 q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
1554 q->lock = &dev->lock; 1554 q->lock = &dev->lock;
1555 q->dev = &dev->pci->dev;
1555 1556
1556 err = vb2_queue_init(q); 1557 err = vb2_queue_init(q);
1557 if (err < 0) 1558 if (err < 0)
diff --git a/drivers/media/pci/saa7134/saa7134-dvb.c b/drivers/media/pci/saa7134/saa7134-dvb.c
index db987e5b93eb..59a4b5f7724e 100644
--- a/drivers/media/pci/saa7134/saa7134-dvb.c
+++ b/drivers/media/pci/saa7134/saa7134-dvb.c
@@ -1238,6 +1238,7 @@ static int dvb_init(struct saa7134_dev *dev)
1238 q->buf_struct_size = sizeof(struct saa7134_buf); 1238 q->buf_struct_size = sizeof(struct saa7134_buf);
1239 q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC; 1239 q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
1240 q->lock = &dev->lock; 1240 q->lock = &dev->lock;
1241 q->dev = &dev->pci->dev;
1241 ret = vb2_queue_init(q); 1242 ret = vb2_queue_init(q);
1242 if (ret) { 1243 if (ret) {
1243 vb2_dvb_dealloc_frontends(&dev->frontends); 1244 vb2_dvb_dealloc_frontends(&dev->frontends);
diff --git a/drivers/media/pci/saa7134/saa7134-empress.c b/drivers/media/pci/saa7134/saa7134-empress.c
index ca417a454d67..791a5161809b 100644
--- a/drivers/media/pci/saa7134/saa7134-empress.c
+++ b/drivers/media/pci/saa7134/saa7134-empress.c
@@ -295,6 +295,7 @@ static int empress_init(struct saa7134_dev *dev)
295 q->buf_struct_size = sizeof(struct saa7134_buf); 295 q->buf_struct_size = sizeof(struct saa7134_buf);
296 q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC; 296 q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
297 q->lock = &dev->lock; 297 q->lock = &dev->lock;
298 q->dev = &dev->pci->dev;
298 err = vb2_queue_init(q); 299 err = vb2_queue_init(q);
299 if (err) 300 if (err)
300 return err; 301 return err;
diff --git a/drivers/media/platform/Kconfig b/drivers/media/platform/Kconfig
index f25344bc7912..552b635cfce7 100644
--- a/drivers/media/platform/Kconfig
+++ b/drivers/media/platform/Kconfig
@@ -169,7 +169,7 @@ config VIDEO_MEDIATEK_VPU
169config VIDEO_MEDIATEK_VCODEC 169config VIDEO_MEDIATEK_VCODEC
170 tristate "Mediatek Video Codec driver" 170 tristate "Mediatek Video Codec driver"
171 depends on MTK_IOMMU || COMPILE_TEST 171 depends on MTK_IOMMU || COMPILE_TEST
172 depends on VIDEO_DEV && VIDEO_V4L2 172 depends on VIDEO_DEV && VIDEO_V4L2 && HAS_DMA
173 depends on ARCH_MEDIATEK || COMPILE_TEST 173 depends on ARCH_MEDIATEK || COMPILE_TEST
174 select VIDEOBUF2_DMA_CONTIG 174 select VIDEOBUF2_DMA_CONTIG
175 select V4L2_MEM2MEM_DEV 175 select V4L2_MEM2MEM_DEV
diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_drv.h b/drivers/media/platform/mtk-vcodec/mtk_vcodec_drv.h
index 94f0a425be42..3a8e6958adae 100644
--- a/drivers/media/platform/mtk-vcodec/mtk_vcodec_drv.h
+++ b/drivers/media/platform/mtk-vcodec/mtk_vcodec_drv.h
@@ -23,7 +23,6 @@
23#include <media/v4l2-ioctl.h> 23#include <media/v4l2-ioctl.h>
24#include <media/videobuf2-core.h> 24#include <media/videobuf2-core.h>
25 25
26#include "mtk_vcodec_util.h"
27 26
28#define MTK_VCODEC_DRV_NAME "mtk_vcodec_drv" 27#define MTK_VCODEC_DRV_NAME "mtk_vcodec_drv"
29#define MTK_VCODEC_ENC_NAME "mtk-vcodec-enc" 28#define MTK_VCODEC_ENC_NAME "mtk-vcodec-enc"
diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc.c b/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc.c
index 3ed3f2d31df5..2c5719ac23b2 100644
--- a/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc.c
+++ b/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc.c
@@ -487,7 +487,6 @@ static int vidioc_venc_s_fmt_out(struct file *file, void *priv,
487 struct mtk_q_data *q_data; 487 struct mtk_q_data *q_data;
488 int ret, i; 488 int ret, i;
489 struct mtk_video_fmt *fmt; 489 struct mtk_video_fmt *fmt;
490 unsigned int pitch_w_div16;
491 struct v4l2_pix_format_mplane *pix_fmt_mp = &f->fmt.pix_mp; 490 struct v4l2_pix_format_mplane *pix_fmt_mp = &f->fmt.pix_mp;
492 491
493 vq = v4l2_m2m_get_vq(ctx->m2m_ctx, f->type); 492 vq = v4l2_m2m_get_vq(ctx->m2m_ctx, f->type);
@@ -530,15 +529,6 @@ static int vidioc_venc_s_fmt_out(struct file *file, void *priv,
530 q_data->coded_width = f->fmt.pix_mp.width; 529 q_data->coded_width = f->fmt.pix_mp.width;
531 q_data->coded_height = f->fmt.pix_mp.height; 530 q_data->coded_height = f->fmt.pix_mp.height;
532 531
533 pitch_w_div16 = DIV_ROUND_UP(q_data->visible_width, 16);
534 if (pitch_w_div16 % 8 != 0) {
535 /* Adjust returned width/height, so application could correctly
536 * allocate hw required memory
537 */
538 q_data->visible_height += 32;
539 vidioc_try_fmt(f, q_data->fmt);
540 }
541
542 q_data->field = f->fmt.pix_mp.field; 532 q_data->field = f->fmt.pix_mp.field;
543 ctx->colorspace = f->fmt.pix_mp.colorspace; 533 ctx->colorspace = f->fmt.pix_mp.colorspace;
544 ctx->ycbcr_enc = f->fmt.pix_mp.ycbcr_enc; 534 ctx->ycbcr_enc = f->fmt.pix_mp.ycbcr_enc;
@@ -878,7 +868,8 @@ static int mtk_venc_encode_header(void *priv)
878{ 868{
879 struct mtk_vcodec_ctx *ctx = priv; 869 struct mtk_vcodec_ctx *ctx = priv;
880 int ret; 870 int ret;
881 struct vb2_buffer *dst_buf; 871 struct vb2_buffer *src_buf, *dst_buf;
872 struct vb2_v4l2_buffer *dst_vb2_v4l2, *src_vb2_v4l2;
882 struct mtk_vcodec_mem bs_buf; 873 struct mtk_vcodec_mem bs_buf;
883 struct venc_done_result enc_result; 874 struct venc_done_result enc_result;
884 875
@@ -911,6 +902,15 @@ static int mtk_venc_encode_header(void *priv)
911 mtk_v4l2_err("venc_if_encode failed=%d", ret); 902 mtk_v4l2_err("venc_if_encode failed=%d", ret);
912 return -EINVAL; 903 return -EINVAL;
913 } 904 }
905 src_buf = v4l2_m2m_next_src_buf(ctx->m2m_ctx);
906 if (src_buf) {
907 src_vb2_v4l2 = to_vb2_v4l2_buffer(src_buf);
908 dst_vb2_v4l2 = to_vb2_v4l2_buffer(dst_buf);
909 dst_buf->timestamp = src_buf->timestamp;
910 dst_vb2_v4l2->timecode = src_vb2_v4l2->timecode;
911 } else {
912 mtk_v4l2_err("No timestamp for the header buffer.");
913 }
914 914
915 ctx->state = MTK_STATE_HEADER; 915 ctx->state = MTK_STATE_HEADER;
916 dst_buf->planes[0].bytesused = enc_result.bs_size; 916 dst_buf->planes[0].bytesused = enc_result.bs_size;
@@ -1003,7 +1003,7 @@ static void mtk_venc_worker(struct work_struct *work)
1003 struct mtk_vcodec_mem bs_buf; 1003 struct mtk_vcodec_mem bs_buf;
1004 struct venc_done_result enc_result; 1004 struct venc_done_result enc_result;
1005 int ret, i; 1005 int ret, i;
1006 struct vb2_v4l2_buffer *vb2_v4l2; 1006 struct vb2_v4l2_buffer *dst_vb2_v4l2, *src_vb2_v4l2;
1007 1007
1008 /* check dst_buf, dst_buf may be removed in device_run 1008 /* check dst_buf, dst_buf may be removed in device_run
1009 * to stored encdoe header so we need check dst_buf and 1009 * to stored encdoe header so we need check dst_buf and
@@ -1043,9 +1043,14 @@ static void mtk_venc_worker(struct work_struct *work)
1043 ret = venc_if_encode(ctx, VENC_START_OPT_ENCODE_FRAME, 1043 ret = venc_if_encode(ctx, VENC_START_OPT_ENCODE_FRAME,
1044 &frm_buf, &bs_buf, &enc_result); 1044 &frm_buf, &bs_buf, &enc_result);
1045 1045
1046 vb2_v4l2 = container_of(dst_buf, struct vb2_v4l2_buffer, vb2_buf); 1046 src_vb2_v4l2 = to_vb2_v4l2_buffer(src_buf);
1047 dst_vb2_v4l2 = to_vb2_v4l2_buffer(dst_buf);
1048
1049 dst_buf->timestamp = src_buf->timestamp;
1050 dst_vb2_v4l2->timecode = src_vb2_v4l2->timecode;
1051
1047 if (enc_result.is_key_frm) 1052 if (enc_result.is_key_frm)
1048 vb2_v4l2->flags |= V4L2_BUF_FLAG_KEYFRAME; 1053 dst_vb2_v4l2->flags |= V4L2_BUF_FLAG_KEYFRAME;
1049 1054
1050 if (ret) { 1055 if (ret) {
1051 v4l2_m2m_buf_done(to_vb2_v4l2_buffer(src_buf), 1056 v4l2_m2m_buf_done(to_vb2_v4l2_buffer(src_buf),
@@ -1217,7 +1222,7 @@ int mtk_vcodec_enc_ctrls_setup(struct mtk_vcodec_ctx *ctx)
1217 0, V4L2_MPEG_VIDEO_HEADER_MODE_SEPARATE); 1222 0, V4L2_MPEG_VIDEO_HEADER_MODE_SEPARATE);
1218 v4l2_ctrl_new_std_menu(handler, ops, V4L2_CID_MPEG_VIDEO_H264_PROFILE, 1223 v4l2_ctrl_new_std_menu(handler, ops, V4L2_CID_MPEG_VIDEO_H264_PROFILE,
1219 V4L2_MPEG_VIDEO_H264_PROFILE_HIGH, 1224 V4L2_MPEG_VIDEO_H264_PROFILE_HIGH,
1220 0, V4L2_MPEG_VIDEO_H264_PROFILE_MAIN); 1225 0, V4L2_MPEG_VIDEO_H264_PROFILE_HIGH);
1221 v4l2_ctrl_new_std_menu(handler, ops, V4L2_CID_MPEG_VIDEO_H264_LEVEL, 1226 v4l2_ctrl_new_std_menu(handler, ops, V4L2_CID_MPEG_VIDEO_H264_LEVEL,
1222 V4L2_MPEG_VIDEO_H264_LEVEL_4_2, 1227 V4L2_MPEG_VIDEO_H264_LEVEL_4_2,
1223 0, V4L2_MPEG_VIDEO_H264_LEVEL_4_0); 1228 0, V4L2_MPEG_VIDEO_H264_LEVEL_4_0);
@@ -1288,5 +1293,10 @@ int mtk_venc_lock(struct mtk_vcodec_ctx *ctx)
1288 1293
1289void mtk_vcodec_enc_release(struct mtk_vcodec_ctx *ctx) 1294void mtk_vcodec_enc_release(struct mtk_vcodec_ctx *ctx)
1290{ 1295{
1291 venc_if_deinit(ctx); 1296 int ret = venc_if_deinit(ctx);
1297
1298 if (ret)
1299 mtk_v4l2_err("venc_if_deinit failed=%d", ret);
1300
1301 ctx->state = MTK_STATE_FREE;
1292} 1302}
diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc_drv.c b/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc_drv.c
index c7806ecda2dd..5cd2151431bf 100644
--- a/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc_drv.c
+++ b/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc_drv.c
@@ -218,11 +218,15 @@ static int fops_vcodec_release(struct file *file)
218 mtk_v4l2_debug(1, "[%d] encoder", ctx->id); 218 mtk_v4l2_debug(1, "[%d] encoder", ctx->id);
219 mutex_lock(&dev->dev_mutex); 219 mutex_lock(&dev->dev_mutex);
220 220
221 /*
222 * Call v4l2_m2m_ctx_release to make sure the worker thread is not
223 * running after venc_if_deinit.
224 */
225 v4l2_m2m_ctx_release(ctx->m2m_ctx);
221 mtk_vcodec_enc_release(ctx); 226 mtk_vcodec_enc_release(ctx);
222 v4l2_fh_del(&ctx->fh); 227 v4l2_fh_del(&ctx->fh);
223 v4l2_fh_exit(&ctx->fh); 228 v4l2_fh_exit(&ctx->fh);
224 v4l2_ctrl_handler_free(&ctx->ctrl_hdl); 229 v4l2_ctrl_handler_free(&ctx->ctrl_hdl);
225 v4l2_m2m_ctx_release(ctx->m2m_ctx);
226 230
227 list_del_init(&ctx->list); 231 list_del_init(&ctx->list);
228 dev->num_instances--; 232 dev->num_instances--;
diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_intr.h b/drivers/media/platform/mtk-vcodec/mtk_vcodec_intr.h
index 33e890f5aa9c..12131855b46a 100644
--- a/drivers/media/platform/mtk-vcodec/mtk_vcodec_intr.h
+++ b/drivers/media/platform/mtk-vcodec/mtk_vcodec_intr.h
@@ -16,7 +16,6 @@
16#define _MTK_VCODEC_INTR_H_ 16#define _MTK_VCODEC_INTR_H_
17 17
18#define MTK_INST_IRQ_RECEIVED 0x1 18#define MTK_INST_IRQ_RECEIVED 0x1
19#define MTK_INST_WORK_THREAD_ABORT_DONE 0x2
20 19
21struct mtk_vcodec_ctx; 20struct mtk_vcodec_ctx;
22 21
diff --git a/drivers/media/platform/mtk-vcodec/venc/venc_h264_if.c b/drivers/media/platform/mtk-vcodec/venc/venc_h264_if.c
index 9a600525b3c1..63d4be4ff327 100644
--- a/drivers/media/platform/mtk-vcodec/venc/venc_h264_if.c
+++ b/drivers/media/platform/mtk-vcodec/venc/venc_h264_if.c
@@ -61,6 +61,8 @@ enum venc_h264_bs_mode {
61 61
62/* 62/*
63 * struct venc_h264_vpu_config - Structure for h264 encoder configuration 63 * struct venc_h264_vpu_config - Structure for h264 encoder configuration
64 * AP-W/R : AP is writer/reader on this item
65 * VPU-W/R: VPU is write/reader on this item
64 * @input_fourcc: input fourcc 66 * @input_fourcc: input fourcc
65 * @bitrate: target bitrate (in bps) 67 * @bitrate: target bitrate (in bps)
66 * @pic_w: picture width. Picture size is visible stream resolution, in pixels, 68 * @pic_w: picture width. Picture size is visible stream resolution, in pixels,
@@ -94,13 +96,13 @@ struct venc_h264_vpu_config {
94 96
95/* 97/*
96 * struct venc_h264_vpu_buf - Structure for buffer information 98 * struct venc_h264_vpu_buf - Structure for buffer information
97 * @align: buffer alignment (in bytes) 99 * AP-W/R : AP is writer/reader on this item
100 * VPU-W/R: VPU is write/reader on this item
98 * @iova: IO virtual address 101 * @iova: IO virtual address
99 * @vpua: VPU side memory addr which is used by RC_CODE 102 * @vpua: VPU side memory addr which is used by RC_CODE
100 * @size: buffer size (in bytes) 103 * @size: buffer size (in bytes)
101 */ 104 */
102struct venc_h264_vpu_buf { 105struct venc_h264_vpu_buf {
103 u32 align;
104 u32 iova; 106 u32 iova;
105 u32 vpua; 107 u32 vpua;
106 u32 size; 108 u32 size;
@@ -108,6 +110,8 @@ struct venc_h264_vpu_buf {
108 110
109/* 111/*
110 * struct venc_h264_vsi - Structure for VPU driver control and info share 112 * struct venc_h264_vsi - Structure for VPU driver control and info share
113 * AP-W/R : AP is writer/reader on this item
114 * VPU-W/R: VPU is write/reader on this item
111 * This structure is allocated in VPU side and shared to AP side. 115 * This structure is allocated in VPU side and shared to AP side.
112 * @config: h264 encoder configuration 116 * @config: h264 encoder configuration
113 * @work_bufs: working buffer information in VPU side 117 * @work_bufs: working buffer information in VPU side
@@ -150,12 +154,6 @@ struct venc_h264_inst {
150 struct mtk_vcodec_ctx *ctx; 154 struct mtk_vcodec_ctx *ctx;
151}; 155};
152 156
153static inline void h264_write_reg(struct venc_h264_inst *inst, u32 addr,
154 u32 val)
155{
156 writel(val, inst->hw_base + addr);
157}
158
159static inline u32 h264_read_reg(struct venc_h264_inst *inst, u32 addr) 157static inline u32 h264_read_reg(struct venc_h264_inst *inst, u32 addr)
160{ 158{
161 return readl(inst->hw_base + addr); 159 return readl(inst->hw_base + addr);
@@ -214,6 +212,8 @@ static unsigned int h264_get_level(struct venc_h264_inst *inst,
214 return 40; 212 return 40;
215 case V4L2_MPEG_VIDEO_H264_LEVEL_4_1: 213 case V4L2_MPEG_VIDEO_H264_LEVEL_4_1:
216 return 41; 214 return 41;
215 case V4L2_MPEG_VIDEO_H264_LEVEL_4_2:
216 return 42;
217 default: 217 default:
218 mtk_vcodec_debug(inst, "unsupported level %d", level); 218 mtk_vcodec_debug(inst, "unsupported level %d", level);
219 return 31; 219 return 31;
diff --git a/drivers/media/platform/mtk-vcodec/venc/venc_vp8_if.c b/drivers/media/platform/mtk-vcodec/venc/venc_vp8_if.c
index 60bbcd2a0510..6d9758479f9a 100644
--- a/drivers/media/platform/mtk-vcodec/venc/venc_vp8_if.c
+++ b/drivers/media/platform/mtk-vcodec/venc/venc_vp8_if.c
@@ -56,6 +56,8 @@ enum venc_vp8_vpu_work_buf {
56 56
57/* 57/*
58 * struct venc_vp8_vpu_config - Structure for vp8 encoder configuration 58 * struct venc_vp8_vpu_config - Structure for vp8 encoder configuration
59 * AP-W/R : AP is writer/reader on this item
60 * VPU-W/R: VPU is write/reader on this item
59 * @input_fourcc: input fourcc 61 * @input_fourcc: input fourcc
60 * @bitrate: target bitrate (in bps) 62 * @bitrate: target bitrate (in bps)
61 * @pic_w: picture width. Picture size is visible stream resolution, in pixels, 63 * @pic_w: picture width. Picture size is visible stream resolution, in pixels,
@@ -83,14 +85,14 @@ struct venc_vp8_vpu_config {
83}; 85};
84 86
85/* 87/*
86 * struct venc_vp8_vpu_buf -Structure for buffer information 88 * struct venc_vp8_vpu_buf - Structure for buffer information
87 * @align: buffer alignment (in bytes) 89 * AP-W/R : AP is writer/reader on this item
90 * VPU-W/R: VPU is write/reader on this item
88 * @iova: IO virtual address 91 * @iova: IO virtual address
89 * @vpua: VPU side memory addr which is used by RC_CODE 92 * @vpua: VPU side memory addr which is used by RC_CODE
90 * @size: buffer size (in bytes) 93 * @size: buffer size (in bytes)
91 */ 94 */
92struct venc_vp8_vpu_buf { 95struct venc_vp8_vpu_buf {
93 u32 align;
94 u32 iova; 96 u32 iova;
95 u32 vpua; 97 u32 vpua;
96 u32 size; 98 u32 size;
@@ -98,6 +100,8 @@ struct venc_vp8_vpu_buf {
98 100
99/* 101/*
100 * struct venc_vp8_vsi - Structure for VPU driver control and info share 102 * struct venc_vp8_vsi - Structure for VPU driver control and info share
103 * AP-W/R : AP is writer/reader on this item
104 * VPU-W/R: VPU is write/reader on this item
101 * This structure is allocated in VPU side and shared to AP side. 105 * This structure is allocated in VPU side and shared to AP side.
102 * @config: vp8 encoder configuration 106 * @config: vp8 encoder configuration
103 * @work_bufs: working buffer information in VPU side 107 * @work_bufs: working buffer information in VPU side
@@ -138,12 +142,6 @@ struct venc_vp8_inst {
138 struct mtk_vcodec_ctx *ctx; 142 struct mtk_vcodec_ctx *ctx;
139}; 143};
140 144
141static inline void vp8_enc_write_reg(struct venc_vp8_inst *inst, u32 addr,
142 u32 val)
143{
144 writel(val, inst->hw_base + addr);
145}
146
147static inline u32 vp8_enc_read_reg(struct venc_vp8_inst *inst, u32 addr) 145static inline u32 vp8_enc_read_reg(struct venc_vp8_inst *inst, u32 addr)
148{ 146{
149 return readl(inst->hw_base + addr); 147 return readl(inst->hw_base + addr);
diff --git a/drivers/media/platform/rcar-fcp.c b/drivers/media/platform/rcar-fcp.c
index 6a7bcc3028b1..bc50c69ee0c5 100644
--- a/drivers/media/platform/rcar-fcp.c
+++ b/drivers/media/platform/rcar-fcp.c
@@ -99,10 +99,16 @@ EXPORT_SYMBOL_GPL(rcar_fcp_put);
99 */ 99 */
100int rcar_fcp_enable(struct rcar_fcp_device *fcp) 100int rcar_fcp_enable(struct rcar_fcp_device *fcp)
101{ 101{
102 int error;
103
102 if (!fcp) 104 if (!fcp)
103 return 0; 105 return 0;
104 106
105 return pm_runtime_get_sync(fcp->dev); 107 error = pm_runtime_get_sync(fcp->dev);
108 if (error < 0)
109 return error;
110
111 return 0;
106} 112}
107EXPORT_SYMBOL_GPL(rcar_fcp_enable); 113EXPORT_SYMBOL_GPL(rcar_fcp_enable);
108 114
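
The rcar-fcp change accounts for pm_runtime_get_sync() returning a negative errno on failure but possibly a positive value (device already active) on success, so passing its result straight through breaks callers that expect exactly 0 on success. A small compilable model of the pattern; fake_runtime_get_sync and fcp_enable_model are stand-ins, not kernel API:

#include <errno.h>
#include <stdio.h>

/* Stand-in for an API in the style of pm_runtime_get_sync(): negative
 * errno on failure, 0 or a positive value on success. */
static int fake_runtime_get_sync(int fail)
{
	return fail ? -EAGAIN : 1;
}

/* A caller whose contract is "0 on success" must not pass the positive
 * success value through, which is what the hunk above fixes. */
static int fcp_enable_model(int fail)
{
	int error = fake_runtime_get_sync(fail);

	if (error < 0)
		return error;

	return 0;
}

int main(void)
{
	printf("success path: %d, failure path: %d\n",
	       fcp_enable_model(0), fcp_enable_model(1));
	return 0;
}
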
diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c
index 32380d5d4f6b..767af2026f8b 100644
--- a/drivers/mmc/host/dw_mmc.c
+++ b/drivers/mmc/host/dw_mmc.c
@@ -1112,11 +1112,12 @@ static void dw_mci_setup_bus(struct dw_mci_slot *slot, bool force_clkinit)
1112 1112
1113 div = (host->bus_hz != clock) ? DIV_ROUND_UP(div, 2) : 0; 1113 div = (host->bus_hz != clock) ? DIV_ROUND_UP(div, 2) : 0;
1114 1114
1115 dev_info(&slot->mmc->class_dev, 1115 if (clock != slot->__clk_old || force_clkinit)
1116 "Bus speed (slot %d) = %dHz (slot req %dHz, actual %dHZ div = %d)\n", 1116 dev_info(&slot->mmc->class_dev,
1117 slot->id, host->bus_hz, clock, 1117 "Bus speed (slot %d) = %dHz (slot req %dHz, actual %dHZ div = %d)\n",
1118 div ? ((host->bus_hz / div) >> 1) : 1118 slot->id, host->bus_hz, clock,
1119 host->bus_hz, div); 1119 div ? ((host->bus_hz / div) >> 1) :
1120 host->bus_hz, div);
1120 1121
1121 /* disable clock */ 1122 /* disable clock */
1122 mci_writel(host, CLKENA, 0); 1123 mci_writel(host, CLKENA, 0);
@@ -1139,6 +1140,9 @@ static void dw_mci_setup_bus(struct dw_mci_slot *slot, bool force_clkinit)
1139 1140
1140 /* inform CIU */ 1141 /* inform CIU */
1141 mci_send_cmd(slot, sdmmc_cmd_bits, 0); 1142 mci_send_cmd(slot, sdmmc_cmd_bits, 0);
1143
1144 /* keep the last clock value that was requested from core */
1145 slot->__clk_old = clock;
1142 } 1146 }
1143 1147
1144 host->current_speed = clock; 1148 host->current_speed = clock;
diff --git a/drivers/mmc/host/dw_mmc.h b/drivers/mmc/host/dw_mmc.h
index 9e740bc232a8..e8cd2dec3263 100644
--- a/drivers/mmc/host/dw_mmc.h
+++ b/drivers/mmc/host/dw_mmc.h
@@ -249,6 +249,8 @@ extern int dw_mci_resume(struct dw_mci *host);
249 * @queue_node: List node for placing this node in the @queue list of 249 * @queue_node: List node for placing this node in the @queue list of
250 * &struct dw_mci. 250 * &struct dw_mci.
251 * @clock: Clock rate configured by set_ios(). Protected by host->lock. 251 * @clock: Clock rate configured by set_ios(). Protected by host->lock.
252 * @__clk_old: The last clock value that was requested from core.
253 * Keeping track of this helps us to avoid spamming the console.
252 * @flags: Random state bits associated with the slot. 254 * @flags: Random state bits associated with the slot.
253 * @id: Number of this slot. 255 * @id: Number of this slot.
254 * @sdio_id: Number of this slot in the SDIO interrupt registers. 256 * @sdio_id: Number of this slot in the SDIO interrupt registers.
@@ -263,6 +265,7 @@ struct dw_mci_slot {
263 struct list_head queue_node; 265 struct list_head queue_node;
264 266
265 unsigned int clock; 267 unsigned int clock;
268 unsigned int __clk_old;
266 269
267 unsigned long flags; 270 unsigned long flags;
268#define DW_MMC_CARD_PRESENT 0 271#define DW_MMC_CARD_PRESENT 0
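
The dw_mmc pair of hunks caches the last requested rate in the new __clk_old field so the bus-speed message is printed only when the request changes (or re-initialisation is forced) rather than on every set_ios() call. A compilable model of that gate, with illustrative names:

#include <stdbool.h>
#include <stdio.h>

struct slot_model {
	unsigned int clk_old;	/* last rate that was requested and logged */
};

/* Log only when the requested rate differs from the cached one or a
 * re-init is forced, then remember the request. */
static void setup_bus_model(struct slot_model *slot, unsigned int clock,
			    bool force_clkinit)
{
	if (clock != slot->clk_old || force_clkinit)
		printf("Bus speed = %u Hz\n", clock);

	/* ... divider programming and CIU update command go here ... */

	slot->clk_old = clock;
}

int main(void)
{
	struct slot_model s = { 0 };

	setup_bus_model(&s, 400000, false);	/* logged */
	setup_bus_model(&s, 400000, false);	/* silent: unchanged request */
	setup_bus_model(&s, 50000000, false);	/* logged */
	return 0;
}
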
diff --git a/drivers/mtd/nand/davinci_nand.c b/drivers/mtd/nand/davinci_nand.c
index cc07ba0f044d..27fa8b87cd5f 100644
--- a/drivers/mtd/nand/davinci_nand.c
+++ b/drivers/mtd/nand/davinci_nand.c
@@ -240,6 +240,9 @@ static void nand_davinci_hwctl_4bit(struct mtd_info *mtd, int mode)
240 unsigned long flags; 240 unsigned long flags;
241 u32 val; 241 u32 val;
242 242
243 /* Reset ECC hardware */
244 davinci_nand_readl(info, NAND_4BIT_ECC1_OFFSET);
245
243 spin_lock_irqsave(&davinci_nand_lock, flags); 246 spin_lock_irqsave(&davinci_nand_lock, flags);
244 247
245 /* Start 4-bit ECC calculation for read/write */ 248 /* Start 4-bit ECC calculation for read/write */
diff --git a/drivers/mtd/nand/mtk_ecc.c b/drivers/mtd/nand/mtk_ecc.c
index 25a4fbd4d24a..d54f666417e1 100644
--- a/drivers/mtd/nand/mtk_ecc.c
+++ b/drivers/mtd/nand/mtk_ecc.c
@@ -366,7 +366,8 @@ int mtk_ecc_encode(struct mtk_ecc *ecc, struct mtk_ecc_config *config,
366 u8 *data, u32 bytes) 366 u8 *data, u32 bytes)
367{ 367{
368 dma_addr_t addr; 368 dma_addr_t addr;
369 u32 *p, len, i; 369 u8 *p;
370 u32 len, i, val;
370 int ret = 0; 371 int ret = 0;
371 372
372 addr = dma_map_single(ecc->dev, data, bytes, DMA_TO_DEVICE); 373 addr = dma_map_single(ecc->dev, data, bytes, DMA_TO_DEVICE);
@@ -392,11 +393,14 @@ int mtk_ecc_encode(struct mtk_ecc *ecc, struct mtk_ecc_config *config,
392 393
393 /* Program ECC bytes to OOB: per sector oob = FDM + ECC + SPARE */ 394 /* Program ECC bytes to OOB: per sector oob = FDM + ECC + SPARE */
394 len = (config->strength * ECC_PARITY_BITS + 7) >> 3; 395 len = (config->strength * ECC_PARITY_BITS + 7) >> 3;
395 p = (u32 *)(data + bytes); 396 p = data + bytes;
396 397
397 /* write the parity bytes generated by the ECC back to the OOB region */ 398 /* write the parity bytes generated by the ECC back to the OOB region */
398 for (i = 0; i < len; i++) 399 for (i = 0; i < len; i++) {
399 p[i] = readl(ecc->regs + ECC_ENCPAR(i)); 400 if ((i % 4) == 0)
401 val = readl(ecc->regs + ECC_ENCPAR(i / 4));
402 p[i] = (val >> ((i % 4) * 8)) & 0xff;
403 }
400timeout: 404timeout:
401 405
402 dma_unmap_single(ecc->dev, addr, bytes, DMA_TO_DEVICE); 406 dma_unmap_single(ecc->dev, addr, bytes, DMA_TO_DEVICE);
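
The mtk_ecc hunk stops casting the OOB pointer to u32 and instead reads one 32-bit ECC_ENCPAR register per group of four parity bytes, storing the result byte by byte; that keeps the copy correct when the parity length is not a multiple of four and avoids writing past the buffer. A standalone model of the extraction (the register read is replaced by an array access here):

#include <stdint.h>
#include <stdio.h>

/* Copy 'len' parity bytes out of 32-bit parity words into a byte-wide
 * OOB buffer: one word read per group of four bytes, one byte stored per
 * iteration, so 'len' need not be word aligned. */
static void copy_parity(uint8_t *oob, const uint32_t *regs, unsigned int len)
{
	uint32_t val = 0;
	unsigned int i;

	for (i = 0; i < len; i++) {
		if ((i % 4) == 0)
			val = regs[i / 4];	/* models readl(ECC_ENCPAR(i / 4)) */
		oob[i] = (val >> ((i % 4) * 8)) & 0xff;
	}
}

int main(void)
{
	const uint32_t regs[2] = { 0x44332211, 0x00000055 };
	uint8_t oob[5];
	unsigned int i;

	copy_parity(oob, regs, sizeof(oob));	/* 5 bytes: not word aligned */
	for (i = 0; i < sizeof(oob); i++)
		printf("%02x ", oob[i]);
	printf("\n");
	return 0;
}
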
diff --git a/drivers/mtd/nand/mtk_nand.c b/drivers/mtd/nand/mtk_nand.c
index ddaa2acb9dd7..5223a2182ee4 100644
--- a/drivers/mtd/nand/mtk_nand.c
+++ b/drivers/mtd/nand/mtk_nand.c
@@ -93,6 +93,9 @@
93#define NFI_FSM_MASK (0xf << 16) 93#define NFI_FSM_MASK (0xf << 16)
94#define NFI_ADDRCNTR (0x70) 94#define NFI_ADDRCNTR (0x70)
95#define CNTR_MASK GENMASK(16, 12) 95#define CNTR_MASK GENMASK(16, 12)
96#define ADDRCNTR_SEC_SHIFT (12)
97#define ADDRCNTR_SEC(val) \
98 (((val) & CNTR_MASK) >> ADDRCNTR_SEC_SHIFT)
96#define NFI_STRADDR (0x80) 99#define NFI_STRADDR (0x80)
97#define NFI_BYTELEN (0x84) 100#define NFI_BYTELEN (0x84)
98#define NFI_CSEL (0x90) 101#define NFI_CSEL (0x90)
@@ -699,7 +702,7 @@ static int mtk_nfc_do_write_page(struct mtd_info *mtd, struct nand_chip *chip,
699 } 702 }
700 703
701 ret = readl_poll_timeout_atomic(nfc->regs + NFI_ADDRCNTR, reg, 704 ret = readl_poll_timeout_atomic(nfc->regs + NFI_ADDRCNTR, reg,
702 (reg & CNTR_MASK) >= chip->ecc.steps, 705 ADDRCNTR_SEC(reg) >= chip->ecc.steps,
703 10, MTK_TIMEOUT); 706 10, MTK_TIMEOUT);
704 if (ret) 707 if (ret)
705 dev_err(dev, "hwecc write timeout\n"); 708 dev_err(dev, "hwecc write timeout\n");
@@ -902,7 +905,7 @@ static int mtk_nfc_read_subpage(struct mtd_info *mtd, struct nand_chip *chip,
902 dev_warn(nfc->dev, "read ahb/dma done timeout\n"); 905 dev_warn(nfc->dev, "read ahb/dma done timeout\n");
903 906
904 rc = readl_poll_timeout_atomic(nfc->regs + NFI_BYTELEN, reg, 907 rc = readl_poll_timeout_atomic(nfc->regs + NFI_BYTELEN, reg,
905 (reg & CNTR_MASK) >= sectors, 10, 908 ADDRCNTR_SEC(reg) >= sectors, 10,
906 MTK_TIMEOUT); 909 MTK_TIMEOUT);
907 if (rc < 0) { 910 if (rc < 0) {
908 dev_err(nfc->dev, "subpage done timeout\n"); 911 dev_err(nfc->dev, "subpage done timeout\n");
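
The mtk_nand hunks introduce ADDRCNTR_SEC() because the sector counter sits in bits 16:12 of NFI_ADDRCNTR; comparing the raw masked register against a sector count exits the poll as soon as the field is non-zero. A small model of why the shift matters (the register read is faked with a constant):

#include <stdint.h>
#include <stdio.h>

#define CNTR_SHIFT		12
#define CNTR_MASK		(0x1fu << CNTR_SHIFT)	/* bits 16:12 */
#define ADDRCNTR_SEC(val)	(((val) & CNTR_MASK) >> CNTR_SHIFT)

int main(void)
{
	uint32_t reg = 1u << CNTR_SHIFT;	/* hardware reports 1 sector done */
	unsigned int steps = 4;			/* we are waiting for 4 */

	/* The raw masked value (4096 here) already satisfies ">= 4", so the
	 * unshifted poll condition returns too early; the extracted field
	 * does not. */
	printf("raw=%u field=%u raw>=steps:%d field>=steps:%d\n",
	       (unsigned int)(reg & CNTR_MASK),
	       (unsigned int)ADDRCNTR_SEC(reg),
	       (reg & CNTR_MASK) >= steps, ADDRCNTR_SEC(reg) >= steps);
	return 0;
}
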
diff --git a/drivers/mtd/nand/mxc_nand.c b/drivers/mtd/nand/mxc_nand.c
index 5173fadc9a4e..57cbe2b83849 100644
--- a/drivers/mtd/nand/mxc_nand.c
+++ b/drivers/mtd/nand/mxc_nand.c
@@ -943,7 +943,7 @@ static int mxc_v2_ooblayout_free(struct mtd_info *mtd, int section,
943 struct nand_chip *nand_chip = mtd_to_nand(mtd); 943 struct nand_chip *nand_chip = mtd_to_nand(mtd);
944 int stepsize = nand_chip->ecc.bytes == 9 ? 16 : 26; 944 int stepsize = nand_chip->ecc.bytes == 9 ? 16 : 26;
945 945
946 if (section > nand_chip->ecc.steps) 946 if (section >= nand_chip->ecc.steps)
947 return -ERANGE; 947 return -ERANGE;
948 948
949 if (!section) { 949 if (!section) {
diff --git a/drivers/mtd/nand/omap2.c b/drivers/mtd/nand/omap2.c
index a59361c36f40..5513bfd9cdc9 100644
--- a/drivers/mtd/nand/omap2.c
+++ b/drivers/mtd/nand/omap2.c
@@ -2169,7 +2169,7 @@ scan_tail:
2169 return 0; 2169 return 0;
2170 2170
2171return_error: 2171return_error:
2172 if (info->dma) 2172 if (!IS_ERR_OR_NULL(info->dma))
2173 dma_release_channel(info->dma); 2173 dma_release_channel(info->dma);
2174 if (nand_chip->ecc.priv) { 2174 if (nand_chip->ecc.priv) {
2175 nand_bch_free(nand_chip->ecc.priv); 2175 nand_bch_free(nand_chip->ecc.priv);
diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c
index 41c0fc9f3b14..16f7cadda5c3 100644
--- a/drivers/net/can/flexcan.c
+++ b/drivers/net/can/flexcan.c
@@ -1268,11 +1268,10 @@ static int __maybe_unused flexcan_suspend(struct device *device)
1268 struct flexcan_priv *priv = netdev_priv(dev); 1268 struct flexcan_priv *priv = netdev_priv(dev);
1269 int err; 1269 int err;
1270 1270
1271 err = flexcan_chip_disable(priv);
1272 if (err)
1273 return err;
1274
1275 if (netif_running(dev)) { 1271 if (netif_running(dev)) {
1272 err = flexcan_chip_disable(priv);
1273 if (err)
1274 return err;
1276 netif_stop_queue(dev); 1275 netif_stop_queue(dev);
1277 netif_device_detach(dev); 1276 netif_device_detach(dev);
1278 } 1277 }
@@ -1285,13 +1284,17 @@ static int __maybe_unused flexcan_resume(struct device *device)
1285{ 1284{
1286 struct net_device *dev = dev_get_drvdata(device); 1285 struct net_device *dev = dev_get_drvdata(device);
1287 struct flexcan_priv *priv = netdev_priv(dev); 1286 struct flexcan_priv *priv = netdev_priv(dev);
1287 int err;
1288 1288
1289 priv->can.state = CAN_STATE_ERROR_ACTIVE; 1289 priv->can.state = CAN_STATE_ERROR_ACTIVE;
1290 if (netif_running(dev)) { 1290 if (netif_running(dev)) {
1291 netif_device_attach(dev); 1291 netif_device_attach(dev);
1292 netif_start_queue(dev); 1292 netif_start_queue(dev);
1293 err = flexcan_chip_enable(priv);
1294 if (err)
1295 return err;
1293 } 1296 }
1294 return flexcan_chip_enable(priv); 1297 return 0;
1295} 1298}
1296 1299
1297static SIMPLE_DEV_PM_OPS(flexcan_pm_ops, flexcan_suspend, flexcan_resume); 1300static SIMPLE_DEV_PM_OPS(flexcan_pm_ops, flexcan_suspend, flexcan_resume);
diff --git a/drivers/net/can/ifi_canfd/ifi_canfd.c b/drivers/net/can/ifi_canfd/ifi_canfd.c
index 2d1d22eec750..368bb0710d8f 100644
--- a/drivers/net/can/ifi_canfd/ifi_canfd.c
+++ b/drivers/net/can/ifi_canfd/ifi_canfd.c
@@ -81,6 +81,10 @@
81#define IFI_CANFD_TIME_SET_TIMEA_4_12_6_6 BIT(15) 81#define IFI_CANFD_TIME_SET_TIMEA_4_12_6_6 BIT(15)
82 82
83#define IFI_CANFD_TDELAY 0x1c 83#define IFI_CANFD_TDELAY 0x1c
84#define IFI_CANFD_TDELAY_DEFAULT 0xb
85#define IFI_CANFD_TDELAY_MASK 0x3fff
86#define IFI_CANFD_TDELAY_ABS BIT(14)
87#define IFI_CANFD_TDELAY_EN BIT(15)
84 88
85#define IFI_CANFD_ERROR 0x20 89#define IFI_CANFD_ERROR 0x20
86#define IFI_CANFD_ERROR_TX_OFFSET 0 90#define IFI_CANFD_ERROR_TX_OFFSET 0
@@ -641,7 +645,7 @@ static void ifi_canfd_set_bittiming(struct net_device *ndev)
641 struct ifi_canfd_priv *priv = netdev_priv(ndev); 645 struct ifi_canfd_priv *priv = netdev_priv(ndev);
642 const struct can_bittiming *bt = &priv->can.bittiming; 646 const struct can_bittiming *bt = &priv->can.bittiming;
643 const struct can_bittiming *dbt = &priv->can.data_bittiming; 647 const struct can_bittiming *dbt = &priv->can.data_bittiming;
644 u16 brp, sjw, tseg1, tseg2; 648 u16 brp, sjw, tseg1, tseg2, tdc;
645 649
646 /* Configure bit timing */ 650 /* Configure bit timing */
647 brp = bt->brp - 2; 651 brp = bt->brp - 2;
@@ -664,6 +668,11 @@ static void ifi_canfd_set_bittiming(struct net_device *ndev)
664 (brp << IFI_CANFD_TIME_PRESCALE_OFF) | 668 (brp << IFI_CANFD_TIME_PRESCALE_OFF) |
665 (sjw << IFI_CANFD_TIME_SJW_OFF_7_9_8_8), 669 (sjw << IFI_CANFD_TIME_SJW_OFF_7_9_8_8),
666 priv->base + IFI_CANFD_FTIME); 670 priv->base + IFI_CANFD_FTIME);
671
672 /* Configure transmitter delay */
673 tdc = (dbt->brp * (dbt->phase_seg1 + 1)) & IFI_CANFD_TDELAY_MASK;
674 writel(IFI_CANFD_TDELAY_EN | IFI_CANFD_TDELAY_ABS | tdc,
675 priv->base + IFI_CANFD_TDELAY);
667} 676}
668 677
669static void ifi_canfd_set_filter(struct net_device *ndev, const u32 id, 678static void ifi_canfd_set_filter(struct net_device *ndev, const u32 id,
diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c
index 8fc3f3c137f8..505ceaf451e2 100644
--- a/drivers/net/ethernet/broadcom/bnx2.c
+++ b/drivers/net/ethernet/broadcom/bnx2.c
@@ -6356,10 +6356,6 @@ bnx2_open(struct net_device *dev)
6356 struct bnx2 *bp = netdev_priv(dev); 6356 struct bnx2 *bp = netdev_priv(dev);
6357 int rc; 6357 int rc;
6358 6358
6359 rc = bnx2_request_firmware(bp);
6360 if (rc < 0)
6361 goto out;
6362
6363 netif_carrier_off(dev); 6359 netif_carrier_off(dev);
6364 6360
6365 bnx2_disable_int(bp); 6361 bnx2_disable_int(bp);
@@ -6428,7 +6424,6 @@ open_err:
6428 bnx2_free_irq(bp); 6424 bnx2_free_irq(bp);
6429 bnx2_free_mem(bp); 6425 bnx2_free_mem(bp);
6430 bnx2_del_napi(bp); 6426 bnx2_del_napi(bp);
6431 bnx2_release_firmware(bp);
6432 goto out; 6427 goto out;
6433} 6428}
6434 6429
@@ -8575,6 +8570,12 @@ bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
8575 8570
8576 pci_set_drvdata(pdev, dev); 8571 pci_set_drvdata(pdev, dev);
8577 8572
8573 rc = bnx2_request_firmware(bp);
8574 if (rc < 0)
8575 goto error;
8576
8577
8578 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
8578 memcpy(dev->dev_addr, bp->mac_addr, ETH_ALEN); 8579 memcpy(dev->dev_addr, bp->mac_addr, ETH_ALEN);
8579 8580
8580 dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG | 8581 dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG |
@@ -8607,6 +8608,7 @@ bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
8607 return 0; 8608 return 0;
8608 8609
8609error: 8610error:
8611 bnx2_release_firmware(bp);
8610 pci_iounmap(pdev, bp->regview); 8612 pci_iounmap(pdev, bp->regview);
8611 pci_release_regions(pdev); 8613 pci_release_regions(pdev);
8612 pci_disable_device(pdev); 8614 pci_disable_device(pdev);
diff --git a/drivers/net/ethernet/brocade/bna/bnad_ethtool.c b/drivers/net/ethernet/brocade/bna/bnad_ethtool.c
index 0e4fdc3dd729..31f61a744d66 100644
--- a/drivers/net/ethernet/brocade/bna/bnad_ethtool.c
+++ b/drivers/net/ethernet/brocade/bna/bnad_ethtool.c
@@ -31,15 +31,10 @@
31#define BNAD_NUM_TXF_COUNTERS 12 31#define BNAD_NUM_TXF_COUNTERS 12
32#define BNAD_NUM_RXF_COUNTERS 10 32#define BNAD_NUM_RXF_COUNTERS 10
33#define BNAD_NUM_CQ_COUNTERS (3 + 5) 33#define BNAD_NUM_CQ_COUNTERS (3 + 5)
34#define BNAD_NUM_RXQ_COUNTERS 6 34#define BNAD_NUM_RXQ_COUNTERS 7
35#define BNAD_NUM_TXQ_COUNTERS 5 35#define BNAD_NUM_TXQ_COUNTERS 5
36 36
37#define BNAD_ETHTOOL_STATS_NUM \ 37static const char *bnad_net_stats_strings[] = {
38 (sizeof(struct rtnl_link_stats64) / sizeof(u64) + \
39 sizeof(struct bnad_drv_stats) / sizeof(u64) + \
40 offsetof(struct bfi_enet_stats, rxf_stats[0]) / sizeof(u64))
41
42static const char *bnad_net_stats_strings[BNAD_ETHTOOL_STATS_NUM] = {
43 "rx_packets", 38 "rx_packets",
44 "tx_packets", 39 "tx_packets",
45 "rx_bytes", 40 "rx_bytes",
@@ -50,22 +45,10 @@ static const char *bnad_net_stats_strings[BNAD_ETHTOOL_STATS_NUM] = {
50 "tx_dropped", 45 "tx_dropped",
51 "multicast", 46 "multicast",
52 "collisions", 47 "collisions",
53
54 "rx_length_errors", 48 "rx_length_errors",
55 "rx_over_errors",
56 "rx_crc_errors", 49 "rx_crc_errors",
57 "rx_frame_errors", 50 "rx_frame_errors",
58 "rx_fifo_errors",
59 "rx_missed_errors",
60
61 "tx_aborted_errors",
62 "tx_carrier_errors",
63 "tx_fifo_errors", 51 "tx_fifo_errors",
64 "tx_heartbeat_errors",
65 "tx_window_errors",
66
67 "rx_compressed",
68 "tx_compressed",
69 52
70 "netif_queue_stop", 53 "netif_queue_stop",
71 "netif_queue_wakeup", 54 "netif_queue_wakeup",
@@ -254,6 +237,8 @@ static const char *bnad_net_stats_strings[BNAD_ETHTOOL_STATS_NUM] = {
254 "fc_tx_fid_parity_errors", 237 "fc_tx_fid_parity_errors",
255}; 238};
256 239
240#define BNAD_ETHTOOL_STATS_NUM ARRAY_SIZE(bnad_net_stats_strings)
241
257static int 242static int
258bnad_get_settings(struct net_device *netdev, struct ethtool_cmd *cmd) 243bnad_get_settings(struct net_device *netdev, struct ethtool_cmd *cmd)
259{ 244{
@@ -658,6 +643,8 @@ bnad_get_strings(struct net_device *netdev, u32 stringset, u8 *string)
658 string += ETH_GSTRING_LEN; 643 string += ETH_GSTRING_LEN;
659 sprintf(string, "rxq%d_allocbuf_failed", q_num); 644 sprintf(string, "rxq%d_allocbuf_failed", q_num);
660 string += ETH_GSTRING_LEN; 645 string += ETH_GSTRING_LEN;
646 sprintf(string, "rxq%d_mapbuf_failed", q_num);
647 string += ETH_GSTRING_LEN;
661 sprintf(string, "rxq%d_producer_index", q_num); 648 sprintf(string, "rxq%d_producer_index", q_num);
662 string += ETH_GSTRING_LEN; 649 string += ETH_GSTRING_LEN;
663 sprintf(string, "rxq%d_consumer_index", q_num); 650 sprintf(string, "rxq%d_consumer_index", q_num);
@@ -678,6 +665,9 @@ bnad_get_strings(struct net_device *netdev, u32 stringset, u8 *string)
678 sprintf(string, "rxq%d_allocbuf_failed", 665 sprintf(string, "rxq%d_allocbuf_failed",
679 q_num); 666 q_num);
680 string += ETH_GSTRING_LEN; 667 string += ETH_GSTRING_LEN;
668 sprintf(string, "rxq%d_mapbuf_failed",
669 q_num);
670 string += ETH_GSTRING_LEN;
681 sprintf(string, "rxq%d_producer_index", 671 sprintf(string, "rxq%d_producer_index",
682 q_num); 672 q_num);
683 string += ETH_GSTRING_LEN; 673 string += ETH_GSTRING_LEN;
@@ -854,9 +844,9 @@ bnad_get_ethtool_stats(struct net_device *netdev, struct ethtool_stats *stats,
854 u64 *buf) 844 u64 *buf)
855{ 845{
856 struct bnad *bnad = netdev_priv(netdev); 846 struct bnad *bnad = netdev_priv(netdev);
857 int i, j, bi; 847 int i, j, bi = 0;
858 unsigned long flags; 848 unsigned long flags;
859 struct rtnl_link_stats64 *net_stats64; 849 struct rtnl_link_stats64 net_stats64;
860 u64 *stats64; 850 u64 *stats64;
861 u32 bmap; 851 u32 bmap;
862 852
@@ -871,14 +861,25 @@ bnad_get_ethtool_stats(struct net_device *netdev, struct ethtool_stats *stats,
871 * under the same lock 861 * under the same lock
872 */ 862 */
873 spin_lock_irqsave(&bnad->bna_lock, flags); 863 spin_lock_irqsave(&bnad->bna_lock, flags);
874 bi = 0;
875 memset(buf, 0, stats->n_stats * sizeof(u64));
876
877 net_stats64 = (struct rtnl_link_stats64 *)buf;
878 bnad_netdev_qstats_fill(bnad, net_stats64);
879 bnad_netdev_hwstats_fill(bnad, net_stats64);
880 864
881 bi = sizeof(*net_stats64) / sizeof(u64); 865 memset(&net_stats64, 0, sizeof(net_stats64));
866 bnad_netdev_qstats_fill(bnad, &net_stats64);
867 bnad_netdev_hwstats_fill(bnad, &net_stats64);
868
869 buf[bi++] = net_stats64.rx_packets;
870 buf[bi++] = net_stats64.tx_packets;
871 buf[bi++] = net_stats64.rx_bytes;
872 buf[bi++] = net_stats64.tx_bytes;
873 buf[bi++] = net_stats64.rx_errors;
874 buf[bi++] = net_stats64.tx_errors;
875 buf[bi++] = net_stats64.rx_dropped;
876 buf[bi++] = net_stats64.tx_dropped;
877 buf[bi++] = net_stats64.multicast;
878 buf[bi++] = net_stats64.collisions;
879 buf[bi++] = net_stats64.rx_length_errors;
880 buf[bi++] = net_stats64.rx_crc_errors;
881 buf[bi++] = net_stats64.rx_frame_errors;
882 buf[bi++] = net_stats64.tx_fifo_errors;
882 883
883 /* Get netif_queue_stopped from stack */ 884 /* Get netif_queue_stopped from stack */
884 bnad->stats.drv_stats.netif_queue_stopped = netif_queue_stopped(netdev); 885 bnad->stats.drv_stats.netif_queue_stopped = netif_queue_stopped(netdev);
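
The bnad_ethtool rework replaces the hand-computed BNAD_ETHTOOL_STATS_NUM with ARRAY_SIZE() over the string table and copies only the rtnl_link_stats64 fields that actually have strings, so the count, the strings, and the values cannot drift apart. A tiny model of the sizing idiom; the counter names are just examples:

#include <stdio.h>

#define ARRAY_SIZE(a)	(sizeof(a) / sizeof((a)[0]))

/* Deriving the stats count from the string table means a new counter only
 * needs a string entry (plus the value fill), and the count can never
 * disagree with the strings. */
static const char *stats_strings[] = {
	"rx_packets",
	"tx_packets",
	"rx_bytes",
	"tx_bytes",
	"rxq0_mapbuf_failed",
};

#define STATS_NUM	ARRAY_SIZE(stats_strings)

int main(void)
{
	printf("%zu counters\n", (size_t)STATS_NUM);
	return 0;
}
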
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
index 2e2aa9fec9bb..edd23386b47d 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
@@ -419,8 +419,8 @@ struct link_config {
419 unsigned short supported; /* link capabilities */ 419 unsigned short supported; /* link capabilities */
420 unsigned short advertising; /* advertised capabilities */ 420 unsigned short advertising; /* advertised capabilities */
421 unsigned short lp_advertising; /* peer advertised capabilities */ 421 unsigned short lp_advertising; /* peer advertised capabilities */
422 unsigned short requested_speed; /* speed user has requested */ 422 unsigned int requested_speed; /* speed user has requested */
423 unsigned short speed; /* actual link speed */ 423 unsigned int speed; /* actual link speed */
424 unsigned char requested_fc; /* flow control user has requested */ 424 unsigned char requested_fc; /* flow control user has requested */
425 unsigned char fc; /* actual link flow control */ 425 unsigned char fc; /* actual link flow control */
426 unsigned char autoneg; /* autonegotiating? */ 426 unsigned char autoneg; /* autonegotiating? */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index c762a8c8c954..3ceafb55d6da 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -4305,10 +4305,17 @@ static const struct pci_error_handlers cxgb4_eeh = {
4305 .resume = eeh_resume, 4305 .resume = eeh_resume,
4306}; 4306};
4307 4307
4308/* Return true if the Link Configuration supports "High Speeds" (those greater
4309 * than 1Gb/s).
4310 */
4308static inline bool is_x_10g_port(const struct link_config *lc) 4311static inline bool is_x_10g_port(const struct link_config *lc)
4309{ 4312{
4310 return (lc->supported & FW_PORT_CAP_SPEED_10G) != 0 || 4313 unsigned int speeds, high_speeds;
4311 (lc->supported & FW_PORT_CAP_SPEED_40G) != 0; 4314
4315 speeds = FW_PORT_CAP_SPEED_V(FW_PORT_CAP_SPEED_G(lc->supported));
4316 high_speeds = speeds & ~(FW_PORT_CAP_SPEED_100M | FW_PORT_CAP_SPEED_1G);
4317
4318 return high_speeds != 0;
4312} 4319}
4313 4320
4314static inline void init_rspq(struct adapter *adap, struct sge_rspq *q, 4321static inline void init_rspq(struct adapter *adap, struct sge_rspq *q,
@@ -4756,8 +4763,12 @@ static void print_port_info(const struct net_device *dev)
4756 bufp += sprintf(bufp, "1000/"); 4763 bufp += sprintf(bufp, "1000/");
4757 if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_10G) 4764 if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_10G)
4758 bufp += sprintf(bufp, "10G/"); 4765 bufp += sprintf(bufp, "10G/");
4766 if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_25G)
4767 bufp += sprintf(bufp, "25G/");
4759 if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_40G) 4768 if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_40G)
4760 bufp += sprintf(bufp, "40G/"); 4769 bufp += sprintf(bufp, "40G/");
4770 if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_100G)
4771 bufp += sprintf(bufp, "100G/");
4761 if (bufp != buf) 4772 if (bufp != buf)
4762 --bufp; 4773 --bufp;
4763 sprintf(bufp, "BASE-%s", t4_get_port_type_description(pi->port_type)); 4774 sprintf(bufp, "BASE-%s", t4_get_port_type_description(pi->port_type));
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index dc92c80a75f4..660204bff726 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -3627,7 +3627,8 @@ void t4_ulprx_read_la(struct adapter *adap, u32 *la_buf)
3627} 3627}
3628 3628
3629#define ADVERT_MASK (FW_PORT_CAP_SPEED_100M | FW_PORT_CAP_SPEED_1G |\ 3629#define ADVERT_MASK (FW_PORT_CAP_SPEED_100M | FW_PORT_CAP_SPEED_1G |\
3630 FW_PORT_CAP_SPEED_10G | FW_PORT_CAP_SPEED_40G | \ 3630 FW_PORT_CAP_SPEED_10G | FW_PORT_CAP_SPEED_25G | \
3631 FW_PORT_CAP_SPEED_40G | FW_PORT_CAP_SPEED_100G | \
3631 FW_PORT_CAP_ANEG) 3632 FW_PORT_CAP_ANEG)
3632 3633
3633/** 3634/**
@@ -7196,8 +7197,12 @@ void t4_handle_get_port_info(struct port_info *pi, const __be64 *rpl)
7196 speed = 1000; 7197 speed = 1000;
7197 else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_10G)) 7198 else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_10G))
7198 speed = 10000; 7199 speed = 10000;
7200 else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_25G))
7201 speed = 25000;
7199 else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_40G)) 7202 else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_40G))
7200 speed = 40000; 7203 speed = 40000;
7204 else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_100G))
7205 speed = 100000;
7201 7206
7202 lc = &pi->link_cfg; 7207 lc = &pi->link_cfg;
7203 7208
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
index a89b30720e38..30507d44422c 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
@@ -2265,6 +2265,12 @@ enum fw_port_cap {
2265 FW_PORT_CAP_802_3_ASM_DIR = 0x8000, 2265 FW_PORT_CAP_802_3_ASM_DIR = 0x8000,
2266}; 2266};
2267 2267
2268#define FW_PORT_CAP_SPEED_S 0
2269#define FW_PORT_CAP_SPEED_M 0x3f
2270#define FW_PORT_CAP_SPEED_V(x) ((x) << FW_PORT_CAP_SPEED_S)
2271#define FW_PORT_CAP_SPEED_G(x) \
2272 (((x) >> FW_PORT_CAP_SPEED_S) & FW_PORT_CAP_SPEED_M)
2273
2268enum fw_port_mdi { 2274enum fw_port_mdi {
2269 FW_PORT_CAP_MDI_UNCHANGED, 2275 FW_PORT_CAP_MDI_UNCHANGED,
2270 FW_PORT_CAP_MDI_AUTO, 2276 FW_PORT_CAP_MDI_AUTO,
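
The new FW_PORT_CAP_SPEED_* field macros let is_x_10g_port() (shown in the cxgb4 and cxgb4vf hunks) test for "anything faster than 1G" by extracting the speed field and masking off 100M and 1G, instead of enumerating each fast speed. A compilable model of that test; the bit values below are placeholders, only the field-extraction and masking pattern mirrors the diff:

#include <stdbool.h>
#include <stdio.h>

#define CAP_SPEED_100M	0x01u
#define CAP_SPEED_1G	0x02u
#define CAP_SPEED_25G	0x08u
#define CAP_SPEED_40G	0x10u
#define CAP_SPEED_100G	0x20u

#define CAP_SPEED_S	0
#define CAP_SPEED_M	0x3fu
#define CAP_SPEED_G(x)	(((x) >> CAP_SPEED_S) & CAP_SPEED_M)

/* "High speed" means any capability bit other than 100M and 1G is set,
 * so newly added speeds (25G, 100G, ...) are covered automatically. */
static bool is_high_speed_port(unsigned int supported)
{
	unsigned int speeds = CAP_SPEED_G(supported);

	return (speeds & ~(CAP_SPEED_100M | CAP_SPEED_1G)) != 0;
}

int main(void)
{
	printf("1G only: %d, 1G+25G: %d\n",
	       is_high_speed_port(CAP_SPEED_100M | CAP_SPEED_1G),
	       is_high_speed_port(CAP_SPEED_1G | CAP_SPEED_25G));
	return 0;
}
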
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h
index 8ee541431e8b..17a2bbcf93f0 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h
@@ -108,8 +108,8 @@ struct link_config {
108 unsigned int supported; /* link capabilities */ 108 unsigned int supported; /* link capabilities */
109 unsigned int advertising; /* advertised capabilities */ 109 unsigned int advertising; /* advertised capabilities */
110 unsigned short lp_advertising; /* peer advertised capabilities */ 110 unsigned short lp_advertising; /* peer advertised capabilities */
111 unsigned short requested_speed; /* speed user has requested */ 111 unsigned int requested_speed; /* speed user has requested */
112 unsigned short speed; /* actual link speed */ 112 unsigned int speed; /* actual link speed */
113 unsigned char requested_fc; /* flow control user has requested */ 113 unsigned char requested_fc; /* flow control user has requested */
114 unsigned char fc; /* actual link flow control */ 114 unsigned char fc; /* actual link flow control */
115 unsigned char autoneg; /* autonegotiating? */ 115 unsigned char autoneg; /* autonegotiating? */
@@ -271,10 +271,17 @@ static inline bool is_10g_port(const struct link_config *lc)
271 return (lc->supported & FW_PORT_CAP_SPEED_10G) != 0; 271 return (lc->supported & FW_PORT_CAP_SPEED_10G) != 0;
272} 272}
273 273
274/* Return true if the Link Configuration supports "High Speeds" (those greater
275 * than 1Gb/s).
276 */
274static inline bool is_x_10g_port(const struct link_config *lc) 277static inline bool is_x_10g_port(const struct link_config *lc)
275{ 278{
276 return (lc->supported & FW_PORT_CAP_SPEED_10G) != 0 || 279 unsigned int speeds, high_speeds;
277 (lc->supported & FW_PORT_CAP_SPEED_40G) != 0; 280
281 speeds = FW_PORT_CAP_SPEED_V(FW_PORT_CAP_SPEED_G(lc->supported));
282 high_speeds = speeds & ~(FW_PORT_CAP_SPEED_100M | FW_PORT_CAP_SPEED_1G);
283
284 return high_speeds != 0;
278} 285}
279 286
280static inline unsigned int core_ticks_per_usec(const struct adapter *adapter) 287static inline unsigned int core_ticks_per_usec(const struct adapter *adapter)
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
index 427bfa71388b..b5622b1689e9 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
@@ -314,8 +314,9 @@ int t4vf_wr_mbox_core(struct adapter *adapter, const void *cmd, int size,
314} 314}
315 315
316#define ADVERT_MASK (FW_PORT_CAP_SPEED_100M | FW_PORT_CAP_SPEED_1G |\ 316#define ADVERT_MASK (FW_PORT_CAP_SPEED_100M | FW_PORT_CAP_SPEED_1G |\
317 FW_PORT_CAP_SPEED_10G | FW_PORT_CAP_SPEED_40G | \ 317 FW_PORT_CAP_SPEED_10G | FW_PORT_CAP_SPEED_25G | \
318 FW_PORT_CAP_SPEED_100G | FW_PORT_CAP_ANEG) 318 FW_PORT_CAP_SPEED_40G | FW_PORT_CAP_SPEED_100G | \
319 FW_PORT_CAP_ANEG)
319 320
320/** 321/**
321 * init_link_config - initialize a link's SW state 322 * init_link_config - initialize a link's SW state
@@ -1712,8 +1713,12 @@ int t4vf_handle_fw_rpl(struct adapter *adapter, const __be64 *rpl)
1712 speed = 1000; 1713 speed = 1000;
1713 else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_10G)) 1714 else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_10G))
1714 speed = 10000; 1715 speed = 10000;
1716 else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_25G))
1717 speed = 25000;
1715 else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_40G)) 1718 else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_40G))
1716 speed = 40000; 1719 speed = 40000;
1720 else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_100G))
1721 speed = 100000;
1717 1722
1718 /* 1723 /*
1719 * Scan all of our "ports" (Virtual Interfaces) looking for 1724 * Scan all of our "ports" (Virtual Interfaces) looking for
diff --git a/drivers/net/ethernet/ibm/emac/core.c b/drivers/net/ethernet/ibm/emac/core.c
index 4c9771d57d6e..7af09cbc53f0 100644
--- a/drivers/net/ethernet/ibm/emac/core.c
+++ b/drivers/net/ethernet/ibm/emac/core.c
@@ -977,7 +977,37 @@ static void emac_set_multicast_list(struct net_device *ndev)
977 dev->mcast_pending = 1; 977 dev->mcast_pending = 1;
978 return; 978 return;
979 } 979 }
980
981 mutex_lock(&dev->link_lock);
980 __emac_set_multicast_list(dev); 982 __emac_set_multicast_list(dev);
983 mutex_unlock(&dev->link_lock);
984}
985
986static int emac_set_mac_address(struct net_device *ndev, void *sa)
987{
988 struct emac_instance *dev = netdev_priv(ndev);
989 struct sockaddr *addr = sa;
990 struct emac_regs __iomem *p = dev->emacp;
991
992 if (!is_valid_ether_addr(addr->sa_data))
993 return -EADDRNOTAVAIL;
994
995 mutex_lock(&dev->link_lock);
996
997 memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);
998
999 emac_rx_disable(dev);
1000 emac_tx_disable(dev);
1001 out_be32(&p->iahr, (ndev->dev_addr[0] << 8) | ndev->dev_addr[1]);
1002 out_be32(&p->ialr, (ndev->dev_addr[2] << 24) |
1003 (ndev->dev_addr[3] << 16) | (ndev->dev_addr[4] << 8) |
1004 ndev->dev_addr[5]);
1005 emac_tx_enable(dev);
1006 emac_rx_enable(dev);
1007
1008 mutex_unlock(&dev->link_lock);
1009
1010 return 0;
981} 1011}
982 1012
983static int emac_resize_rx_ring(struct emac_instance *dev, int new_mtu) 1013static int emac_resize_rx_ring(struct emac_instance *dev, int new_mtu)
@@ -2686,7 +2716,7 @@ static const struct net_device_ops emac_netdev_ops = {
2686 .ndo_do_ioctl = emac_ioctl, 2716 .ndo_do_ioctl = emac_ioctl,
2687 .ndo_tx_timeout = emac_tx_timeout, 2717 .ndo_tx_timeout = emac_tx_timeout,
2688 .ndo_validate_addr = eth_validate_addr, 2718 .ndo_validate_addr = eth_validate_addr,
2689 .ndo_set_mac_address = eth_mac_addr, 2719 .ndo_set_mac_address = emac_set_mac_address,
2690 .ndo_start_xmit = emac_start_xmit, 2720 .ndo_start_xmit = emac_start_xmit,
2691 .ndo_change_mtu = eth_change_mtu, 2721 .ndo_change_mtu = eth_change_mtu,
2692}; 2722};
@@ -2699,7 +2729,7 @@ static const struct net_device_ops emac_gige_netdev_ops = {
2699 .ndo_do_ioctl = emac_ioctl, 2729 .ndo_do_ioctl = emac_ioctl,
2700 .ndo_tx_timeout = emac_tx_timeout, 2730 .ndo_tx_timeout = emac_tx_timeout,
2701 .ndo_validate_addr = eth_validate_addr, 2731 .ndo_validate_addr = eth_validate_addr,
2702 .ndo_set_mac_address = eth_mac_addr, 2732 .ndo_set_mac_address = emac_set_mac_address,
2703 .ndo_start_xmit = emac_start_xmit_sg, 2733 .ndo_start_xmit = emac_start_xmit_sg,
2704 .ndo_change_mtu = emac_change_mtu, 2734 .ndo_change_mtu = emac_change_mtu,
2705}; 2735};
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
index d9199151a83e..3743af8f1ded 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -1923,6 +1923,7 @@ const struct of_device_id of_mtk_match[] = {
1923 { .compatible = "mediatek,mt7623-eth" }, 1923 { .compatible = "mediatek,mt7623-eth" },
1924 {}, 1924 {},
1925}; 1925};
1926MODULE_DEVICE_TABLE(of, of_mtk_match);
1926 1927
1927static struct platform_driver mtk_driver = { 1928static struct platform_driver mtk_driver = {
1928 .probe = mtk_probe, 1929 .probe = mtk_probe,
diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c
index f613977455e0..cf8f8a72a801 100644
--- a/drivers/net/ethernet/mellanox/mlx4/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/eq.c
@@ -1305,8 +1305,8 @@ int mlx4_init_eq_table(struct mlx4_dev *dev)
1305 return 0; 1305 return 0;
1306 1306
1307err_out_unmap: 1307err_out_unmap:
1308 while (i >= 0) 1308 while (i > 0)
1309 mlx4_free_eq(dev, &priv->eq_table.eq[i--]); 1309 mlx4_free_eq(dev, &priv->eq_table.eq[--i]);
1310#ifdef CONFIG_RFS_ACCEL 1310#ifdef CONFIG_RFS_ACCEL
1311 for (i = 1; i <= dev->caps.num_ports; i++) { 1311 for (i = 1; i <= dev->caps.num_ports; i++) {
1312 if (mlx4_priv(dev)->port[i].rmap) { 1312 if (mlx4_priv(dev)->port[i].rmap) {
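
The mlx4_init_eq_table fix changes the unwind loop so that, with i equal to the number of EQs created so far, cleanup frees exactly entries 0..i-1; the old "while (i >= 0) ... eq[i--]" form also freed the entry whose creation had just failed. A minimal standalone illustration of the idiom:

#include <stdio.h>
#include <stdlib.h>

#define NUM_EQ	4

int main(void)
{
	void *eq[NUM_EQ] = { NULL };
	int i;

	for (i = 0; i < NUM_EQ; i++) {
		eq[i] = (i == 2) ? NULL : malloc(16);	/* creation fails at i == 2 */
		if (!eq[i])
			goto err_out_unmap;
	}
	printf("all created\n");
	return 0;

err_out_unmap:
	/* Exactly i entries (indices 0..i-1) exist here; freeing with --i
	 * never touches eq[i], the slot whose creation just failed. */
	printf("creation failed, freeing %d entries\n", i);
	while (i > 0)
		free(eq[--i]);
	return 1;
}
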
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index 75dd2e3d3059..7183ac4135d2 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -2970,6 +2970,7 @@ static int mlx4_init_port_info(struct mlx4_dev *dev, int port)
2970 mlx4_err(dev, "Failed to create mtu file for port %d\n", port); 2970 mlx4_err(dev, "Failed to create mtu file for port %d\n", port);
2971 device_remove_file(&info->dev->persist->pdev->dev, 2971 device_remove_file(&info->dev->persist->pdev->dev,
2972 &info->port_attr); 2972 &info->port_attr);
2973 devlink_port_unregister(&info->devlink_port);
2973 info->port = -1; 2974 info->port = -1;
2974 } 2975 }
2975 2976
@@ -2984,6 +2985,8 @@ static void mlx4_cleanup_port_info(struct mlx4_port_info *info)
2984 device_remove_file(&info->dev->persist->pdev->dev, &info->port_attr); 2985 device_remove_file(&info->dev->persist->pdev->dev, &info->port_attr);
2985 device_remove_file(&info->dev->persist->pdev->dev, 2986 device_remove_file(&info->dev->persist->pdev->dev,
2986 &info->port_mtu_attr); 2987 &info->port_mtu_attr);
2988 devlink_port_unregister(&info->devlink_port);
2989
2987#ifdef CONFIG_RFS_ACCEL 2990#ifdef CONFIG_RFS_ACCEL
2988 free_irq_cpu_rmap(info->rmap); 2991 free_irq_cpu_rmap(info->rmap);
2989 info->rmap = NULL; 2992 info->rmap = NULL;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
index 8b78f156214e..b247949df135 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
@@ -1554,6 +1554,7 @@ int mlx5_eswitch_enable_sriov(struct mlx5_eswitch *esw, int nvfs, int mode)
1554 1554
1555abort: 1555abort:
1556 esw_enable_vport(esw, 0, UC_ADDR_CHANGE); 1556 esw_enable_vport(esw, 0, UC_ADDR_CHANGE);
1557 esw->mode = SRIOV_NONE;
1557 return err; 1558 return err;
1558} 1559}
1559 1560
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index 3dc83a9459a4..7de40e6b0c25 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -446,7 +446,7 @@ out:
446 446
447static int esw_offloads_start(struct mlx5_eswitch *esw) 447static int esw_offloads_start(struct mlx5_eswitch *esw)
448{ 448{
449 int err, num_vfs = esw->dev->priv.sriov.num_vfs; 449 int err, err1, num_vfs = esw->dev->priv.sriov.num_vfs;
450 450
451 if (esw->mode != SRIOV_LEGACY) { 451 if (esw->mode != SRIOV_LEGACY) {
452 esw_warn(esw->dev, "Can't set offloads mode, SRIOV legacy not enabled\n"); 452 esw_warn(esw->dev, "Can't set offloads mode, SRIOV legacy not enabled\n");
@@ -455,8 +455,12 @@ static int esw_offloads_start(struct mlx5_eswitch *esw)
455 455
456 mlx5_eswitch_disable_sriov(esw); 456 mlx5_eswitch_disable_sriov(esw);
457 err = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_OFFLOADS); 457 err = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_OFFLOADS);
458 if (err) 458 if (err) {
459 esw_warn(esw->dev, "Failed set eswitch to offloads, err %d\n", err); 459 esw_warn(esw->dev, "Failed setting eswitch to offloads, err %d\n", err);
460 err1 = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_LEGACY);
461 if (err1)
462 esw_warn(esw->dev, "Failed setting eswitch back to legacy, err %d\n", err);
463 }
460 return err; 464 return err;
461} 465}
462 466
@@ -508,12 +512,16 @@ create_ft_err:
508 512
509static int esw_offloads_stop(struct mlx5_eswitch *esw) 513static int esw_offloads_stop(struct mlx5_eswitch *esw)
510{ 514{
511 int err, num_vfs = esw->dev->priv.sriov.num_vfs; 515 int err, err1, num_vfs = esw->dev->priv.sriov.num_vfs;
512 516
513 mlx5_eswitch_disable_sriov(esw); 517 mlx5_eswitch_disable_sriov(esw);
514 err = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_LEGACY); 518 err = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_LEGACY);
515 if (err) 519 if (err) {
516 esw_warn(esw->dev, "Failed set eswitch legacy mode. err %d\n", err); 520 esw_warn(esw->dev, "Failed setting eswitch to legacy, err %d\n", err);
521 err1 = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_OFFLOADS);
522 if (err1)
523 esw_warn(esw->dev, "Failed setting eswitch back to offloads, err %d\n", err);
524 }
517 525
518 return err; 526 return err;
519} 527}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
index 9134010e2921..287ade151ec8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
@@ -425,11 +425,11 @@ struct mlx5_cmd_fc_bulk *
425mlx5_cmd_fc_bulk_alloc(struct mlx5_core_dev *dev, u16 id, int num) 425mlx5_cmd_fc_bulk_alloc(struct mlx5_core_dev *dev, u16 id, int num)
426{ 426{
427 struct mlx5_cmd_fc_bulk *b; 427 struct mlx5_cmd_fc_bulk *b;
428 int outlen = sizeof(*b) + 428 int outlen =
429 MLX5_ST_SZ_BYTES(query_flow_counter_out) + 429 MLX5_ST_SZ_BYTES(query_flow_counter_out) +
430 MLX5_ST_SZ_BYTES(traffic_counter) * num; 430 MLX5_ST_SZ_BYTES(traffic_counter) * num;
431 431
432 b = kzalloc(outlen, GFP_KERNEL); 432 b = kzalloc(sizeof(*b) + outlen, GFP_KERNEL);
433 if (!b) 433 if (!b)
434 return NULL; 434 return NULL;
435 435
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
index 252e4924de0f..39dadfca84ef 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
@@ -2044,12 +2044,16 @@ static int nfp_net_netdev_open(struct net_device *netdev)
 
         nn->rx_rings = kcalloc(nn->num_rx_rings, sizeof(*nn->rx_rings),
                                GFP_KERNEL);
-        if (!nn->rx_rings)
+        if (!nn->rx_rings) {
+                err = -ENOMEM;
                 goto err_free_lsc;
+        }
         nn->tx_rings = kcalloc(nn->num_tx_rings, sizeof(*nn->tx_rings),
                                GFP_KERNEL);
-        if (!nn->tx_rings)
+        if (!nn->tx_rings) {
+                err = -ENOMEM;
                 goto err_free_rx_rings;
+        }
 
         for (r = 0; r < nn->num_r_vecs; r++) {
                 err = nfp_net_prepare_vector(nn, &nn->r_vecs[r], r);
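The nfp change illustrates a common goto-cleanup pitfall: when an allocation fails, the function must set the error code before jumping to the unwind labels, otherwise it returns whatever err happened to hold. A hedged userspace sketch of the same structure:

    #include <errno.h>
    #include <stdlib.h>

    static int open_rings(size_t n_rx, size_t n_tx)
    {
        int err = 0;
        void *rx_rings, *tx_rings;

        rx_rings = calloc(n_rx, sizeof(void *));
        if (!rx_rings) {
            err = -ENOMEM;          /* without this, err stays 0 */
            goto err_out;
        }
        tx_rings = calloc(n_tx, sizeof(void *));
        if (!tx_rings) {
            err = -ENOMEM;
            goto err_free_rx;
        }
        /* ... use the rings, then tear down in reverse order ... */
        free(tx_rings);
    err_free_rx:
        free(rx_rings);
    err_out:
        return err;
    }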
diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
index a240f26344a4..f776a77794c5 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
@@ -1153,8 +1153,8 @@ qed_mcp_send_drv_version(struct qed_hwfn *p_hwfn,
         p_drv_version = &union_data.drv_version;
         p_drv_version->version = p_ver->version;
 
-        for (i = 0; i < MCP_DRV_VER_STR_SIZE - 1; i += 4) {
-                val = cpu_to_be32(p_ver->name[i]);
+        for (i = 0; i < (MCP_DRV_VER_STR_SIZE - 4) / sizeof(u32); i++) {
+                val = cpu_to_be32(*((u32 *)&p_ver->name[i * sizeof(u32)]));
                 *(__be32 *)&p_drv_version->name[i * sizeof(u32)] = val;
         }
 
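The qed loop now walks the version string one 32-bit word at a time and byte-swaps each word, instead of promoting single chars. A rough userspace equivalent using htonl(); the buffer names are made up, and memcpy() is used here to sidestep the alignment question:

    #include <arpa/inet.h>  /* htonl() */
    #include <stddef.h>
    #include <stdint.h>
    #include <string.h>

    #define VER_STR_SIZE 32

    /* Copy a driver-version string into a firmware buffer as big-endian words. */
    static void copy_ver_be32(uint8_t dst[VER_STR_SIZE], const char src[VER_STR_SIZE])
    {
        for (size_t i = 0; i < (VER_STR_SIZE - 4) / sizeof(uint32_t); i++) {
            uint32_t word;

            memcpy(&word, &src[i * sizeof(uint32_t)], sizeof(word));
            word = htonl(word);     /* cpu_to_be32() equivalent */
            memcpy(&dst[i * sizeof(uint32_t)], &word, sizeof(word));
        }
    }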
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
index cbefe9e2207c..885a5e64519d 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
@@ -261,7 +261,7 @@ static void dwmac1000_pmt(struct mac_device_info *hw, unsigned long mode)
         }
         if (mode & WAKE_UCAST) {
                 pr_debug("GMAC: WOL on global unicast\n");
-                pmt |= global_unicast;
+                pmt |= power_down | global_unicast | wake_up_frame_en;
         }
 
         writel(pmt, ioaddr + GMAC_PMT);
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
index df5580dcdfed..51019b794be5 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
@@ -102,7 +102,7 @@ static void dwmac4_pmt(struct mac_device_info *hw, unsigned long mode)
         }
         if (mode & WAKE_UCAST) {
                 pr_debug("GMAC: WOL on global unicast\n");
-                pmt |= global_unicast;
+                pmt |= power_down | global_unicast | wake_up_frame_en;
         }
 
         writel(pmt, ioaddr + GMAC_PMT);
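Both stmmac variants receive the same fix: setting only the unicast-match bit leaves the PMT register without the power-down and wake-up-frame enables, so the MAC never actually wakes. A sketch of composing such a register value, with invented bit positions rather than the real GMAC_PMT layout:

    #include <stdint.h>

    /* Illustrative bit layout, not the real GMAC_PMT register. */
    #define PMT_POWER_DOWN      (1u << 0)
    #define PMT_WAKE_FRAME_EN   (1u << 2)
    #define PMT_GLOBAL_UNICAST  (1u << 9)

    static uint32_t pmt_for_unicast_wol(void)
    {
        /* all three bits are needed for wake-on-unicast to work */
        return PMT_POWER_DOWN | PMT_GLOBAL_UNICAST | PMT_WAKE_FRAME_EN;
    }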
diff --git a/drivers/net/phy/mdio-xgene.c b/drivers/net/phy/mdio-xgene.c
index 775674808249..92af182951be 100644
--- a/drivers/net/phy/mdio-xgene.c
+++ b/drivers/net/phy/mdio-xgene.c
@@ -424,10 +424,8 @@ static int xgene_mdio_remove(struct platform_device *pdev)
         mdiobus_unregister(mdio_bus);
         mdiobus_free(mdio_bus);
 
-        if (dev->of_node) {
-                if (IS_ERR(pdata->clk))
-                        clk_disable_unprepare(pdata->clk);
-        }
+        if (dev->of_node)
+                clk_disable_unprepare(pdata->clk);
 
         return 0;
 }
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index f41a8ad4740e..c254248863d4 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -32,7 +32,7 @@
32#define NETNEXT_VERSION "08" 32#define NETNEXT_VERSION "08"
33 33
34/* Information for net */ 34/* Information for net */
35#define NET_VERSION "5" 35#define NET_VERSION "6"
36 36
37#define DRIVER_VERSION "v1." NETNEXT_VERSION "." NET_VERSION 37#define DRIVER_VERSION "v1." NETNEXT_VERSION "." NET_VERSION
38#define DRIVER_AUTHOR "Realtek linux nic maintainers <nic_swsd@realtek.com>" 38#define DRIVER_AUTHOR "Realtek linux nic maintainers <nic_swsd@realtek.com>"
@@ -2552,6 +2552,77 @@ static void r8152_aldps_en(struct r8152 *tp, bool enable)
2552 } 2552 }
2553} 2553}
2554 2554
2555static inline void r8152_mmd_indirect(struct r8152 *tp, u16 dev, u16 reg)
2556{
2557 ocp_reg_write(tp, OCP_EEE_AR, FUN_ADDR | dev);
2558 ocp_reg_write(tp, OCP_EEE_DATA, reg);
2559 ocp_reg_write(tp, OCP_EEE_AR, FUN_DATA | dev);
2560}
2561
2562static u16 r8152_mmd_read(struct r8152 *tp, u16 dev, u16 reg)
2563{
2564 u16 data;
2565
2566 r8152_mmd_indirect(tp, dev, reg);
2567 data = ocp_reg_read(tp, OCP_EEE_DATA);
2568 ocp_reg_write(tp, OCP_EEE_AR, 0x0000);
2569
2570 return data;
2571}
2572
2573static void r8152_mmd_write(struct r8152 *tp, u16 dev, u16 reg, u16 data)
2574{
2575 r8152_mmd_indirect(tp, dev, reg);
2576 ocp_reg_write(tp, OCP_EEE_DATA, data);
2577 ocp_reg_write(tp, OCP_EEE_AR, 0x0000);
2578}
2579
2580static void r8152_eee_en(struct r8152 *tp, bool enable)
2581{
2582 u16 config1, config2, config3;
2583 u32 ocp_data;
2584
2585 ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_EEE_CR);
2586 config1 = ocp_reg_read(tp, OCP_EEE_CONFIG1) & ~sd_rise_time_mask;
2587 config2 = ocp_reg_read(tp, OCP_EEE_CONFIG2);
2588 config3 = ocp_reg_read(tp, OCP_EEE_CONFIG3) & ~fast_snr_mask;
2589
2590 if (enable) {
2591 ocp_data |= EEE_RX_EN | EEE_TX_EN;
2592 config1 |= EEE_10_CAP | EEE_NWAY_EN | TX_QUIET_EN | RX_QUIET_EN;
2593 config1 |= sd_rise_time(1);
2594 config2 |= RG_DACQUIET_EN | RG_LDVQUIET_EN;
2595 config3 |= fast_snr(42);
2596 } else {
2597 ocp_data &= ~(EEE_RX_EN | EEE_TX_EN);
2598 config1 &= ~(EEE_10_CAP | EEE_NWAY_EN | TX_QUIET_EN |
2599 RX_QUIET_EN);
2600 config1 |= sd_rise_time(7);
2601 config2 &= ~(RG_DACQUIET_EN | RG_LDVQUIET_EN);
2602 config3 |= fast_snr(511);
2603 }
2604
2605 ocp_write_word(tp, MCU_TYPE_PLA, PLA_EEE_CR, ocp_data);
2606 ocp_reg_write(tp, OCP_EEE_CONFIG1, config1);
2607 ocp_reg_write(tp, OCP_EEE_CONFIG2, config2);
2608 ocp_reg_write(tp, OCP_EEE_CONFIG3, config3);
2609}
2610
2611static void r8152b_enable_eee(struct r8152 *tp)
2612{
2613 r8152_eee_en(tp, true);
2614 r8152_mmd_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, MDIO_EEE_100TX);
2615}
2616
2617static void r8152b_enable_fc(struct r8152 *tp)
2618{
2619 u16 anar;
2620
2621 anar = r8152_mdio_read(tp, MII_ADVERTISE);
2622 anar |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
2623 r8152_mdio_write(tp, MII_ADVERTISE, anar);
2624}
2625
2555static void rtl8152_disable(struct r8152 *tp) 2626static void rtl8152_disable(struct r8152 *tp)
2556{ 2627{
2557 r8152_aldps_en(tp, false); 2628 r8152_aldps_en(tp, false);
@@ -2561,13 +2632,9 @@ static void rtl8152_disable(struct r8152 *tp)
2561 2632
2562static void r8152b_hw_phy_cfg(struct r8152 *tp) 2633static void r8152b_hw_phy_cfg(struct r8152 *tp)
2563{ 2634{
2564 u16 data; 2635 r8152b_enable_eee(tp);
2565 2636 r8152_aldps_en(tp, true);
2566 data = r8152_mdio_read(tp, MII_BMCR); 2637 r8152b_enable_fc(tp);
2567 if (data & BMCR_PDOWN) {
2568 data &= ~BMCR_PDOWN;
2569 r8152_mdio_write(tp, MII_BMCR, data);
2570 }
2571 2638
2572 set_bit(PHY_RESET, &tp->flags); 2639 set_bit(PHY_RESET, &tp->flags);
2573} 2640}
@@ -2701,20 +2768,52 @@ static void r8152b_enter_oob(struct r8152 *tp)
2701 ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RCR, ocp_data); 2768 ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RCR, ocp_data);
2702} 2769}
2703 2770
2771static void r8153_aldps_en(struct r8152 *tp, bool enable)
2772{
2773 u16 data;
2774
2775 data = ocp_reg_read(tp, OCP_POWER_CFG);
2776 if (enable) {
2777 data |= EN_ALDPS;
2778 ocp_reg_write(tp, OCP_POWER_CFG, data);
2779 } else {
2780 data &= ~EN_ALDPS;
2781 ocp_reg_write(tp, OCP_POWER_CFG, data);
2782 msleep(20);
2783 }
2784}
2785
2786static void r8153_eee_en(struct r8152 *tp, bool enable)
2787{
2788 u32 ocp_data;
2789 u16 config;
2790
2791 ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_EEE_CR);
2792 config = ocp_reg_read(tp, OCP_EEE_CFG);
2793
2794 if (enable) {
2795 ocp_data |= EEE_RX_EN | EEE_TX_EN;
2796 config |= EEE10_EN;
2797 } else {
2798 ocp_data &= ~(EEE_RX_EN | EEE_TX_EN);
2799 config &= ~EEE10_EN;
2800 }
2801
2802 ocp_write_word(tp, MCU_TYPE_PLA, PLA_EEE_CR, ocp_data);
2803 ocp_reg_write(tp, OCP_EEE_CFG, config);
2804}
2805
2704static void r8153_hw_phy_cfg(struct r8152 *tp) 2806static void r8153_hw_phy_cfg(struct r8152 *tp)
2705{ 2807{
2706 u32 ocp_data; 2808 u32 ocp_data;
2707 u16 data; 2809 u16 data;
2708 2810
2709 if (tp->version == RTL_VER_03 || tp->version == RTL_VER_04 || 2811 /* disable ALDPS before updating the PHY parameters */
2710 tp->version == RTL_VER_05) 2812 r8153_aldps_en(tp, false);
2711 ocp_reg_write(tp, OCP_ADC_CFG, CKADSEL_L | ADC_EN | EN_EMI_L);
2712 2813
2713 data = r8152_mdio_read(tp, MII_BMCR); 2814 /* disable EEE before updating the PHY parameters */
2714 if (data & BMCR_PDOWN) { 2815 r8153_eee_en(tp, false);
2715 data &= ~BMCR_PDOWN; 2816 ocp_reg_write(tp, OCP_EEE_ADV, 0);
2716 r8152_mdio_write(tp, MII_BMCR, data);
2717 }
2718 2817
2719 if (tp->version == RTL_VER_03) { 2818 if (tp->version == RTL_VER_03) {
2720 data = ocp_reg_read(tp, OCP_EEE_CFG); 2819 data = ocp_reg_read(tp, OCP_EEE_CFG);
@@ -2745,6 +2844,12 @@ static void r8153_hw_phy_cfg(struct r8152 *tp)
2745 sram_write(tp, SRAM_10M_AMP1, 0x00af); 2844 sram_write(tp, SRAM_10M_AMP1, 0x00af);
2746 sram_write(tp, SRAM_10M_AMP2, 0x0208); 2845 sram_write(tp, SRAM_10M_AMP2, 0x0208);
2747 2846
2847 r8153_eee_en(tp, true);
2848 ocp_reg_write(tp, OCP_EEE_ADV, MDIO_EEE_1000T | MDIO_EEE_100TX);
2849
2850 r8153_aldps_en(tp, true);
2851 r8152b_enable_fc(tp);
2852
2748 set_bit(PHY_RESET, &tp->flags); 2853 set_bit(PHY_RESET, &tp->flags);
2749} 2854}
2750 2855
@@ -2866,21 +2971,6 @@ static void r8153_enter_oob(struct r8152 *tp)
2866 ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RCR, ocp_data); 2971 ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RCR, ocp_data);
2867} 2972}
2868 2973
2869static void r8153_aldps_en(struct r8152 *tp, bool enable)
2870{
2871 u16 data;
2872
2873 data = ocp_reg_read(tp, OCP_POWER_CFG);
2874 if (enable) {
2875 data |= EN_ALDPS;
2876 ocp_reg_write(tp, OCP_POWER_CFG, data);
2877 } else {
2878 data &= ~EN_ALDPS;
2879 ocp_reg_write(tp, OCP_POWER_CFG, data);
2880 msleep(20);
2881 }
2882}
2883
2884static void rtl8153_disable(struct r8152 *tp) 2974static void rtl8153_disable(struct r8152 *tp)
2885{ 2975{
2886 r8153_aldps_en(tp, false); 2976 r8153_aldps_en(tp, false);
@@ -3246,103 +3336,6 @@ static int rtl8152_close(struct net_device *netdev)
3246 return res; 3336 return res;
3247} 3337}
3248 3338
3249static inline void r8152_mmd_indirect(struct r8152 *tp, u16 dev, u16 reg)
3250{
3251 ocp_reg_write(tp, OCP_EEE_AR, FUN_ADDR | dev);
3252 ocp_reg_write(tp, OCP_EEE_DATA, reg);
3253 ocp_reg_write(tp, OCP_EEE_AR, FUN_DATA | dev);
3254}
3255
3256static u16 r8152_mmd_read(struct r8152 *tp, u16 dev, u16 reg)
3257{
3258 u16 data;
3259
3260 r8152_mmd_indirect(tp, dev, reg);
3261 data = ocp_reg_read(tp, OCP_EEE_DATA);
3262 ocp_reg_write(tp, OCP_EEE_AR, 0x0000);
3263
3264 return data;
3265}
3266
3267static void r8152_mmd_write(struct r8152 *tp, u16 dev, u16 reg, u16 data)
3268{
3269 r8152_mmd_indirect(tp, dev, reg);
3270 ocp_reg_write(tp, OCP_EEE_DATA, data);
3271 ocp_reg_write(tp, OCP_EEE_AR, 0x0000);
3272}
3273
3274static void r8152_eee_en(struct r8152 *tp, bool enable)
3275{
3276 u16 config1, config2, config3;
3277 u32 ocp_data;
3278
3279 ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_EEE_CR);
3280 config1 = ocp_reg_read(tp, OCP_EEE_CONFIG1) & ~sd_rise_time_mask;
3281 config2 = ocp_reg_read(tp, OCP_EEE_CONFIG2);
3282 config3 = ocp_reg_read(tp, OCP_EEE_CONFIG3) & ~fast_snr_mask;
3283
3284 if (enable) {
3285 ocp_data |= EEE_RX_EN | EEE_TX_EN;
3286 config1 |= EEE_10_CAP | EEE_NWAY_EN | TX_QUIET_EN | RX_QUIET_EN;
3287 config1 |= sd_rise_time(1);
3288 config2 |= RG_DACQUIET_EN | RG_LDVQUIET_EN;
3289 config3 |= fast_snr(42);
3290 } else {
3291 ocp_data &= ~(EEE_RX_EN | EEE_TX_EN);
3292 config1 &= ~(EEE_10_CAP | EEE_NWAY_EN | TX_QUIET_EN |
3293 RX_QUIET_EN);
3294 config1 |= sd_rise_time(7);
3295 config2 &= ~(RG_DACQUIET_EN | RG_LDVQUIET_EN);
3296 config3 |= fast_snr(511);
3297 }
3298
3299 ocp_write_word(tp, MCU_TYPE_PLA, PLA_EEE_CR, ocp_data);
3300 ocp_reg_write(tp, OCP_EEE_CONFIG1, config1);
3301 ocp_reg_write(tp, OCP_EEE_CONFIG2, config2);
3302 ocp_reg_write(tp, OCP_EEE_CONFIG3, config3);
3303}
3304
3305static void r8152b_enable_eee(struct r8152 *tp)
3306{
3307 r8152_eee_en(tp, true);
3308 r8152_mmd_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, MDIO_EEE_100TX);
3309}
3310
3311static void r8153_eee_en(struct r8152 *tp, bool enable)
3312{
3313 u32 ocp_data;
3314 u16 config;
3315
3316 ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_EEE_CR);
3317 config = ocp_reg_read(tp, OCP_EEE_CFG);
3318
3319 if (enable) {
3320 ocp_data |= EEE_RX_EN | EEE_TX_EN;
3321 config |= EEE10_EN;
3322 } else {
3323 ocp_data &= ~(EEE_RX_EN | EEE_TX_EN);
3324 config &= ~EEE10_EN;
3325 }
3326
3327 ocp_write_word(tp, MCU_TYPE_PLA, PLA_EEE_CR, ocp_data);
3328 ocp_reg_write(tp, OCP_EEE_CFG, config);
3329}
3330
3331static void r8153_enable_eee(struct r8152 *tp)
3332{
3333 r8153_eee_en(tp, true);
3334 ocp_reg_write(tp, OCP_EEE_ADV, MDIO_EEE_1000T | MDIO_EEE_100TX);
3335}
3336
3337static void r8152b_enable_fc(struct r8152 *tp)
3338{
3339 u16 anar;
3340
3341 anar = r8152_mdio_read(tp, MII_ADVERTISE);
3342 anar |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
3343 r8152_mdio_write(tp, MII_ADVERTISE, anar);
3344}
3345
3346static void rtl_tally_reset(struct r8152 *tp) 3339static void rtl_tally_reset(struct r8152 *tp)
3347{ 3340{
3348 u32 ocp_data; 3341 u32 ocp_data;
@@ -3355,10 +3348,17 @@ static void rtl_tally_reset(struct r8152 *tp)
3355static void r8152b_init(struct r8152 *tp) 3348static void r8152b_init(struct r8152 *tp)
3356{ 3349{
3357 u32 ocp_data; 3350 u32 ocp_data;
3351 u16 data;
3358 3352
3359 if (test_bit(RTL8152_UNPLUG, &tp->flags)) 3353 if (test_bit(RTL8152_UNPLUG, &tp->flags))
3360 return; 3354 return;
3361 3355
3356 data = r8152_mdio_read(tp, MII_BMCR);
3357 if (data & BMCR_PDOWN) {
3358 data &= ~BMCR_PDOWN;
3359 r8152_mdio_write(tp, MII_BMCR, data);
3360 }
3361
3362 r8152_aldps_en(tp, false); 3362 r8152_aldps_en(tp, false);
3363 3363
3364 if (tp->version == RTL_VER_01) { 3364 if (tp->version == RTL_VER_01) {
@@ -3380,9 +3380,6 @@ static void r8152b_init(struct r8152 *tp)
3380 SPDWN_RXDV_MSK | SPDWN_LINKCHG_MSK; 3380 SPDWN_RXDV_MSK | SPDWN_LINKCHG_MSK;
3381 ocp_write_word(tp, MCU_TYPE_PLA, PLA_GPHY_INTR_IMR, ocp_data); 3381 ocp_write_word(tp, MCU_TYPE_PLA, PLA_GPHY_INTR_IMR, ocp_data);
3382 3382
3383 r8152b_enable_eee(tp);
3384 r8152_aldps_en(tp, true);
3385 r8152b_enable_fc(tp);
3386 rtl_tally_reset(tp); 3383 rtl_tally_reset(tp);
3387 3384
3388 /* enable rx aggregation */ 3385 /* enable rx aggregation */
@@ -3394,12 +3391,12 @@ static void r8152b_init(struct r8152 *tp)
3394static void r8153_init(struct r8152 *tp) 3391static void r8153_init(struct r8152 *tp)
3395{ 3392{
3396 u32 ocp_data; 3393 u32 ocp_data;
3394 u16 data;
3397 int i; 3395 int i;
3398 3396
3399 if (test_bit(RTL8152_UNPLUG, &tp->flags)) 3397 if (test_bit(RTL8152_UNPLUG, &tp->flags))
3400 return; 3398 return;
3401 3399
3402 r8153_aldps_en(tp, false);
3403 r8153_u1u2en(tp, false); 3400 r8153_u1u2en(tp, false);
3404 3401
3405 for (i = 0; i < 500; i++) { 3402 for (i = 0; i < 500; i++) {
@@ -3416,6 +3413,23 @@ static void r8153_init(struct r8152 *tp)
3416 msleep(20); 3413 msleep(20);
3417 } 3414 }
3418 3415
3416 if (tp->version == RTL_VER_03 || tp->version == RTL_VER_04 ||
3417 tp->version == RTL_VER_05)
3418 ocp_reg_write(tp, OCP_ADC_CFG, CKADSEL_L | ADC_EN | EN_EMI_L);
3419
3420 data = r8152_mdio_read(tp, MII_BMCR);
3421 if (data & BMCR_PDOWN) {
3422 data &= ~BMCR_PDOWN;
3423 r8152_mdio_write(tp, MII_BMCR, data);
3424 }
3425
3426 for (i = 0; i < 500; i++) {
3427 ocp_data = ocp_reg_read(tp, OCP_PHY_STATUS) & PHY_STAT_MASK;
3428 if (ocp_data == PHY_STAT_LAN_ON)
3429 break;
3430 msleep(20);
3431 }
3432
3419 usb_disable_lpm(tp->udev); 3433 usb_disable_lpm(tp->udev);
3420 r8153_u2p3en(tp, false); 3434 r8153_u2p3en(tp, false);
3421 3435
@@ -3483,9 +3497,6 @@ static void r8153_init(struct r8152 *tp)
3483 ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL3, 0); 3497 ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL3, 0);
3484 ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL4, 0); 3498 ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL4, 0);
3485 3499
3486 r8153_enable_eee(tp);
3487 r8153_aldps_en(tp, true);
3488 r8152b_enable_fc(tp);
3489 rtl_tally_reset(tp); 3500 rtl_tally_reset(tp);
3490 r8153_u2p3en(tp, true); 3501 r8153_u2p3en(tp, true);
3491} 3502}
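The r8152 EEE helpers moved earlier in the file use a classic indirect-register idiom: write the MMD device and register address through one window register, then read or write the data through another. A userspace model of that three-step sequence, with a fake register map standing in for ocp_reg_read()/ocp_reg_write():

    #include <stdint.h>
    #include <stdio.h>

    /* Fake window registers standing in for OCP_EEE_AR / OCP_EEE_DATA. */
    static uint16_t reg_ar, reg_data;
    #define FUN_ADDR 0x0000
    #define FUN_DATA 0x4000

    static void reg_write(uint16_t *r, uint16_t v) { *r = v; }
    static uint16_t reg_read(const uint16_t *r)    { return *r; }

    /* Select an MMD device/register pair through the address window. */
    static void mmd_indirect(uint16_t dev, uint16_t reg)
    {
        reg_write(&reg_ar, FUN_ADDR | dev);   /* address phase */
        reg_write(&reg_data, reg);
        reg_write(&reg_ar, FUN_DATA | dev);   /* switch to data phase */
    }

    static uint16_t mmd_read(uint16_t dev, uint16_t reg)
    {
        uint16_t v;

        mmd_indirect(dev, reg);
        v = reg_read(&reg_data);
        reg_write(&reg_ar, 0x0000);           /* close the window */
        return v;
    }

    int main(void)
    {
        printf("0x%04x\n", mmd_read(0x7, 0x3c));
        return 0;
    }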
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
index c6585ab48df3..b3a87a31de30 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
@@ -513,6 +513,15 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb)
513 int hdrlen = ieee80211_hdrlen(hdr->frame_control); 513 int hdrlen = ieee80211_hdrlen(hdr->frame_control);
514 int queue; 514 int queue;
515 515
516 /* IWL_MVM_OFFCHANNEL_QUEUE is used for ROC packets that can be used
517 * in 2 different types of vifs, P2P & STATION. P2P uses the offchannel
518 * queue. STATION (HS2.0) uses the auxiliary context of the FW,
519 * and hence needs to be sent on the aux queue
520 */
521 if (IEEE80211_SKB_CB(skb)->hw_queue == IWL_MVM_OFFCHANNEL_QUEUE &&
522 skb_info->control.vif->type == NL80211_IFTYPE_STATION)
523 IEEE80211_SKB_CB(skb)->hw_queue = mvm->aux_queue;
524
516 memcpy(&info, skb->cb, sizeof(info)); 525 memcpy(&info, skb->cb, sizeof(info));
517 526
518 if (WARN_ON_ONCE(info.flags & IEEE80211_TX_CTL_AMPDU)) 527 if (WARN_ON_ONCE(info.flags & IEEE80211_TX_CTL_AMPDU))
@@ -526,16 +535,6 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb)
526 /* This holds the amsdu headers length */ 535 /* This holds the amsdu headers length */
527 skb_info->driver_data[0] = (void *)(uintptr_t)0; 536 skb_info->driver_data[0] = (void *)(uintptr_t)0;
528 537
529 /*
530 * IWL_MVM_OFFCHANNEL_QUEUE is used for ROC packets that can be used
531 * in 2 different types of vifs, P2P & STATION. P2P uses the offchannel
532 * queue. STATION (HS2.0) uses the auxiliary context of the FW,
533 * and hence needs to be sent on the aux queue
534 */
535 if (IEEE80211_SKB_CB(skb)->hw_queue == IWL_MVM_OFFCHANNEL_QUEUE &&
536 info.control.vif->type == NL80211_IFTYPE_STATION)
537 IEEE80211_SKB_CB(skb)->hw_queue = mvm->aux_queue;
538
539 queue = info.hw_queue; 538 queue = info.hw_queue;
540 539
541 /* 540 /*
diff --git a/drivers/net/xen-netback/xenbus.c b/drivers/net/xen-netback/xenbus.c
index 6a31f2610c23..daf4c7867102 100644
--- a/drivers/net/xen-netback/xenbus.c
+++ b/drivers/net/xen-netback/xenbus.c
@@ -271,6 +271,11 @@ static int netback_probe(struct xenbus_device *dev,
271 be->dev = dev; 271 be->dev = dev;
272 dev_set_drvdata(&dev->dev, be); 272 dev_set_drvdata(&dev->dev, be);
273 273
274 be->state = XenbusStateInitialising;
275 err = xenbus_switch_state(dev, XenbusStateInitialising);
276 if (err)
277 goto fail;
278
274 sg = 1; 279 sg = 1;
275 280
276 do { 281 do {
@@ -383,11 +388,6 @@ static int netback_probe(struct xenbus_device *dev,
383 388
384 be->hotplug_script = script; 389 be->hotplug_script = script;
385 390
386 err = xenbus_switch_state(dev, XenbusStateInitWait);
387 if (err)
388 goto fail;
389
390 be->state = XenbusStateInitWait;
391 391
392 /* This kicks hotplug scripts, so do it immediately. */ 392 /* This kicks hotplug scripts, so do it immediately. */
393 err = backend_create_xenvif(be); 393 err = backend_create_xenvif(be);
@@ -492,20 +492,20 @@ static inline void backend_switch_state(struct backend_info *be,
492 492
493/* Handle backend state transitions: 493/* Handle backend state transitions:
494 * 494 *
495 * The backend state starts in InitWait and the following transitions are 495 * The backend state starts in Initialising and the following transitions are
496 * allowed. 496 * allowed.
497 * 497 *
498 * InitWait -> Connected 498 * Initialising -> InitWait -> Connected
499 * 499 * \
500 * ^ \ | 500 * \ ^ \ |
501 * | \ | 501 * \ | \ |
502 * | \ | 502 * \ | \ |
503 * | \ | 503 * \ | \ |
504 * | \ | 504 * \ | \ |
505 * | \ | 505 * \ | \ |
506 * | V V 506 * V | V V
507 * 507 *
508 * Closed <-> Closing 508 * Closed <-> Closing
509 * 509 *
510 * The state argument specifies the eventual state of the backend and the 510 * The state argument specifies the eventual state of the backend and the
511 * function transitions to that state via the shortest path. 511 * function transitions to that state via the shortest path.
@@ -515,6 +515,20 @@ static void set_backend_state(struct backend_info *be,
515{ 515{
516 while (be->state != state) { 516 while (be->state != state) {
517 switch (be->state) { 517 switch (be->state) {
518 case XenbusStateInitialising:
519 switch (state) {
520 case XenbusStateInitWait:
521 case XenbusStateConnected:
522 case XenbusStateClosing:
523 backend_switch_state(be, XenbusStateInitWait);
524 break;
525 case XenbusStateClosed:
526 backend_switch_state(be, XenbusStateClosed);
527 break;
528 default:
529 BUG();
530 }
531 break;
518 case XenbusStateClosed: 532 case XenbusStateClosed:
519 switch (state) { 533 switch (state) {
520 case XenbusStateInitWait: 534 case XenbusStateInitWait:
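set_backend_state() now also handles the Initialising start state, stepping toward the requested state one hop at a time inside its while loop. A compact sketch of that shortest-path stepping, using a hypothetical three-state machine rather than the real xenbus states:

    #include <assert.h>

    enum st { ST_INIT, ST_WAIT, ST_CONNECTED };

    static enum st cur = ST_INIT;

    static void step_to(enum st next) { cur = next; }

    /* Walk toward the target state one allowed transition at a time. */
    static void set_state(enum st target)
    {
        while (cur != target) {
            switch (cur) {
            case ST_INIT:
                step_to(ST_WAIT);       /* Init can only go to Wait */
                break;
            case ST_WAIT:
                assert(target == ST_CONNECTED);
                step_to(ST_CONNECTED);
                break;
            default:
                assert(0);              /* no path, like BUG() */
            }
        }
    }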
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index c2c2c28e6eb5..fbdb2267e460 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -561,7 +561,6 @@ static int nvme_rdma_init_queue(struct nvme_rdma_ctrl *ctrl,
 
         queue = &ctrl->queues[idx];
         queue->ctrl = ctrl;
-        queue->flags = 0;
         init_completion(&queue->cm_done);
 
         if (idx > 0)
@@ -595,6 +594,7 @@ static int nvme_rdma_init_queue(struct nvme_rdma_ctrl *ctrl,
                 goto out_destroy_cm_id;
         }
 
+        clear_bit(NVME_RDMA_Q_DELETING, &queue->flags);
         set_bit(NVME_RDMA_Q_CONNECTED, &queue->flags);
 
         return 0;
diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
index bf40063de202..6d4b68c483f3 100644
--- a/drivers/s390/net/qeth_core.h
+++ b/drivers/s390/net/qeth_core.h
@@ -999,6 +999,7 @@ struct qeth_cmd_buffer *qeth_get_setassparms_cmd(struct qeth_card *,
                                      __u16, __u16,
                                      enum qeth_prot_versions);
 int qeth_set_features(struct net_device *, netdev_features_t);
+int qeth_recover_features(struct net_device *);
 netdev_features_t qeth_fix_features(struct net_device *, netdev_features_t);
 
 /* exports for OSN */
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index 7dba6c8537a1..20cf29613043 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -3619,7 +3619,8 @@ static void qeth_qdio_cq_handler(struct qeth_card *card,
3619 int e; 3619 int e;
3620 3620
3621 e = 0; 3621 e = 0;
3622 while (buffer->element[e].addr) { 3622 while ((e < QDIO_MAX_ELEMENTS_PER_BUFFER) &&
3623 buffer->element[e].addr) {
3623 unsigned long phys_aob_addr; 3624 unsigned long phys_aob_addr;
3624 3625
3625 phys_aob_addr = (unsigned long) buffer->element[e].addr; 3626 phys_aob_addr = (unsigned long) buffer->element[e].addr;
@@ -6131,6 +6132,35 @@ static int qeth_set_ipa_tso(struct qeth_card *card, int on)
6131 return rc; 6132 return rc;
6132} 6133}
6133 6134
6135/* try to restore device features on a device after recovery */
6136int qeth_recover_features(struct net_device *dev)
6137{
6138 struct qeth_card *card = dev->ml_priv;
6139 netdev_features_t recover = dev->features;
6140
6141 if (recover & NETIF_F_IP_CSUM) {
6142 if (qeth_set_ipa_csum(card, 1, IPA_OUTBOUND_CHECKSUM))
6143 recover ^= NETIF_F_IP_CSUM;
6144 }
6145 if (recover & NETIF_F_RXCSUM) {
6146 if (qeth_set_ipa_csum(card, 1, IPA_INBOUND_CHECKSUM))
6147 recover ^= NETIF_F_RXCSUM;
6148 }
6149 if (recover & NETIF_F_TSO) {
6150 if (qeth_set_ipa_tso(card, 1))
6151 recover ^= NETIF_F_TSO;
6152 }
6153
6154 if (recover == dev->features)
6155 return 0;
6156
6157 dev_warn(&card->gdev->dev,
6158 "Device recovery failed to restore all offload features\n");
6159 dev->features = recover;
6160 return -EIO;
6161}
6162EXPORT_SYMBOL_GPL(qeth_recover_features);
6163
6134int qeth_set_features(struct net_device *dev, netdev_features_t features) 6164int qeth_set_features(struct net_device *dev, netdev_features_t features)
6135{ 6165{
6136 struct qeth_card *card = dev->ml_priv; 6166 struct qeth_card *card = dev->ml_priv;
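qeth_recover_features() re-applies each offload after recovery and drops the bits it could not restore, so dev->features stays truthful. A userspace sketch of the clear-what-failed pattern over a feature bitmask; the feature names and try_enable() are hypothetical:

    #include <stdint.h>
    #include <stdio.h>

    #define F_TX_CSUM (1u << 0)
    #define F_RX_CSUM (1u << 1)
    #define F_TSO     (1u << 2)

    /* Pretend TSO cannot be re-enabled on this device. */
    static int try_enable(uint32_t f) { return f == F_TSO ? -1 : 0; }

    static uint32_t recover_features(uint32_t wanted)
    {
        uint32_t have = wanted;

        if ((have & F_TX_CSUM) && try_enable(F_TX_CSUM))
            have &= ~F_TX_CSUM;
        if ((have & F_RX_CSUM) && try_enable(F_RX_CSUM))
            have &= ~F_RX_CSUM;
        if ((have & F_TSO) && try_enable(F_TSO))
            have &= ~F_TSO;

        if (have != wanted)
            fprintf(stderr, "could not restore all offloads\n");
        return have;    /* caller stores this back as the current feature set */
    }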
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index 7bc20c5188bc..bb27058fa9f0 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -1124,14 +1124,11 @@ static int qeth_l2_setup_netdev(struct qeth_card *card)
1124 card->dev->hw_features |= NETIF_F_RXCSUM; 1124 card->dev->hw_features |= NETIF_F_RXCSUM;
1125 card->dev->vlan_features |= NETIF_F_RXCSUM; 1125 card->dev->vlan_features |= NETIF_F_RXCSUM;
1126 } 1126 }
1127 /* Turn on SG per default */
1128 card->dev->features |= NETIF_F_SG;
1129 } 1127 }
1130 card->info.broadcast_capable = 1; 1128 card->info.broadcast_capable = 1;
1131 qeth_l2_request_initial_mac(card); 1129 qeth_l2_request_initial_mac(card);
1132 card->dev->gso_max_size = (QETH_MAX_BUFFER_ELEMENTS(card) - 1) * 1130 card->dev->gso_max_size = (QETH_MAX_BUFFER_ELEMENTS(card) - 1) *
1133 PAGE_SIZE; 1131 PAGE_SIZE;
1134 card->dev->gso_max_segs = (QETH_MAX_BUFFER_ELEMENTS(card) - 1);
1135 SET_NETDEV_DEV(card->dev, &card->gdev->dev); 1132 SET_NETDEV_DEV(card->dev, &card->gdev->dev);
1136 netif_napi_add(card->dev, &card->napi, qeth_l2_poll, QETH_NAPI_WEIGHT); 1133 netif_napi_add(card->dev, &card->napi, qeth_l2_poll, QETH_NAPI_WEIGHT);
1137 netif_carrier_off(card->dev); 1134 netif_carrier_off(card->dev);
@@ -1246,6 +1243,9 @@ contin:
1246 } 1243 }
1247 /* this also sets saved unicast addresses */ 1244 /* this also sets saved unicast addresses */
1248 qeth_l2_set_rx_mode(card->dev); 1245 qeth_l2_set_rx_mode(card->dev);
1246 rtnl_lock();
1247 qeth_recover_features(card->dev);
1248 rtnl_unlock();
1249 } 1249 }
1250 /* let user_space know that device is online */ 1250 /* let user_space know that device is online */
1251 kobject_uevent(&gdev->dev.kobj, KOBJ_CHANGE); 1251 kobject_uevent(&gdev->dev.kobj, KOBJ_CHANGE);
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index 72934666fedf..272d9e7419be 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -257,6 +257,11 @@ int qeth_l3_delete_ip(struct qeth_card *card, struct qeth_ipaddr *tmp_addr)
257 if (addr->in_progress) 257 if (addr->in_progress)
258 return -EINPROGRESS; 258 return -EINPROGRESS;
259 259
260 if (!qeth_card_hw_is_reachable(card)) {
261 addr->disp_flag = QETH_DISP_ADDR_DELETE;
262 return 0;
263 }
264
260 rc = qeth_l3_deregister_addr_entry(card, addr); 265 rc = qeth_l3_deregister_addr_entry(card, addr);
261 266
262 hash_del(&addr->hnode); 267 hash_del(&addr->hnode);
@@ -296,6 +301,11 @@ int qeth_l3_add_ip(struct qeth_card *card, struct qeth_ipaddr *tmp_addr)
296 hash_add(card->ip_htable, &addr->hnode, 301 hash_add(card->ip_htable, &addr->hnode,
297 qeth_l3_ipaddr_hash(addr)); 302 qeth_l3_ipaddr_hash(addr));
298 303
304 if (!qeth_card_hw_is_reachable(card)) {
305 addr->disp_flag = QETH_DISP_ADDR_ADD;
306 return 0;
307 }
308
299 /* qeth_l3_register_addr_entry can go to sleep 309 /* qeth_l3_register_addr_entry can go to sleep
300 * if we add a IPV4 addr. It is caused by the reason 310 * if we add a IPV4 addr. It is caused by the reason
301 * that SETIP ipa cmd starts ARP staff for IPV4 addr. 311 * that SETIP ipa cmd starts ARP staff for IPV4 addr.
@@ -390,12 +400,16 @@ static void qeth_l3_recover_ip(struct qeth_card *card)
390 int i; 400 int i;
391 int rc; 401 int rc;
392 402
393 QETH_CARD_TEXT(card, 4, "recoverip"); 403 QETH_CARD_TEXT(card, 4, "recovrip");
394 404
395 spin_lock_bh(&card->ip_lock); 405 spin_lock_bh(&card->ip_lock);
396 406
397 hash_for_each_safe(card->ip_htable, i, tmp, addr, hnode) { 407 hash_for_each_safe(card->ip_htable, i, tmp, addr, hnode) {
398 if (addr->disp_flag == QETH_DISP_ADDR_ADD) { 408 if (addr->disp_flag == QETH_DISP_ADDR_DELETE) {
409 qeth_l3_deregister_addr_entry(card, addr);
410 hash_del(&addr->hnode);
411 kfree(addr);
412 } else if (addr->disp_flag == QETH_DISP_ADDR_ADD) {
399 if (addr->proto == QETH_PROT_IPV4) { 413 if (addr->proto == QETH_PROT_IPV4) {
400 addr->in_progress = 1; 414 addr->in_progress = 1;
401 spin_unlock_bh(&card->ip_lock); 415 spin_unlock_bh(&card->ip_lock);
@@ -407,10 +421,8 @@ static void qeth_l3_recover_ip(struct qeth_card *card)
407 421
408 if (!rc) { 422 if (!rc) {
409 addr->disp_flag = QETH_DISP_ADDR_DO_NOTHING; 423 addr->disp_flag = QETH_DISP_ADDR_DO_NOTHING;
410 if (addr->ref_counter < 1) { 424 if (addr->ref_counter < 1)
411 qeth_l3_delete_ip(card, addr); 425 qeth_l3_delete_ip(card, addr);
412 kfree(addr);
413 }
414 } else { 426 } else {
415 hash_del(&addr->hnode); 427 hash_del(&addr->hnode);
416 kfree(addr); 428 kfree(addr);
@@ -689,7 +701,7 @@ int qeth_l3_add_vipa(struct qeth_card *card, enum qeth_prot_versions proto,
689 701
690 spin_lock_bh(&card->ip_lock); 702 spin_lock_bh(&card->ip_lock);
691 703
692 if (!qeth_l3_ip_from_hash(card, ipaddr)) 704 if (qeth_l3_ip_from_hash(card, ipaddr))
693 rc = -EEXIST; 705 rc = -EEXIST;
694 else 706 else
695 qeth_l3_add_ip(card, ipaddr); 707 qeth_l3_add_ip(card, ipaddr);
@@ -757,7 +769,7 @@ int qeth_l3_add_rxip(struct qeth_card *card, enum qeth_prot_versions proto,
757 769
758 spin_lock_bh(&card->ip_lock); 770 spin_lock_bh(&card->ip_lock);
759 771
760 if (!qeth_l3_ip_from_hash(card, ipaddr)) 772 if (qeth_l3_ip_from_hash(card, ipaddr))
761 rc = -EEXIST; 773 rc = -EEXIST;
762 else 774 else
763 qeth_l3_add_ip(card, ipaddr); 775 qeth_l3_add_ip(card, ipaddr);
@@ -3108,7 +3120,6 @@ static int qeth_l3_setup_netdev(struct qeth_card *card)
3108 card->dev->vlan_features = NETIF_F_SG | 3120 card->dev->vlan_features = NETIF_F_SG |
3109 NETIF_F_RXCSUM | NETIF_F_IP_CSUM | 3121 NETIF_F_RXCSUM | NETIF_F_IP_CSUM |
3110 NETIF_F_TSO; 3122 NETIF_F_TSO;
3111 card->dev->features = NETIF_F_SG;
3112 } 3123 }
3113 } 3124 }
3114 } else if (card->info.type == QETH_CARD_TYPE_IQD) { 3125 } else if (card->info.type == QETH_CARD_TYPE_IQD) {
@@ -3136,7 +3147,6 @@ static int qeth_l3_setup_netdev(struct qeth_card *card)
3136 netif_keep_dst(card->dev); 3147 netif_keep_dst(card->dev);
3137 card->dev->gso_max_size = (QETH_MAX_BUFFER_ELEMENTS(card) - 1) * 3148 card->dev->gso_max_size = (QETH_MAX_BUFFER_ELEMENTS(card) - 1) *
3138 PAGE_SIZE; 3149 PAGE_SIZE;
3139 card->dev->gso_max_segs = (QETH_MAX_BUFFER_ELEMENTS(card) - 1);
3140 3150
3141 SET_NETDEV_DEV(card->dev, &card->gdev->dev); 3151 SET_NETDEV_DEV(card->dev, &card->gdev->dev);
3142 netif_napi_add(card->dev, &card->napi, qeth_l3_poll, QETH_NAPI_WEIGHT); 3152 netif_napi_add(card->dev, &card->napi, qeth_l3_poll, QETH_NAPI_WEIGHT);
@@ -3269,6 +3279,7 @@ contin:
3269 else 3279 else
3270 dev_open(card->dev); 3280 dev_open(card->dev);
3271 qeth_l3_set_multicast_list(card->dev); 3281 qeth_l3_set_multicast_list(card->dev);
3282 qeth_recover_features(card->dev);
3272 rtnl_unlock(); 3283 rtnl_unlock();
3273 } 3284 }
3274 qeth_trace_features(card); 3285 qeth_trace_features(card);
diff --git a/drivers/s390/net/qeth_l3_sys.c b/drivers/s390/net/qeth_l3_sys.c
index 65645b11fc19..0e00a5ce0f00 100644
--- a/drivers/s390/net/qeth_l3_sys.c
+++ b/drivers/s390/net/qeth_l3_sys.c
@@ -297,7 +297,9 @@ static ssize_t qeth_l3_dev_hsuid_store(struct device *dev,
297 addr->u.a6.pfxlen = 0; 297 addr->u.a6.pfxlen = 0;
298 addr->type = QETH_IP_TYPE_NORMAL; 298 addr->type = QETH_IP_TYPE_NORMAL;
299 299
300 spin_lock_bh(&card->ip_lock);
300 qeth_l3_delete_ip(card, addr); 301 qeth_l3_delete_ip(card, addr);
302 spin_unlock_bh(&card->ip_lock);
301 kfree(addr); 303 kfree(addr);
302 } 304 }
303 305
@@ -329,7 +331,10 @@ static ssize_t qeth_l3_dev_hsuid_store(struct device *dev,
329 addr->type = QETH_IP_TYPE_NORMAL; 331 addr->type = QETH_IP_TYPE_NORMAL;
330 } else 332 } else
331 return -ENOMEM; 333 return -ENOMEM;
334
335 spin_lock_bh(&card->ip_lock);
332 qeth_l3_add_ip(card, addr); 336 qeth_l3_add_ip(card, addr);
337 spin_unlock_bh(&card->ip_lock);
333 kfree(addr); 338 kfree(addr);
334 339
335 return count; 340 return count;
diff --git a/drivers/staging/media/cec/TODO b/drivers/staging/media/cec/TODO
index a10d4f82b954..13224694a8ae 100644
--- a/drivers/staging/media/cec/TODO
+++ b/drivers/staging/media/cec/TODO
@@ -12,6 +12,7 @@ Hopefully this will happen later in 2016.
 
 Other TODOs:
 
+- There are two possible replies to CEC_MSG_INITIATE_ARC. How to handle that?
 - Add a flag to inhibit passing CEC RC messages to the rc subsystem.
   Applications should be able to choose this when calling S_LOG_ADDRS.
 - If the reply field of cec_msg is set then when the reply arrives it
diff --git a/drivers/staging/media/cec/cec-adap.c b/drivers/staging/media/cec/cec-adap.c
index b2393bbacb26..946986f3ac0d 100644
--- a/drivers/staging/media/cec/cec-adap.c
+++ b/drivers/staging/media/cec/cec-adap.c
@@ -124,10 +124,10 @@ static void cec_queue_event(struct cec_adapter *adap,
124 u64 ts = ktime_get_ns(); 124 u64 ts = ktime_get_ns();
125 struct cec_fh *fh; 125 struct cec_fh *fh;
126 126
127 mutex_lock(&adap->devnode.fhs_lock); 127 mutex_lock(&adap->devnode.lock);
128 list_for_each_entry(fh, &adap->devnode.fhs, list) 128 list_for_each_entry(fh, &adap->devnode.fhs, list)
129 cec_queue_event_fh(fh, ev, ts); 129 cec_queue_event_fh(fh, ev, ts);
130 mutex_unlock(&adap->devnode.fhs_lock); 130 mutex_unlock(&adap->devnode.lock);
131} 131}
132 132
133/* 133/*
@@ -191,12 +191,12 @@ static void cec_queue_msg_monitor(struct cec_adapter *adap,
191 u32 monitor_mode = valid_la ? CEC_MODE_MONITOR : 191 u32 monitor_mode = valid_la ? CEC_MODE_MONITOR :
192 CEC_MODE_MONITOR_ALL; 192 CEC_MODE_MONITOR_ALL;
193 193
194 mutex_lock(&adap->devnode.fhs_lock); 194 mutex_lock(&adap->devnode.lock);
195 list_for_each_entry(fh, &adap->devnode.fhs, list) { 195 list_for_each_entry(fh, &adap->devnode.fhs, list) {
196 if (fh->mode_follower >= monitor_mode) 196 if (fh->mode_follower >= monitor_mode)
197 cec_queue_msg_fh(fh, msg); 197 cec_queue_msg_fh(fh, msg);
198 } 198 }
199 mutex_unlock(&adap->devnode.fhs_lock); 199 mutex_unlock(&adap->devnode.lock);
200} 200}
201 201
202/* 202/*
@@ -207,12 +207,12 @@ static void cec_queue_msg_followers(struct cec_adapter *adap,
207{ 207{
208 struct cec_fh *fh; 208 struct cec_fh *fh;
209 209
210 mutex_lock(&adap->devnode.fhs_lock); 210 mutex_lock(&adap->devnode.lock);
211 list_for_each_entry(fh, &adap->devnode.fhs, list) { 211 list_for_each_entry(fh, &adap->devnode.fhs, list) {
212 if (fh->mode_follower == CEC_MODE_FOLLOWER) 212 if (fh->mode_follower == CEC_MODE_FOLLOWER)
213 cec_queue_msg_fh(fh, msg); 213 cec_queue_msg_fh(fh, msg);
214 } 214 }
215 mutex_unlock(&adap->devnode.fhs_lock); 215 mutex_unlock(&adap->devnode.lock);
216} 216}
217 217
218/* Notify userspace of an adapter state change. */ 218/* Notify userspace of an adapter state change. */
@@ -851,6 +851,9 @@ void cec_received_msg(struct cec_adapter *adap, struct cec_msg *msg)
851 if (!valid_la || msg->len <= 1) 851 if (!valid_la || msg->len <= 1)
852 return; 852 return;
853 853
854 if (adap->log_addrs.log_addr_mask == 0)
855 return;
856
854 /* 857 /*
855 * Process the message on the protocol level. If is_reply is true, 858 * Process the message on the protocol level. If is_reply is true,
856 * then cec_receive_notify() won't pass on the reply to the listener(s) 859 * then cec_receive_notify() won't pass on the reply to the listener(s)
@@ -1047,11 +1050,17 @@ static int cec_config_thread_func(void *arg)
1047 dprintk(1, "could not claim LA %d\n", i); 1050 dprintk(1, "could not claim LA %d\n", i);
1048 } 1051 }
1049 1052
1053 if (adap->log_addrs.log_addr_mask == 0 &&
1054 !(las->flags & CEC_LOG_ADDRS_FL_ALLOW_UNREG_FALLBACK))
1055 goto unconfigure;
1056
1050configured: 1057configured:
1051 if (adap->log_addrs.log_addr_mask == 0) { 1058 if (adap->log_addrs.log_addr_mask == 0) {
1052 /* Fall back to unregistered */ 1059 /* Fall back to unregistered */
1053 las->log_addr[0] = CEC_LOG_ADDR_UNREGISTERED; 1060 las->log_addr[0] = CEC_LOG_ADDR_UNREGISTERED;
1054 las->log_addr_mask = 1 << las->log_addr[0]; 1061 las->log_addr_mask = 1 << las->log_addr[0];
1062 for (i = 1; i < las->num_log_addrs; i++)
1063 las->log_addr[i] = CEC_LOG_ADDR_INVALID;
1055 } 1064 }
1056 adap->is_configured = true; 1065 adap->is_configured = true;
1057 adap->is_configuring = false; 1066 adap->is_configuring = false;
@@ -1070,6 +1079,8 @@ configured:
1070 cec_report_features(adap, i); 1079 cec_report_features(adap, i);
1071 cec_report_phys_addr(adap, i); 1080 cec_report_phys_addr(adap, i);
1072 } 1081 }
1082 for (i = las->num_log_addrs; i < CEC_MAX_LOG_ADDRS; i++)
1083 las->log_addr[i] = CEC_LOG_ADDR_INVALID;
1073 mutex_lock(&adap->lock); 1084 mutex_lock(&adap->lock);
1074 adap->kthread_config = NULL; 1085 adap->kthread_config = NULL;
1075 mutex_unlock(&adap->lock); 1086 mutex_unlock(&adap->lock);
@@ -1398,7 +1409,6 @@ static int cec_receive_notify(struct cec_adapter *adap, struct cec_msg *msg,
1398 u8 init_laddr = cec_msg_initiator(msg); 1409 u8 init_laddr = cec_msg_initiator(msg);
1399 u8 devtype = cec_log_addr2dev(adap, dest_laddr); 1410 u8 devtype = cec_log_addr2dev(adap, dest_laddr);
1400 int la_idx = cec_log_addr2idx(adap, dest_laddr); 1411 int la_idx = cec_log_addr2idx(adap, dest_laddr);
1401 bool is_directed = la_idx >= 0;
1402 bool from_unregistered = init_laddr == 0xf; 1412 bool from_unregistered = init_laddr == 0xf;
1403 struct cec_msg tx_cec_msg = { }; 1413 struct cec_msg tx_cec_msg = { };
1404 1414
@@ -1560,7 +1570,7 @@ static int cec_receive_notify(struct cec_adapter *adap, struct cec_msg *msg,
1560 * Unprocessed messages are aborted if userspace isn't doing 1570 * Unprocessed messages are aborted if userspace isn't doing
1561 * any processing either. 1571 * any processing either.
1562 */ 1572 */
1563 if (is_directed && !is_reply && !adap->follower_cnt && 1573 if (!is_broadcast && !is_reply && !adap->follower_cnt &&
1564 !adap->cec_follower && msg->msg[1] != CEC_MSG_FEATURE_ABORT) 1574 !adap->cec_follower && msg->msg[1] != CEC_MSG_FEATURE_ABORT)
1565 return cec_feature_abort(adap, msg); 1575 return cec_feature_abort(adap, msg);
1566 break; 1576 break;
diff --git a/drivers/staging/media/cec/cec-api.c b/drivers/staging/media/cec/cec-api.c
index 7be7615a0fdf..e274e2f22398 100644
--- a/drivers/staging/media/cec/cec-api.c
+++ b/drivers/staging/media/cec/cec-api.c
@@ -162,7 +162,7 @@ static long cec_adap_s_log_addrs(struct cec_adapter *adap, struct cec_fh *fh,
162 return -ENOTTY; 162 return -ENOTTY;
163 if (copy_from_user(&log_addrs, parg, sizeof(log_addrs))) 163 if (copy_from_user(&log_addrs, parg, sizeof(log_addrs)))
164 return -EFAULT; 164 return -EFAULT;
165 log_addrs.flags = 0; 165 log_addrs.flags &= CEC_LOG_ADDRS_FL_ALLOW_UNREG_FALLBACK;
166 mutex_lock(&adap->lock); 166 mutex_lock(&adap->lock);
167 if (!adap->is_configuring && 167 if (!adap->is_configuring &&
168 (!log_addrs.num_log_addrs || !adap->is_configured) && 168 (!log_addrs.num_log_addrs || !adap->is_configured) &&
@@ -435,7 +435,7 @@ static long cec_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
435 void __user *parg = (void __user *)arg; 435 void __user *parg = (void __user *)arg;
436 436
437 if (!devnode->registered) 437 if (!devnode->registered)
438 return -EIO; 438 return -ENODEV;
439 439
440 switch (cmd) { 440 switch (cmd) {
441 case CEC_ADAP_G_CAPS: 441 case CEC_ADAP_G_CAPS:
@@ -508,14 +508,14 @@ static int cec_open(struct inode *inode, struct file *filp)
508 508
509 filp->private_data = fh; 509 filp->private_data = fh;
510 510
511 mutex_lock(&devnode->fhs_lock); 511 mutex_lock(&devnode->lock);
512 /* Queue up initial state events */ 512 /* Queue up initial state events */
513 ev_state.state_change.phys_addr = adap->phys_addr; 513 ev_state.state_change.phys_addr = adap->phys_addr;
514 ev_state.state_change.log_addr_mask = adap->log_addrs.log_addr_mask; 514 ev_state.state_change.log_addr_mask = adap->log_addrs.log_addr_mask;
515 cec_queue_event_fh(fh, &ev_state, 0); 515 cec_queue_event_fh(fh, &ev_state, 0);
516 516
517 list_add(&fh->list, &devnode->fhs); 517 list_add(&fh->list, &devnode->fhs);
518 mutex_unlock(&devnode->fhs_lock); 518 mutex_unlock(&devnode->lock);
519 519
520 return 0; 520 return 0;
521} 521}
@@ -540,9 +540,9 @@ static int cec_release(struct inode *inode, struct file *filp)
540 cec_monitor_all_cnt_dec(adap); 540 cec_monitor_all_cnt_dec(adap);
541 mutex_unlock(&adap->lock); 541 mutex_unlock(&adap->lock);
542 542
543 mutex_lock(&devnode->fhs_lock); 543 mutex_lock(&devnode->lock);
544 list_del(&fh->list); 544 list_del(&fh->list);
545 mutex_unlock(&devnode->fhs_lock); 545 mutex_unlock(&devnode->lock);
546 546
547 /* Unhook pending transmits from this filehandle. */ 547 /* Unhook pending transmits from this filehandle. */
548 mutex_lock(&adap->lock); 548 mutex_lock(&adap->lock);
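cec_adap_s_log_addrs() no longer zeroes the user-supplied flags but masks them down to the one bit the kernel understands, so unknown bits can never reach the core. The same sanitize-then-use pattern in a small sketch; the flag name matches the uapi header, the surrounding struct is illustrative:

    #include <stdint.h>

    #define CEC_LOG_ADDRS_FL_ALLOW_UNREG_FALLBACK (1u << 0)
    #define SUPPORTED_FLAGS CEC_LOG_ADDRS_FL_ALLOW_UNREG_FALLBACK

    struct log_addrs { uint32_t flags; /* ... */ };

    static void sanitize(struct log_addrs *la)
    {
        /* keep only the bits this kernel knows about */
        la->flags &= SUPPORTED_FLAGS;
    }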
diff --git a/drivers/staging/media/cec/cec-core.c b/drivers/staging/media/cec/cec-core.c
index 112a5fae12f5..3b1e4d2b190d 100644
--- a/drivers/staging/media/cec/cec-core.c
+++ b/drivers/staging/media/cec/cec-core.c
@@ -51,31 +51,29 @@ int cec_get_device(struct cec_devnode *devnode)
51{ 51{
52 /* 52 /*
53 * Check if the cec device is available. This needs to be done with 53 * Check if the cec device is available. This needs to be done with
54 * the cec_devnode_lock held to prevent an open/unregister race: 54 * the devnode->lock held to prevent an open/unregister race:
55 * without the lock, the device could be unregistered and freed between 55 * without the lock, the device could be unregistered and freed between
56 * the devnode->registered check and get_device() calls, leading to 56 * the devnode->registered check and get_device() calls, leading to
57 * a crash. 57 * a crash.
58 */ 58 */
59 mutex_lock(&cec_devnode_lock); 59 mutex_lock(&devnode->lock);
60 /* 60 /*
61 * return ENXIO if the cec device has been removed 61 * return ENXIO if the cec device has been removed
62 * already or if it is not registered anymore. 62 * already or if it is not registered anymore.
63 */ 63 */
64 if (!devnode->registered) { 64 if (!devnode->registered) {
65 mutex_unlock(&cec_devnode_lock); 65 mutex_unlock(&devnode->lock);
66 return -ENXIO; 66 return -ENXIO;
67 } 67 }
68 /* and increase the device refcount */ 68 /* and increase the device refcount */
69 get_device(&devnode->dev); 69 get_device(&devnode->dev);
70 mutex_unlock(&cec_devnode_lock); 70 mutex_unlock(&devnode->lock);
71 return 0; 71 return 0;
72} 72}
73 73
74void cec_put_device(struct cec_devnode *devnode) 74void cec_put_device(struct cec_devnode *devnode)
75{ 75{
76 mutex_lock(&cec_devnode_lock);
77 put_device(&devnode->dev); 76 put_device(&devnode->dev);
78 mutex_unlock(&cec_devnode_lock);
79} 77}
80 78
81/* Called when the last user of the cec device exits. */ 79/* Called when the last user of the cec device exits. */
@@ -84,11 +82,10 @@ static void cec_devnode_release(struct device *cd)
84 struct cec_devnode *devnode = to_cec_devnode(cd); 82 struct cec_devnode *devnode = to_cec_devnode(cd);
85 83
86 mutex_lock(&cec_devnode_lock); 84 mutex_lock(&cec_devnode_lock);
87
88 /* Mark device node number as free */ 85 /* Mark device node number as free */
89 clear_bit(devnode->minor, cec_devnode_nums); 86 clear_bit(devnode->minor, cec_devnode_nums);
90
91 mutex_unlock(&cec_devnode_lock); 87 mutex_unlock(&cec_devnode_lock);
88
92 cec_delete_adapter(to_cec_adapter(devnode)); 89 cec_delete_adapter(to_cec_adapter(devnode));
93} 90}
94 91
@@ -117,7 +114,7 @@ static int __must_check cec_devnode_register(struct cec_devnode *devnode,
117 114
118 /* Initialization */ 115 /* Initialization */
119 INIT_LIST_HEAD(&devnode->fhs); 116 INIT_LIST_HEAD(&devnode->fhs);
120 mutex_init(&devnode->fhs_lock); 117 mutex_init(&devnode->lock);
121 118
122 /* Part 1: Find a free minor number */ 119 /* Part 1: Find a free minor number */
123 mutex_lock(&cec_devnode_lock); 120 mutex_lock(&cec_devnode_lock);
@@ -160,7 +157,9 @@ static int __must_check cec_devnode_register(struct cec_devnode *devnode,
160cdev_del: 157cdev_del:
161 cdev_del(&devnode->cdev); 158 cdev_del(&devnode->cdev);
162clr_bit: 159clr_bit:
160 mutex_lock(&cec_devnode_lock);
163 clear_bit(devnode->minor, cec_devnode_nums); 161 clear_bit(devnode->minor, cec_devnode_nums);
162 mutex_unlock(&cec_devnode_lock);
164 return ret; 163 return ret;
165} 164}
166 165
@@ -177,17 +176,21 @@ static void cec_devnode_unregister(struct cec_devnode *devnode)
177{ 176{
178 struct cec_fh *fh; 177 struct cec_fh *fh;
179 178
179 mutex_lock(&devnode->lock);
180
180 /* Check if devnode was never registered or already unregistered */ 181 /* Check if devnode was never registered or already unregistered */
181 if (!devnode->registered || devnode->unregistered) 182 if (!devnode->registered || devnode->unregistered) {
183 mutex_unlock(&devnode->lock);
182 return; 184 return;
185 }
183 186
184 mutex_lock(&devnode->fhs_lock);
185 list_for_each_entry(fh, &devnode->fhs, list) 187 list_for_each_entry(fh, &devnode->fhs, list)
186 wake_up_interruptible(&fh->wait); 188 wake_up_interruptible(&fh->wait);
187 mutex_unlock(&devnode->fhs_lock);
188 189
189 devnode->registered = false; 190 devnode->registered = false;
190 devnode->unregistered = true; 191 devnode->unregistered = true;
192 mutex_unlock(&devnode->lock);
193
191 device_del(&devnode->dev); 194 device_del(&devnode->dev);
192 cdev_del(&devnode->cdev); 195 cdev_del(&devnode->cdev);
193 put_device(&devnode->dev); 196 put_device(&devnode->dev);
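cec-core.c replaces the global cec_devnode_lock with the device's own lock for the open/unregister race, so the registered check and the get_device() happen under the same per-device mutex. A pthread-based sketch of guarding a refcount grab behind a per-object lock; the refcount field is a stand-in for the kernel's struct device reference:

    #include <errno.h>
    #include <pthread.h>
    #include <stdbool.h>

    struct devnode {
        pthread_mutex_t lock;
        bool registered;
        int refcount;
    };

    static int get_device(struct devnode *d)
    {
        int ret = 0;

        pthread_mutex_lock(&d->lock);
        if (!d->registered)
            ret = -ENXIO;       /* already unregistered */
        else
            d->refcount++;      /* safe: it cannot vanish while we hold the lock */
        pthread_mutex_unlock(&d->lock);
        return ret;
    }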
diff --git a/drivers/staging/media/pulse8-cec/pulse8-cec.c b/drivers/staging/media/pulse8-cec/pulse8-cec.c
index 94f8590492dc..ed8bd95ad6d0 100644
--- a/drivers/staging/media/pulse8-cec/pulse8-cec.c
+++ b/drivers/staging/media/pulse8-cec/pulse8-cec.c
@@ -114,14 +114,11 @@ static void pulse8_irq_work_handler(struct work_struct *work)
114 cec_transmit_done(pulse8->adap, CEC_TX_STATUS_OK, 114 cec_transmit_done(pulse8->adap, CEC_TX_STATUS_OK,
115 0, 0, 0, 0); 115 0, 0, 0, 0);
116 break; 116 break;
117 case MSGCODE_TRANSMIT_FAILED_LINE:
118 cec_transmit_done(pulse8->adap, CEC_TX_STATUS_ARB_LOST,
119 1, 0, 0, 0);
120 break;
121 case MSGCODE_TRANSMIT_FAILED_ACK: 117 case MSGCODE_TRANSMIT_FAILED_ACK:
122 cec_transmit_done(pulse8->adap, CEC_TX_STATUS_NACK, 118 cec_transmit_done(pulse8->adap, CEC_TX_STATUS_NACK,
123 0, 1, 0, 0); 119 0, 1, 0, 0);
124 break; 120 break;
121 case MSGCODE_TRANSMIT_FAILED_LINE:
125 case MSGCODE_TRANSMIT_FAILED_TIMEOUT_DATA: 122 case MSGCODE_TRANSMIT_FAILED_TIMEOUT_DATA:
126 case MSGCODE_TRANSMIT_FAILED_TIMEOUT_LINE: 123 case MSGCODE_TRANSMIT_FAILED_TIMEOUT_LINE:
127 cec_transmit_done(pulse8->adap, CEC_TX_STATUS_ERROR, 124 cec_transmit_done(pulse8->adap, CEC_TX_STATUS_ERROR,
@@ -170,6 +167,9 @@ static irqreturn_t pulse8_interrupt(struct serio *serio, unsigned char data,
170 case MSGCODE_TRANSMIT_FAILED_TIMEOUT_LINE: 167 case MSGCODE_TRANSMIT_FAILED_TIMEOUT_LINE:
171 schedule_work(&pulse8->work); 168 schedule_work(&pulse8->work);
172 break; 169 break;
170 case MSGCODE_HIGH_ERROR:
171 case MSGCODE_LOW_ERROR:
172 case MSGCODE_RECEIVE_FAILED:
173 case MSGCODE_TIMEOUT_ERROR: 173 case MSGCODE_TIMEOUT_ERROR:
174 break; 174 break;
175 case MSGCODE_COMMAND_ACCEPTED: 175 case MSGCODE_COMMAND_ACCEPTED:
@@ -388,7 +388,7 @@ static int pulse8_cec_adap_transmit(struct cec_adapter *adap, u8 attempts,
388 int err; 388 int err;
389 389
390 cmd[0] = MSGCODE_TRANSMIT_IDLETIME; 390 cmd[0] = MSGCODE_TRANSMIT_IDLETIME;
391 cmd[1] = 3; 391 cmd[1] = signal_free_time;
392 err = pulse8_send_and_wait(pulse8, cmd, 2, 392 err = pulse8_send_and_wait(pulse8, cmd, 2,
393 MSGCODE_COMMAND_ACCEPTED, 1); 393 MSGCODE_COMMAND_ACCEPTED, 1);
394 cmd[0] = MSGCODE_TRANSMIT_ACK_POLARITY; 394 cmd[0] = MSGCODE_TRANSMIT_ACK_POLARITY;
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 38c2df84cabd..665da8f66ff1 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -4271,13 +4271,10 @@ int btrfs_check_data_free_space(struct inode *inode, u64 start, u64 len)
4271 if (ret < 0) 4271 if (ret < 0)
4272 return ret; 4272 return ret;
4273 4273
4274 /* 4274 /* Use new btrfs_qgroup_reserve_data to reserve precious data space. */
4275 * Use new btrfs_qgroup_reserve_data to reserve precious data space
4276 *
4277 * TODO: Find a good method to avoid reserve data space for NOCOW
4278 * range, but don't impact performance on quota disable case.
4279 */
4280 ret = btrfs_qgroup_reserve_data(inode, start, len); 4275 ret = btrfs_qgroup_reserve_data(inode, start, len);
4276 if (ret)
4277 btrfs_free_reserved_data_space_noquota(inode, start, len);
4281 return ret; 4278 return ret;
4282} 4279}
4283 4280
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index b2a2da5893af..7fd939bfbd99 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -1634,6 +1634,9 @@ static noinline int btrfs_ioctl_snap_create_transid(struct file *file,
1634 int namelen; 1634 int namelen;
1635 int ret = 0; 1635 int ret = 0;
1636 1636
1637 if (!S_ISDIR(file_inode(file)->i_mode))
1638 return -ENOTDIR;
1639
1637 ret = mnt_want_write_file(file); 1640 ret = mnt_want_write_file(file);
1638 if (ret) 1641 if (ret)
1639 goto out; 1642 goto out;
@@ -1691,6 +1694,9 @@ static noinline int btrfs_ioctl_snap_create(struct file *file,
1691 struct btrfs_ioctl_vol_args *vol_args; 1694 struct btrfs_ioctl_vol_args *vol_args;
1692 int ret; 1695 int ret;
1693 1696
1697 if (!S_ISDIR(file_inode(file)->i_mode))
1698 return -ENOTDIR;
1699
1694 vol_args = memdup_user(arg, sizeof(*vol_args)); 1700 vol_args = memdup_user(arg, sizeof(*vol_args));
1695 if (IS_ERR(vol_args)) 1701 if (IS_ERR(vol_args))
1696 return PTR_ERR(vol_args); 1702 return PTR_ERR(vol_args);
@@ -1714,6 +1720,9 @@ static noinline int btrfs_ioctl_snap_create_v2(struct file *file,
1714 bool readonly = false; 1720 bool readonly = false;
1715 struct btrfs_qgroup_inherit *inherit = NULL; 1721 struct btrfs_qgroup_inherit *inherit = NULL;
1716 1722
1723 if (!S_ISDIR(file_inode(file)->i_mode))
1724 return -ENOTDIR;
1725
1717 vol_args = memdup_user(arg, sizeof(*vol_args)); 1726 vol_args = memdup_user(arg, sizeof(*vol_args));
1718 if (IS_ERR(vol_args)) 1727 if (IS_ERR(vol_args))
1719 return PTR_ERR(vol_args); 1728 return PTR_ERR(vol_args);
@@ -2357,6 +2366,9 @@ static noinline int btrfs_ioctl_snap_destroy(struct file *file,
2357 int ret; 2366 int ret;
2358 int err = 0; 2367 int err = 0;
2359 2368
2369 if (!S_ISDIR(dir->i_mode))
2370 return -ENOTDIR;
2371
2360 vol_args = memdup_user(arg, sizeof(*vol_args)); 2372 vol_args = memdup_user(arg, sizeof(*vol_args));
2361 if (IS_ERR(vol_args)) 2373 if (IS_ERR(vol_args))
2362 return PTR_ERR(vol_args); 2374 return PTR_ERR(vol_args);
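The btrfs ioctls now reject non-directory targets up front with -ENOTDIR instead of failing deeper in the call chain. Early validation like this keeps the error path cheap; a minimal sketch of the same shape, stat-based since a userspace program has no inode to inspect:

    #include <errno.h>
    #include <sys/stat.h>

    /* Refuse to operate on anything that is not a directory. */
    static int require_dir(const char *path)
    {
        struct stat st;

        if (stat(path, &st) != 0)
            return -errno;
        if (!S_ISDIR(st.st_mode))
            return -ENOTDIR;
        return 0;
    }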
diff --git a/fs/configfs/file.c b/fs/configfs/file.c
index c30cf49b69d2..2c6312db8516 100644
--- a/fs/configfs/file.c
+++ b/fs/configfs/file.c
@@ -333,6 +333,7 @@ configfs_write_bin_file(struct file *file, const char __user *buf,
         if (bin_attr->cb_max_size &&
             *ppos + count > bin_attr->cb_max_size) {
                 len = -EFBIG;
+                goto out;
         }
 
         tbuf = vmalloc(*ppos + count);
diff --git a/include/linux/cec-funcs.h b/include/linux/cec-funcs.h
index 82c3d3b7269d..138bbf721e70 100644
--- a/include/linux/cec-funcs.h
+++ b/include/linux/cec-funcs.h
@@ -162,10 +162,11 @@ static inline void cec_msg_standby(struct cec_msg *msg)
162 162
163 163
164/* One Touch Record Feature */ 164/* One Touch Record Feature */
165static inline void cec_msg_record_off(struct cec_msg *msg) 165static inline void cec_msg_record_off(struct cec_msg *msg, bool reply)
166{ 166{
167 msg->len = 2; 167 msg->len = 2;
168 msg->msg[1] = CEC_MSG_RECORD_OFF; 168 msg->msg[1] = CEC_MSG_RECORD_OFF;
169 msg->reply = reply ? CEC_MSG_RECORD_STATUS : 0;
169} 170}
170 171
171struct cec_op_arib_data { 172struct cec_op_arib_data {
@@ -227,7 +228,7 @@ static inline void cec_set_digital_service_id(__u8 *msg,
227 if (digital->service_id_method == CEC_OP_SERVICE_ID_METHOD_BY_CHANNEL) { 228 if (digital->service_id_method == CEC_OP_SERVICE_ID_METHOD_BY_CHANNEL) {
228 *msg++ = (digital->channel.channel_number_fmt << 2) | 229 *msg++ = (digital->channel.channel_number_fmt << 2) |
229 (digital->channel.major >> 8); 230 (digital->channel.major >> 8);
230 *msg++ = digital->channel.major && 0xff; 231 *msg++ = digital->channel.major & 0xff;
231 *msg++ = digital->channel.minor >> 8; 232 *msg++ = digital->channel.minor >> 8;
232 *msg++ = digital->channel.minor & 0xff; 233 *msg++ = digital->channel.minor & 0xff;
233 *msg++ = 0; 234 *msg++ = 0;
@@ -323,6 +324,7 @@ static inline void cec_msg_record_on_phys_addr(struct cec_msg *msg,
323} 324}
324 325
325static inline void cec_msg_record_on(struct cec_msg *msg, 326static inline void cec_msg_record_on(struct cec_msg *msg,
327 bool reply,
326 const struct cec_op_record_src *rec_src) 328 const struct cec_op_record_src *rec_src)
327{ 329{
328 switch (rec_src->type) { 330 switch (rec_src->type) {
@@ -346,6 +348,7 @@ static inline void cec_msg_record_on(struct cec_msg *msg,
346 rec_src->ext_phys_addr.phys_addr); 348 rec_src->ext_phys_addr.phys_addr);
347 break; 349 break;
348 } 350 }
351 msg->reply = reply ? CEC_MSG_RECORD_STATUS : 0;
349} 352}
350 353
351static inline void cec_ops_record_on(const struct cec_msg *msg, 354static inline void cec_ops_record_on(const struct cec_msg *msg,
@@ -1141,6 +1144,75 @@ static inline void cec_msg_give_device_vendor_id(struct cec_msg *msg,
1141 msg->reply = reply ? CEC_MSG_DEVICE_VENDOR_ID : 0; 1144 msg->reply = reply ? CEC_MSG_DEVICE_VENDOR_ID : 0;
1142} 1145}
1143 1146
1147static inline void cec_msg_vendor_command(struct cec_msg *msg,
1148 __u8 size, const __u8 *vendor_cmd)
1149{
1150 if (size > 14)
1151 size = 14;
1152 msg->len = 2 + size;
1153 msg->msg[1] = CEC_MSG_VENDOR_COMMAND;
1154 memcpy(msg->msg + 2, vendor_cmd, size);
1155}
1156
1157static inline void cec_ops_vendor_command(const struct cec_msg *msg,
1158 __u8 *size,
1159 const __u8 **vendor_cmd)
1160{
1161 *size = msg->len - 2;
1162
1163 if (*size > 14)
1164 *size = 14;
1165 *vendor_cmd = msg->msg + 2;
1166}
1167
1168static inline void cec_msg_vendor_command_with_id(struct cec_msg *msg,
1169 __u32 vendor_id, __u8 size,
1170 const __u8 *vendor_cmd)
1171{
1172 if (size > 11)
1173 size = 11;
1174 msg->len = 5 + size;
1175 msg->msg[1] = CEC_MSG_VENDOR_COMMAND_WITH_ID;
1176 msg->msg[2] = vendor_id >> 16;
1177 msg->msg[3] = (vendor_id >> 8) & 0xff;
1178 msg->msg[4] = vendor_id & 0xff;
1179 memcpy(msg->msg + 5, vendor_cmd, size);
1180}
1181
1182static inline void cec_ops_vendor_command_with_id(const struct cec_msg *msg,
1183 __u32 *vendor_id, __u8 *size,
1184 const __u8 **vendor_cmd)
1185{
1186 *size = msg->len - 5;
1187
1188 if (*size > 11)
1189 *size = 11;
1190 *vendor_id = (msg->msg[2] << 16) | (msg->msg[3] << 8) | msg->msg[4];
1191 *vendor_cmd = msg->msg + 5;
1192}
1193
1194static inline void cec_msg_vendor_remote_button_down(struct cec_msg *msg,
1195 __u8 size,
1196 const __u8 *rc_code)
1197{
1198 if (size > 14)
1199 size = 14;
1200 msg->len = 2 + size;
1201 msg->msg[1] = CEC_MSG_VENDOR_REMOTE_BUTTON_DOWN;
1202 memcpy(msg->msg + 2, rc_code, size);
1203}
1204
1205static inline void cec_ops_vendor_remote_button_down(const struct cec_msg *msg,
1206 __u8 *size,
1207 const __u8 **rc_code)
1208{
1209 *size = msg->len - 2;
1210
1211 if (*size > 14)
1212 *size = 14;
1213 *rc_code = msg->msg + 2;
1214}
1215
1144static inline void cec_msg_vendor_remote_button_up(struct cec_msg *msg) 1216static inline void cec_msg_vendor_remote_button_up(struct cec_msg *msg)
1145{ 1217{
1146 msg->len = 2; 1218 msg->len = 2;
@@ -1277,7 +1349,7 @@ static inline void cec_msg_user_control_pressed(struct cec_msg *msg,
1277 msg->len += 4; 1349 msg->len += 4;
1278 msg->msg[3] = (ui_cmd->channel_identifier.channel_number_fmt << 2) | 1350 msg->msg[3] = (ui_cmd->channel_identifier.channel_number_fmt << 2) |
1279 (ui_cmd->channel_identifier.major >> 8); 1351 (ui_cmd->channel_identifier.major >> 8);
1280 msg->msg[4] = ui_cmd->channel_identifier.major && 0xff; 1352 msg->msg[4] = ui_cmd->channel_identifier.major & 0xff;
1281 msg->msg[5] = ui_cmd->channel_identifier.minor >> 8; 1353 msg->msg[5] = ui_cmd->channel_identifier.minor >> 8;
1282 msg->msg[6] = ui_cmd->channel_identifier.minor & 0xff; 1354 msg->msg[6] = ui_cmd->channel_identifier.minor & 0xff;
1283 break; 1355 break;
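The vendor-command helpers added above only pack bytes into a struct cec_msg and clamp the payload length; they do no other validation. A minimal caller sketch (the initiator/destination nibbles, vendor ID and payload bytes below are illustrative values, not part of this patch):

    struct cec_msg msg = {};
    static const __u8 cmd[3] = { 0x01, 0x02, 0x03 };   /* example vendor payload */

    msg.msg[0] = (4 << 4) | 0;                  /* header byte: initiator 4 -> destination 0 */
    cec_msg_vendor_command_with_id(&msg, 0x000c03 /* example vendor ID */,
                                   sizeof(cmd), cmd);
    /* msg.len is now 5 + 3: msg[1] holds the opcode, msg[2..4] the vendor ID,
     * msg[5..7] the payload; anything beyond 11 payload bytes is silently truncated. */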
diff --git a/include/linux/cec.h b/include/linux/cec.h
index b3e22893a002..851968e803fa 100644
--- a/include/linux/cec.h
+++ b/include/linux/cec.h
@@ -364,7 +364,7 @@ struct cec_caps {
364 * @num_log_addrs: how many logical addresses should be claimed. Set by the 364 * @num_log_addrs: how many logical addresses should be claimed. Set by the
365 * caller. 365 * caller.
366 * @vendor_id: the vendor ID of the device. Set by the caller. 366 * @vendor_id: the vendor ID of the device. Set by the caller.
367 * @flags: set to 0. 367 * @flags: flags.
368 * @osd_name: the OSD name of the device. Set by the caller. 368 * @osd_name: the OSD name of the device. Set by the caller.
369 * @primary_device_type: the primary device type for each logical address. 369 * @primary_device_type: the primary device type for each logical address.
370 * Set by the caller. 370 * Set by the caller.
@@ -389,6 +389,9 @@ struct cec_log_addrs {
389 __u8 features[CEC_MAX_LOG_ADDRS][12]; 389 __u8 features[CEC_MAX_LOG_ADDRS][12];
390}; 390};
391 391
392/* Allow a fallback to unregistered */
393#define CEC_LOG_ADDRS_FL_ALLOW_UNREG_FALLBACK (1 << 0)
394
392/* Events */ 395/* Events */
393 396
394/* Event that occurs when the adapter state changes */ 397/* Event that occurs when the adapter state changes */
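The CEC_LOG_ADDRS_FL_ALLOW_UNREG_FALLBACK flag defined above is consumed through the existing CEC_ADAP_S_LOG_ADDRS ioctl. A userspace sketch, assuming an open /dev/cecX file descriptor and the usual linux/cec.h constants (the field values are only an example):

    struct cec_log_addrs laddrs = {};

    laddrs.num_log_addrs = 1;
    laddrs.cec_version = CEC_OP_CEC_VERSION_2_0;
    laddrs.vendor_id = CEC_VENDOR_ID_NONE;
    laddrs.flags = CEC_LOG_ADDRS_FL_ALLOW_UNREG_FALLBACK;
    laddrs.log_addr_type[0] = CEC_LOG_ADDR_TYPE_PLAYBACK;
    laddrs.primary_device_type[0] = CEC_OP_PRIM_DEVTYPE_PLAYBACK;
    strcpy(laddrs.osd_name, "example");

    if (ioctl(fd, CEC_ADAP_S_LOG_ADDRS, &laddrs))
        perror("CEC_ADAP_S_LOG_ADDRS");
    /* With the flag set, claiming falls back to the unregistered logical
     * address instead of failing when no requested address can be claimed. */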
diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
index 66533e18276c..dc69df04abc1 100644
--- a/include/linux/dma-mapping.h
+++ b/include/linux/dma-mapping.h
@@ -718,7 +718,7 @@ static inline int dma_mmap_wc(struct device *dev,
718#define dma_mmap_writecombine dma_mmap_wc 718#define dma_mmap_writecombine dma_mmap_wc
719#endif 719#endif
720 720
721#ifdef CONFIG_NEED_DMA_MAP_STATE 721#if defined(CONFIG_NEED_DMA_MAP_STATE) || defined(CONFIG_DMA_API_DEBUG)
722#define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME) dma_addr_t ADDR_NAME 722#define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME) dma_addr_t ADDR_NAME
723#define DEFINE_DMA_UNMAP_LEN(LEN_NAME) __u32 LEN_NAME 723#define DEFINE_DMA_UNMAP_LEN(LEN_NAME) __u32 LEN_NAME
724#define dma_unmap_addr(PTR, ADDR_NAME) ((PTR)->ADDR_NAME) 724#define dma_unmap_addr(PTR, ADDR_NAME) ((PTR)->ADDR_NAME)
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index 7e3d53753612..01e84436cddf 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -620,6 +620,7 @@ static inline int fault_in_multipages_readable(const char __user *uaddr,
620 return __get_user(c, end); 620 return __get_user(c, end);
621 } 621 }
622 622
623 (void)c;
623 return 0; 624 return 0;
624} 625}
625 626
diff --git a/include/media/cec.h b/include/media/cec.h
index dc7854b855f3..fdb5d600e4bb 100644
--- a/include/media/cec.h
+++ b/include/media/cec.h
@@ -57,8 +57,8 @@ struct cec_devnode {
57 int minor; 57 int minor;
58 bool registered; 58 bool registered;
59 bool unregistered; 59 bool unregistered;
60 struct mutex fhs_lock;
61 struct list_head fhs; 60 struct list_head fhs;
61 struct mutex lock;
62}; 62};
63 63
64struct cec_adapter; 64struct cec_adapter;
diff --git a/include/net/netfilter/nf_conntrack_synproxy.h b/include/net/netfilter/nf_conntrack_synproxy.h
index 6793614e6502..e6937318546c 100644
--- a/include/net/netfilter/nf_conntrack_synproxy.h
+++ b/include/net/netfilter/nf_conntrack_synproxy.h
@@ -27,6 +27,20 @@ static inline struct nf_conn_synproxy *nfct_synproxy_ext_add(struct nf_conn *ct)
27#endif 27#endif
28} 28}
29 29
30static inline bool nf_ct_add_synproxy(struct nf_conn *ct,
31 const struct nf_conn *tmpl)
32{
33 if (tmpl && nfct_synproxy(tmpl)) {
34 if (!nfct_seqadj_ext_add(ct))
35 return false;
36
37 if (!nfct_synproxy_ext_add(ct))
38 return false;
39 }
40
41 return true;
42}
43
30struct synproxy_stats { 44struct synproxy_stats {
31 unsigned int syn_received; 45 unsigned int syn_received;
32 unsigned int cookie_invalid; 46 unsigned int cookie_invalid;
diff --git a/include/net/sctp/sm.h b/include/net/sctp/sm.h
index efc01743b9d6..bafe2a0ab908 100644
--- a/include/net/sctp/sm.h
+++ b/include/net/sctp/sm.h
@@ -382,7 +382,7 @@ enum {
382 ADDIP_SERIAL_SIGN_BIT = (1<<31) 382 ADDIP_SERIAL_SIGN_BIT = (1<<31)
383}; 383};
384 384
385static inline int ADDIP_SERIAL_gte(__u16 s, __u16 t) 385static inline int ADDIP_SERIAL_gte(__u32 s, __u32 t)
386{ 386{
387 return ((s) == (t)) || (((t) - (s)) & ADDIP_SERIAL_SIGN_BIT); 387 return ((s) == (t)) || (((t) - (s)) & ADDIP_SERIAL_SIGN_BIT);
388} 388}
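Widening both parameters of ADDIP_SERIAL_gte() to __u32 matters because ASCONF serial numbers are 32-bit values and the old __u16 arguments silently truncated them before the comparison; with full-width unsigned operands the subtraction wraps mod 2^32 and the sign-bit test stays well defined across rollover. A few worked values for the updated helper:

    /* ADDIP_SERIAL_gte(s, t) == "s is at or after t" in wrap-around order */
    ADDIP_SERIAL_gte(5, 3)           /* (3 - 5)  = 0xfffffffe, bit 31 set   -> true  */
    ADDIP_SERIAL_gte(3, 5)           /* (5 - 3)  = 0x00000002, bit 31 clear -> false */
    ADDIP_SERIAL_gte(2, 0xffffffff)  /* wraps to   0xfffffffd, bit 31 set   -> true  */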
diff --git a/include/net/sock.h b/include/net/sock.h
index ff5be7e8ddea..8741988e6880 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -1332,6 +1332,16 @@ static inline void sk_mem_uncharge(struct sock *sk, int size)
1332 if (!sk_has_account(sk)) 1332 if (!sk_has_account(sk))
1333 return; 1333 return;
1334 sk->sk_forward_alloc += size; 1334 sk->sk_forward_alloc += size;
1335
1336 /* Avoid a possible overflow.
1337 * TCP send queues can make this happen, if sk_mem_reclaim()
1338 * is not called and more than 2 GBytes are released at once.
1339 *
1340 * If we reach 2 MBytes, reclaim 1 MBytes right now, there is
1341 * no need to hold that much forward allocation anyway.
1342 */
1343 if (unlikely(sk->sk_forward_alloc >= 1 << 21))
1344 __sk_mem_reclaim(sk, 1 << 20);
1335} 1345}
1336 1346
1337static inline void sk_wmem_free_skb(struct sock *sk, struct sk_buff *skb) 1347static inline void sk_wmem_free_skb(struct sock *sk, struct sk_buff *skb)
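For scale, the constants in the sk_mem_uncharge() hunk above: sk_forward_alloc is a plain int, so releasing more than 2 GB of queued send memory without an intervening reclaim can overflow it, while trimming the cushion once it reaches 1 << 21 bytes keeps it far from that limit. Spelled out as a sketch (these macro names are illustrative, not kernel symbols):

    #define SK_FWD_ALLOC_RECLAIM_THRESHOLD  (1 << 21)   /* 2 MiB of forward allocation */
    #define SK_FWD_ALLOC_RECLAIM_AMOUNT     (1 << 20)   /* hand 1 MiB back via __sk_mem_reclaim() */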
diff --git a/include/net/xfrm.h b/include/net/xfrm.h
index adfebd6f243c..17934312eecb 100644
--- a/include/net/xfrm.h
+++ b/include/net/xfrm.h
@@ -1540,8 +1540,10 @@ int xfrm4_tunnel_deregister(struct xfrm_tunnel *handler, unsigned short family);
1540void xfrm4_local_error(struct sk_buff *skb, u32 mtu); 1540void xfrm4_local_error(struct sk_buff *skb, u32 mtu);
1541int xfrm6_extract_header(struct sk_buff *skb); 1541int xfrm6_extract_header(struct sk_buff *skb);
1542int xfrm6_extract_input(struct xfrm_state *x, struct sk_buff *skb); 1542int xfrm6_extract_input(struct xfrm_state *x, struct sk_buff *skb);
1543int xfrm6_rcv_spi(struct sk_buff *skb, int nexthdr, __be32 spi); 1543int xfrm6_rcv_spi(struct sk_buff *skb, int nexthdr, __be32 spi,
1544 struct ip6_tnl *t);
1544int xfrm6_transport_finish(struct sk_buff *skb, int async); 1545int xfrm6_transport_finish(struct sk_buff *skb, int async);
1546int xfrm6_rcv_tnl(struct sk_buff *skb, struct ip6_tnl *t);
1545int xfrm6_rcv(struct sk_buff *skb); 1547int xfrm6_rcv(struct sk_buff *skb);
1546int xfrm6_input_addr(struct sk_buff *skb, xfrm_address_t *daddr, 1548int xfrm6_input_addr(struct sk_buff *skb, xfrm_address_t *daddr,
1547 xfrm_address_t *saddr, u8 proto); 1549 xfrm_address_t *saddr, u8 proto);
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index 5e8dab5bf9ad..d6b729beba49 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -3446,9 +3446,28 @@ static ssize_t cgroup_subtree_control_write(struct kernfs_open_file *of,
3446 * Except for the root, subtree_control must be zero for a cgroup 3446 * Except for the root, subtree_control must be zero for a cgroup
3447 * with tasks so that child cgroups don't compete against tasks. 3447 * with tasks so that child cgroups don't compete against tasks.
3448 */ 3448 */
3449 if (enable && cgroup_parent(cgrp) && !list_empty(&cgrp->cset_links)) { 3449 if (enable && cgroup_parent(cgrp)) {
3450 ret = -EBUSY; 3450 struct cgrp_cset_link *link;
3451 goto out_unlock; 3451
3452 /*
3453 * Because namespaces pin csets too, @cgrp->cset_links
3454 * might not be empty even when @cgrp is empty. Walk and
3455 * verify each cset.
3456 */
3457 spin_lock_irq(&css_set_lock);
3458
3459 ret = 0;
3460 list_for_each_entry(link, &cgrp->cset_links, cset_link) {
3461 if (css_set_populated(link->cset)) {
3462 ret = -EBUSY;
3463 break;
3464 }
3465 }
3466
3467 spin_unlock_irq(&css_set_lock);
3468
3469 if (ret)
3470 goto out_unlock;
3452 } 3471 }
3453 3472
3454 /* save and update control masks and prepare csses */ 3473 /* save and update control masks and prepare csses */
@@ -3899,7 +3918,9 @@ void cgroup_file_notify(struct cgroup_file *cfile)
3899 * cgroup_task_count - count the number of tasks in a cgroup. 3918 * cgroup_task_count - count the number of tasks in a cgroup.
3900 * @cgrp: the cgroup in question 3919 * @cgrp: the cgroup in question
3901 * 3920 *
3902 * Return the number of tasks in the cgroup. 3921 * Return the number of tasks in the cgroup. The returned number can be
3922 * higher than the actual number of tasks due to css_set references from
3923 * namespace roots and temporary usages.
3903 */ 3924 */
3904static int cgroup_task_count(const struct cgroup *cgrp) 3925static int cgroup_task_count(const struct cgroup *cgrp)
3905{ 3926{
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index c27e53326bef..2b4c20ab5bbe 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -325,8 +325,7 @@ static struct file_system_type cpuset_fs_type = {
325/* 325/*
326 * Return in pmask the portion of a cpusets's cpus_allowed that 326 * Return in pmask the portion of a cpusets's cpus_allowed that
327 * are online. If none are online, walk up the cpuset hierarchy 327 * are online. If none are online, walk up the cpuset hierarchy
328 * until we find one that does have some online cpus. The top 328 * until we find one that does have some online cpus.
329 * cpuset always has some cpus online.
330 * 329 *
331 * One way or another, we guarantee to return some non-empty subset 330 * One way or another, we guarantee to return some non-empty subset
332 * of cpu_online_mask. 331 * of cpu_online_mask.
@@ -335,8 +334,20 @@ static struct file_system_type cpuset_fs_type = {
335 */ 334 */
336static void guarantee_online_cpus(struct cpuset *cs, struct cpumask *pmask) 335static void guarantee_online_cpus(struct cpuset *cs, struct cpumask *pmask)
337{ 336{
338 while (!cpumask_intersects(cs->effective_cpus, cpu_online_mask)) 337 while (!cpumask_intersects(cs->effective_cpus, cpu_online_mask)) {
339 cs = parent_cs(cs); 338 cs = parent_cs(cs);
339 if (unlikely(!cs)) {
340 /*
341 * The top cpuset doesn't have any online cpu as a
342 * consequence of a race between cpuset_hotplug_work
343 * and cpu hotplug notifier. But we know the top
344 * cpuset's effective_cpus is on its way to be
345 * identical to cpu_online_mask.
346 */
347 cpumask_copy(pmask, cpu_online_mask);
348 return;
349 }
350 }
340 cpumask_and(pmask, cs->effective_cpus, cpu_online_mask); 351 cpumask_and(pmask, cs->effective_cpus, cpu_online_mask);
341} 352}
342 353
@@ -2074,7 +2085,7 @@ static void cpuset_bind(struct cgroup_subsys_state *root_css)
2074 * which could have been changed by cpuset just after it inherits the 2085 * which could have been changed by cpuset just after it inherits the
2075 * state from the parent and before it sits on the cgroup's task list. 2086 * state from the parent and before it sits on the cgroup's task list.
2076 */ 2087 */
2077void cpuset_fork(struct task_struct *task) 2088static void cpuset_fork(struct task_struct *task)
2078{ 2089{
2079 if (task_css_is_root(task, cpuset_cgrp_id)) 2090 if (task_css_is_root(task, cpuset_cgrp_id))
2080 return; 2091 return;
diff --git a/kernel/events/core.c b/kernel/events/core.c
index a54f2c2cdb20..fc9bb2225291 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -3929,7 +3929,7 @@ static void exclusive_event_destroy(struct perf_event *event)
3929 3929
3930static bool exclusive_event_match(struct perf_event *e1, struct perf_event *e2) 3930static bool exclusive_event_match(struct perf_event *e1, struct perf_event *e2)
3931{ 3931{
3932 if ((e1->pmu->capabilities & PERF_PMU_CAP_EXCLUSIVE) && 3932 if ((e1->pmu == e2->pmu) &&
3933 (e1->cpu == e2->cpu || 3933 (e1->cpu == e2->cpu ||
3934 e1->cpu == -1 || 3934 e1->cpu == -1 ||
3935 e2->cpu == -1)) 3935 e2->cpu == -1))
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
index 637389088b3f..26ba5654d9d5 100644
--- a/kernel/irq/chip.c
+++ b/kernel/irq/chip.c
@@ -820,6 +820,8 @@ __irq_do_set_handler(struct irq_desc *desc, irq_flow_handler_t handle,
820 desc->name = name; 820 desc->name = name;
821 821
822 if (handle != handle_bad_irq && is_chained) { 822 if (handle != handle_bad_irq && is_chained) {
823 unsigned int type = irqd_get_trigger_type(&desc->irq_data);
824
823 /* 825 /*
824 * We're about to start this interrupt immediately, 826 * We're about to start this interrupt immediately,
825 * hence the need to set the trigger configuration. 827 * hence the need to set the trigger configuration.
@@ -828,8 +830,10 @@ __irq_do_set_handler(struct irq_desc *desc, irq_flow_handler_t handle,
828 * chained interrupt. Reset it immediately because we 830 * chained interrupt. Reset it immediately because we
829 * do know better. 831 * do know better.
830 */ 832 */
831 __irq_set_trigger(desc, irqd_get_trigger_type(&desc->irq_data)); 833 if (type != IRQ_TYPE_NONE) {
832 desc->handle_irq = handle; 834 __irq_set_trigger(desc, type);
835 desc->handle_irq = handle;
836 }
833 837
834 irq_settings_set_noprobe(desc); 838 irq_settings_set_noprobe(desc);
835 irq_settings_set_norequest(desc); 839 irq_settings_set_norequest(desc);
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index dade4c9559cc..7bc56762ca35 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -5124,19 +5124,20 @@ tracing_read_pipe(struct file *filp, char __user *ubuf,
5124 struct trace_iterator *iter = filp->private_data; 5124 struct trace_iterator *iter = filp->private_data;
5125 ssize_t sret; 5125 ssize_t sret;
5126 5126
5127 /* return any leftover data */
5128 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
5129 if (sret != -EBUSY)
5130 return sret;
5131
5132 trace_seq_init(&iter->seq);
5133
5134 /* 5127 /*
5135 * Avoid more than one consumer on a single file descriptor 5128 * Avoid more than one consumer on a single file descriptor
5136 * This is just a matter of traces coherency, the ring buffer itself 5129 * This is just a matter of traces coherency, the ring buffer itself
5137 * is protected. 5130 * is protected.
5138 */ 5131 */
5139 mutex_lock(&iter->mutex); 5132 mutex_lock(&iter->mutex);
5133
5134 /* return any leftover data */
5135 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
5136 if (sret != -EBUSY)
5137 goto out;
5138
5139 trace_seq_init(&iter->seq);
5140
5140 if (iter->trace->read) { 5141 if (iter->trace->read) {
5141 sret = iter->trace->read(iter, filp, ubuf, cnt, ppos); 5142 sret = iter->trace->read(iter, filp, ubuf, cnt, ppos);
5142 if (sret) 5143 if (sret)
@@ -6163,9 +6164,6 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
6163 return -EBUSY; 6164 return -EBUSY;
6164#endif 6165#endif
6165 6166
6166 if (splice_grow_spd(pipe, &spd))
6167 return -ENOMEM;
6168
6169 if (*ppos & (PAGE_SIZE - 1)) 6167 if (*ppos & (PAGE_SIZE - 1))
6170 return -EINVAL; 6168 return -EINVAL;
6171 6169
@@ -6175,6 +6173,9 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
6175 len &= PAGE_MASK; 6173 len &= PAGE_MASK;
6176 } 6174 }
6177 6175
6176 if (splice_grow_spd(pipe, &spd))
6177 return -ENOMEM;
6178
6178 again: 6179 again:
6179 trace_access_lock(iter->cpu_file); 6180 trace_access_lock(iter->cpu_file);
6180 entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file); 6181 entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);
@@ -6232,19 +6233,21 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
6232 /* did we read anything? */ 6233 /* did we read anything? */
6233 if (!spd.nr_pages) { 6234 if (!spd.nr_pages) {
6234 if (ret) 6235 if (ret)
6235 return ret; 6236 goto out;
6236 6237
6238 ret = -EAGAIN;
6237 if ((file->f_flags & O_NONBLOCK) || (flags & SPLICE_F_NONBLOCK)) 6239 if ((file->f_flags & O_NONBLOCK) || (flags & SPLICE_F_NONBLOCK))
6238 return -EAGAIN; 6240 goto out;
6239 6241
6240 ret = wait_on_pipe(iter, true); 6242 ret = wait_on_pipe(iter, true);
6241 if (ret) 6243 if (ret)
6242 return ret; 6244 goto out;
6243 6245
6244 goto again; 6246 goto again;
6245 } 6247 }
6246 6248
6247 ret = splice_to_pipe(pipe, &spd); 6249 ret = splice_to_pipe(pipe, &spd);
6250out:
6248 splice_shrink_spd(&spd); 6251 splice_shrink_spd(&spd);
6249 6252
6250 return ret; 6253 return ret;
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 2e2cca509231..cab7405f48d2 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -821,7 +821,7 @@ config DETECT_HUNG_TASK
821 help 821 help
822 Say Y here to enable the kernel to detect "hung tasks", 822 Say Y here to enable the kernel to detect "hung tasks",
823 which are bugs that cause the task to be stuck in 823 which are bugs that cause the task to be stuck in
824 uninterruptible "D" state indefinitiley. 824 uninterruptible "D" state indefinitely.
825 825
826 When a hung task is detected, the kernel will print the 826 When a hung task is detected, the kernel will print the
827 current stack trace (which you should report), but the 827 current stack trace (which you should report), but the
diff --git a/lib/radix-tree.c b/lib/radix-tree.c
index 1b7bf7314141..91f0727e3cad 100644
--- a/lib/radix-tree.c
+++ b/lib/radix-tree.c
@@ -105,10 +105,10 @@ static unsigned int radix_tree_descend(struct radix_tree_node *parent,
105 105
106#ifdef CONFIG_RADIX_TREE_MULTIORDER 106#ifdef CONFIG_RADIX_TREE_MULTIORDER
107 if (radix_tree_is_internal_node(entry)) { 107 if (radix_tree_is_internal_node(entry)) {
108 unsigned long siboff = get_slot_offset(parent, entry); 108 if (is_sibling_entry(parent, entry)) {
109 if (siboff < RADIX_TREE_MAP_SIZE) { 109 void **sibentry = (void **) entry_to_node(entry);
110 offset = siboff; 110 offset = get_slot_offset(parent, sibentry);
111 entry = rcu_dereference_raw(parent->slots[offset]); 111 entry = rcu_dereference_raw(*sibentry);
112 } 112 }
113 } 113 }
114#endif 114#endif
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 4f1a34714b4a..283583fcb1e7 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1138,9 +1138,6 @@ int do_huge_pmd_numa_page(struct fault_env *fe, pmd_t pmd)
1138 bool was_writable; 1138 bool was_writable;
1139 int flags = 0; 1139 int flags = 0;
1140 1140
1141 /* A PROT_NONE fault should not end up here */
1142 BUG_ON(!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)));
1143
1144 fe->ptl = pmd_lock(vma->vm_mm, fe->pmd); 1141 fe->ptl = pmd_lock(vma->vm_mm, fe->pmd);
1145 if (unlikely(!pmd_same(pmd, *fe->pmd))) 1142 if (unlikely(!pmd_same(pmd, *fe->pmd)))
1146 goto out_unlock; 1143 goto out_unlock;
diff --git a/mm/ksm.c b/mm/ksm.c
index 73d43bafd9fb..5048083b60f2 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -283,7 +283,8 @@ static inline struct rmap_item *alloc_rmap_item(void)
283{ 283{
284 struct rmap_item *rmap_item; 284 struct rmap_item *rmap_item;
285 285
286 rmap_item = kmem_cache_zalloc(rmap_item_cache, GFP_KERNEL); 286 rmap_item = kmem_cache_zalloc(rmap_item_cache, GFP_KERNEL |
287 __GFP_NORETRY | __GFP_NOWARN);
287 if (rmap_item) 288 if (rmap_item)
288 ksm_rmap_items++; 289 ksm_rmap_items++;
289 return rmap_item; 290 return rmap_item;
diff --git a/mm/memory.c b/mm/memory.c
index 558c85270ae2..f1a68049edff 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -3351,9 +3351,6 @@ static int do_numa_page(struct fault_env *fe, pte_t pte)
3351 bool was_writable = pte_write(pte); 3351 bool was_writable = pte_write(pte);
3352 int flags = 0; 3352 int flags = 0;
3353 3353
3354 /* A PROT_NONE fault should not end up here */
3355 BUG_ON(!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)));
3356
3357 /* 3354 /*
3358 * The "pte" at this point cannot be used safely without 3355 * The "pte" at this point cannot be used safely without
3359 * validation through pte_unmap_same(). It's of NUMA type but 3356 * validation through pte_unmap_same(). It's of NUMA type but
@@ -3458,6 +3455,11 @@ static int wp_huge_pmd(struct fault_env *fe, pmd_t orig_pmd)
3458 return VM_FAULT_FALLBACK; 3455 return VM_FAULT_FALLBACK;
3459} 3456}
3460 3457
3458static inline bool vma_is_accessible(struct vm_area_struct *vma)
3459{
3460 return vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE);
3461}
3462
3461/* 3463/*
3462 * These routines also need to handle stuff like marking pages dirty 3464 * These routines also need to handle stuff like marking pages dirty
3463 * and/or accessed for architectures that don't do it in hardware (most 3465 * and/or accessed for architectures that don't do it in hardware (most
@@ -3524,7 +3526,7 @@ static int handle_pte_fault(struct fault_env *fe)
3524 if (!pte_present(entry)) 3526 if (!pte_present(entry))
3525 return do_swap_page(fe, entry); 3527 return do_swap_page(fe, entry);
3526 3528
3527 if (pte_protnone(entry)) 3529 if (pte_protnone(entry) && vma_is_accessible(fe->vma))
3528 return do_numa_page(fe, entry); 3530 return do_numa_page(fe, entry);
3529 3531
3530 fe->ptl = pte_lockptr(fe->vma->vm_mm, fe->pmd); 3532 fe->ptl = pte_lockptr(fe->vma->vm_mm, fe->pmd);
@@ -3590,7 +3592,7 @@ static int __handle_mm_fault(struct vm_area_struct *vma, unsigned long address,
3590 3592
3591 barrier(); 3593 barrier();
3592 if (pmd_trans_huge(orig_pmd) || pmd_devmap(orig_pmd)) { 3594 if (pmd_trans_huge(orig_pmd) || pmd_devmap(orig_pmd)) {
3593 if (pmd_protnone(orig_pmd)) 3595 if (pmd_protnone(orig_pmd) && vma_is_accessible(vma))
3594 return do_huge_pmd_numa_page(&fe, orig_pmd); 3596 return do_huge_pmd_numa_page(&fe, orig_pmd);
3595 3597
3596 if ((fe.flags & FAULT_FLAG_WRITE) && 3598 if ((fe.flags & FAULT_FLAG_WRITE) &&
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index b58906b6215c..9d29ba0f7192 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -1555,8 +1555,8 @@ static struct page *new_node_page(struct page *page, unsigned long private,
1555{ 1555{
1556 gfp_t gfp_mask = GFP_USER | __GFP_MOVABLE; 1556 gfp_t gfp_mask = GFP_USER | __GFP_MOVABLE;
1557 int nid = page_to_nid(page); 1557 int nid = page_to_nid(page);
1558 nodemask_t nmask = node_online_map; 1558 nodemask_t nmask = node_states[N_MEMORY];
1559 struct page *new_page; 1559 struct page *new_page = NULL;
1560 1560
1561 /* 1561 /*
1562 * TODO: allocate a destination hugepage from a nearest neighbor node, 1562 * TODO: allocate a destination hugepage from a nearest neighbor node,
@@ -1567,14 +1567,14 @@ static struct page *new_node_page(struct page *page, unsigned long private,
1567 return alloc_huge_page_node(page_hstate(compound_head(page)), 1567 return alloc_huge_page_node(page_hstate(compound_head(page)),
1568 next_node_in(nid, nmask)); 1568 next_node_in(nid, nmask));
1569 1569
1570 if (nid != next_node_in(nid, nmask)) 1570 node_clear(nid, nmask);
1571 node_clear(nid, nmask);
1572 1571
1573 if (PageHighMem(page) 1572 if (PageHighMem(page)
1574 || (zone_idx(page_zone(page)) == ZONE_MOVABLE)) 1573 || (zone_idx(page_zone(page)) == ZONE_MOVABLE))
1575 gfp_mask |= __GFP_HIGHMEM; 1574 gfp_mask |= __GFP_HIGHMEM;
1576 1575
1577 new_page = __alloc_pages_nodemask(gfp_mask, 0, 1576 if (!nodes_empty(nmask))
1577 new_page = __alloc_pages_nodemask(gfp_mask, 0,
1578 node_zonelist(nid, gfp_mask), &nmask); 1578 node_zonelist(nid, gfp_mask), &nmask);
1579 if (!new_page) 1579 if (!new_page)
1580 new_page = __alloc_pages(gfp_mask, 0, 1580 new_page = __alloc_pages(gfp_mask, 0,
diff --git a/mm/shmem.c b/mm/shmem.c
index fd8b2b5741b1..971fc83e6402 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -270,7 +270,7 @@ bool shmem_charge(struct inode *inode, long pages)
270 info->alloced -= pages; 270 info->alloced -= pages;
271 shmem_recalc_inode(inode); 271 shmem_recalc_inode(inode);
272 spin_unlock_irqrestore(&info->lock, flags); 272 spin_unlock_irqrestore(&info->lock, flags);
273 273 shmem_unacct_blocks(info->flags, pages);
274 return false; 274 return false;
275 } 275 }
276 percpu_counter_add(&sbinfo->used_blocks, pages); 276 percpu_counter_add(&sbinfo->used_blocks, pages);
@@ -291,6 +291,7 @@ void shmem_uncharge(struct inode *inode, long pages)
291 291
292 if (sbinfo->max_blocks) 292 if (sbinfo->max_blocks)
293 percpu_counter_sub(&sbinfo->used_blocks, pages); 293 percpu_counter_sub(&sbinfo->used_blocks, pages);
294 shmem_unacct_blocks(info->flags, pages);
294} 295}
295 296
296/* 297/*
@@ -1980,7 +1981,7 @@ unsigned long shmem_get_unmapped_area(struct file *file,
1980 return addr; 1981 return addr;
1981 sb = shm_mnt->mnt_sb; 1982 sb = shm_mnt->mnt_sb;
1982 } 1983 }
1983 if (SHMEM_SB(sb)->huge != SHMEM_HUGE_NEVER) 1984 if (SHMEM_SB(sb)->huge == SHMEM_HUGE_NEVER)
1984 return addr; 1985 return addr;
1985 } 1986 }
1986 1987
diff --git a/mm/vmscan.c b/mm/vmscan.c
index b1e12a1ea9cf..0fe8b7113868 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -2303,23 +2303,6 @@ out:
2303 } 2303 }
2304} 2304}
2305 2305
2306#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
2307static void init_tlb_ubc(void)
2308{
2309 /*
2310 * This deliberately does not clear the cpumask as it's expensive
2311 * and unnecessary. If there happens to be data in there then the
2312 * first SWAP_CLUSTER_MAX pages will send an unnecessary IPI and
2313 * then will be cleared.
2314 */
2315 current->tlb_ubc.flush_required = false;
2316}
2317#else
2318static inline void init_tlb_ubc(void)
2319{
2320}
2321#endif /* CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH */
2322
2323/* 2306/*
2324 * This is a basic per-node page freer. Used by both kswapd and direct reclaim. 2307 * This is a basic per-node page freer. Used by both kswapd and direct reclaim.
2325 */ 2308 */
@@ -2355,8 +2338,6 @@ static void shrink_node_memcg(struct pglist_data *pgdat, struct mem_cgroup *memc
2355 scan_adjusted = (global_reclaim(sc) && !current_is_kswapd() && 2338 scan_adjusted = (global_reclaim(sc) && !current_is_kswapd() &&
2356 sc->priority == DEF_PRIORITY); 2339 sc->priority == DEF_PRIORITY);
2357 2340
2358 init_tlb_ubc();
2359
2360 blk_start_plug(&plug); 2341 blk_start_plug(&plug);
2361 while (nr[LRU_INACTIVE_ANON] || nr[LRU_ACTIVE_FILE] || 2342 while (nr[LRU_INACTIVE_ANON] || nr[LRU_ACTIVE_FILE] ||
2362 nr[LRU_INACTIVE_FILE]) { 2343 nr[LRU_INACTIVE_FILE]) {
diff --git a/net/batman-adv/bat_v_elp.c b/net/batman-adv/bat_v_elp.c
index 7d170010beb9..ee08540ce503 100644
--- a/net/batman-adv/bat_v_elp.c
+++ b/net/batman-adv/bat_v_elp.c
@@ -335,7 +335,7 @@ int batadv_v_elp_iface_enable(struct batadv_hard_iface *hard_iface)
335 goto out; 335 goto out;
336 336
337 skb_reserve(hard_iface->bat_v.elp_skb, ETH_HLEN + NET_IP_ALIGN); 337 skb_reserve(hard_iface->bat_v.elp_skb, ETH_HLEN + NET_IP_ALIGN);
338 elp_buff = skb_push(hard_iface->bat_v.elp_skb, BATADV_ELP_HLEN); 338 elp_buff = skb_put(hard_iface->bat_v.elp_skb, BATADV_ELP_HLEN);
339 elp_packet = (struct batadv_elp_packet *)elp_buff; 339 elp_packet = (struct batadv_elp_packet *)elp_buff;
340 memset(elp_packet, 0, BATADV_ELP_HLEN); 340 memset(elp_packet, 0, BATADV_ELP_HLEN);
341 341
diff --git a/net/batman-adv/routing.c b/net/batman-adv/routing.c
index 7602c001e92b..3d199478c405 100644
--- a/net/batman-adv/routing.c
+++ b/net/batman-adv/routing.c
@@ -470,6 +470,29 @@ static int batadv_check_unicast_packet(struct batadv_priv *bat_priv,
470} 470}
471 471
472/** 472/**
473 * batadv_last_bonding_get - Get last_bonding_candidate of orig_node
474 * @orig_node: originator node whose last bonding candidate should be retrieved
475 *
476 * Return: last bonding candidate of router or NULL if not found
477 *
478 * The object is returned with refcounter increased by 1.
479 */
480static struct batadv_orig_ifinfo *
481batadv_last_bonding_get(struct batadv_orig_node *orig_node)
482{
483 struct batadv_orig_ifinfo *last_bonding_candidate;
484
485 spin_lock_bh(&orig_node->neigh_list_lock);
486 last_bonding_candidate = orig_node->last_bonding_candidate;
487
488 if (last_bonding_candidate)
489 kref_get(&last_bonding_candidate->refcount);
490 spin_unlock_bh(&orig_node->neigh_list_lock);
491
492 return last_bonding_candidate;
493}
494
495/**
473 * batadv_last_bonding_replace - Replace last_bonding_candidate of orig_node 496 * batadv_last_bonding_replace - Replace last_bonding_candidate of orig_node
474 * @orig_node: originator node whose bonding candidates should be replaced 497 * @orig_node: originator node whose bonding candidates should be replaced
475 * @new_candidate: new bonding candidate or NULL 498 * @new_candidate: new bonding candidate or NULL
@@ -539,7 +562,7 @@ batadv_find_router(struct batadv_priv *bat_priv,
539 * router - obviously there are no other candidates. 562 * router - obviously there are no other candidates.
540 */ 563 */
541 rcu_read_lock(); 564 rcu_read_lock();
542 last_candidate = orig_node->last_bonding_candidate; 565 last_candidate = batadv_last_bonding_get(orig_node);
543 if (last_candidate) 566 if (last_candidate)
544 last_cand_router = rcu_dereference(last_candidate->router); 567 last_cand_router = rcu_dereference(last_candidate->router);
545 568
@@ -631,6 +654,9 @@ next:
631 batadv_orig_ifinfo_put(next_candidate); 654 batadv_orig_ifinfo_put(next_candidate);
632 } 655 }
633 656
657 if (last_candidate)
658 batadv_orig_ifinfo_put(last_candidate);
659
634 return router; 660 return router;
635} 661}
636 662
diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c
index 4b351af3e67b..d6feabb03516 100644
--- a/net/ipv4/ip_input.c
+++ b/net/ipv4/ip_input.c
@@ -312,6 +312,7 @@ static int ip_rcv_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
312{ 312{
313 const struct iphdr *iph = ip_hdr(skb); 313 const struct iphdr *iph = ip_hdr(skb);
314 struct rtable *rt; 314 struct rtable *rt;
315 struct net_device *dev = skb->dev;
315 316
316 /* if ingress device is enslaved to an L3 master device pass the 317 /* if ingress device is enslaved to an L3 master device pass the
317 * skb to its handler for processing 318 * skb to its handler for processing
@@ -341,7 +342,7 @@ static int ip_rcv_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
341 */ 342 */
342 if (!skb_valid_dst(skb)) { 343 if (!skb_valid_dst(skb)) {
343 int err = ip_route_input_noref(skb, iph->daddr, iph->saddr, 344 int err = ip_route_input_noref(skb, iph->daddr, iph->saddr,
344 iph->tos, skb->dev); 345 iph->tos, dev);
345 if (unlikely(err)) { 346 if (unlikely(err)) {
346 if (err == -EXDEV) 347 if (err == -EXDEV)
347 __NET_INC_STATS(net, LINUX_MIB_IPRPFILTER); 348 __NET_INC_STATS(net, LINUX_MIB_IPRPFILTER);
@@ -370,7 +371,7 @@ static int ip_rcv_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
370 __IP_UPD_PO_STATS(net, IPSTATS_MIB_INBCAST, skb->len); 371 __IP_UPD_PO_STATS(net, IPSTATS_MIB_INBCAST, skb->len);
371 } else if (skb->pkt_type == PACKET_BROADCAST || 372 } else if (skb->pkt_type == PACKET_BROADCAST ||
372 skb->pkt_type == PACKET_MULTICAST) { 373 skb->pkt_type == PACKET_MULTICAST) {
373 struct in_device *in_dev = __in_dev_get_rcu(skb->dev); 374 struct in_device *in_dev = __in_dev_get_rcu(dev);
374 375
375 /* RFC 1122 3.3.6: 376 /* RFC 1122 3.3.6:
376 * 377 *
diff --git a/net/ipv4/ip_vti.c b/net/ipv4/ip_vti.c
index cc701fa70b12..5d7944f394d9 100644
--- a/net/ipv4/ip_vti.c
+++ b/net/ipv4/ip_vti.c
@@ -88,6 +88,7 @@ static int vti_rcv_cb(struct sk_buff *skb, int err)
88 struct net_device *dev; 88 struct net_device *dev;
89 struct pcpu_sw_netstats *tstats; 89 struct pcpu_sw_netstats *tstats;
90 struct xfrm_state *x; 90 struct xfrm_state *x;
91 struct xfrm_mode *inner_mode;
91 struct ip_tunnel *tunnel = XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4; 92 struct ip_tunnel *tunnel = XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4;
92 u32 orig_mark = skb->mark; 93 u32 orig_mark = skb->mark;
93 int ret; 94 int ret;
@@ -105,7 +106,19 @@ static int vti_rcv_cb(struct sk_buff *skb, int err)
105 } 106 }
106 107
107 x = xfrm_input_state(skb); 108 x = xfrm_input_state(skb);
108 family = x->inner_mode->afinfo->family; 109
110 inner_mode = x->inner_mode;
111
112 if (x->sel.family == AF_UNSPEC) {
113 inner_mode = xfrm_ip2inner_mode(x, XFRM_MODE_SKB_CB(skb)->protocol);
114 if (inner_mode == NULL) {
115 XFRM_INC_STATS(dev_net(skb->dev),
116 LINUX_MIB_XFRMINSTATEMODEERROR);
117 return -EINVAL;
118 }
119 }
120
121 family = inner_mode->afinfo->family;
109 122
110 skb->mark = be32_to_cpu(tunnel->parms.i_key); 123 skb->mark = be32_to_cpu(tunnel->parms.i_key);
111 ret = xfrm_policy_check(NULL, XFRM_POLICY_IN, skb, family); 124 ret = xfrm_policy_check(NULL, XFRM_POLICY_IN, skb, family);
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
index 26253328d227..a87bcd2d4a94 100644
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -2076,6 +2076,7 @@ static int __ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
2076 struct rta_mfc_stats mfcs; 2076 struct rta_mfc_stats mfcs;
2077 struct nlattr *mp_attr; 2077 struct nlattr *mp_attr;
2078 struct rtnexthop *nhp; 2078 struct rtnexthop *nhp;
2079 unsigned long lastuse;
2079 int ct; 2080 int ct;
2080 2081
2081 /* If cache is unresolved, don't try to parse IIF and OIF */ 2082 /* If cache is unresolved, don't try to parse IIF and OIF */
@@ -2105,12 +2106,14 @@ static int __ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
2105 2106
2106 nla_nest_end(skb, mp_attr); 2107 nla_nest_end(skb, mp_attr);
2107 2108
2109 lastuse = READ_ONCE(c->mfc_un.res.lastuse);
2110 lastuse = time_after_eq(jiffies, lastuse) ? jiffies - lastuse : 0;
2111
2108 mfcs.mfcs_packets = c->mfc_un.res.pkt; 2112 mfcs.mfcs_packets = c->mfc_un.res.pkt;
2109 mfcs.mfcs_bytes = c->mfc_un.res.bytes; 2113 mfcs.mfcs_bytes = c->mfc_un.res.bytes;
2110 mfcs.mfcs_wrong_if = c->mfc_un.res.wrong_if; 2114 mfcs.mfcs_wrong_if = c->mfc_un.res.wrong_if;
2111 if (nla_put_64bit(skb, RTA_MFC_STATS, sizeof(mfcs), &mfcs, RTA_PAD) || 2115 if (nla_put_64bit(skb, RTA_MFC_STATS, sizeof(mfcs), &mfcs, RTA_PAD) ||
2112 nla_put_u64_64bit(skb, RTA_EXPIRES, 2116 nla_put_u64_64bit(skb, RTA_EXPIRES, jiffies_to_clock_t(lastuse),
2113 jiffies_to_clock_t(c->mfc_un.res.lastuse),
2114 RTA_PAD)) 2117 RTA_PAD))
2115 return -EMSGSIZE; 2118 return -EMSGSIZE;
2116 2119
diff --git a/net/ipv4/netfilter/nft_chain_route_ipv4.c b/net/ipv4/netfilter/nft_chain_route_ipv4.c
index 2375b0a8be46..30493beb611a 100644
--- a/net/ipv4/netfilter/nft_chain_route_ipv4.c
+++ b/net/ipv4/netfilter/nft_chain_route_ipv4.c
@@ -31,6 +31,7 @@ static unsigned int nf_route_table_hook(void *priv,
31 __be32 saddr, daddr; 31 __be32 saddr, daddr;
32 u_int8_t tos; 32 u_int8_t tos;
33 const struct iphdr *iph; 33 const struct iphdr *iph;
34 int err;
34 35
35 /* root is playing with raw sockets. */ 36 /* root is playing with raw sockets. */
36 if (skb->len < sizeof(struct iphdr) || 37 if (skb->len < sizeof(struct iphdr) ||
@@ -46,15 +47,17 @@ static unsigned int nf_route_table_hook(void *priv,
46 tos = iph->tos; 47 tos = iph->tos;
47 48
48 ret = nft_do_chain(&pkt, priv); 49 ret = nft_do_chain(&pkt, priv);
49 if (ret != NF_DROP && ret != NF_QUEUE) { 50 if (ret != NF_DROP && ret != NF_STOLEN) {
50 iph = ip_hdr(skb); 51 iph = ip_hdr(skb);
51 52
52 if (iph->saddr != saddr || 53 if (iph->saddr != saddr ||
53 iph->daddr != daddr || 54 iph->daddr != daddr ||
54 skb->mark != mark || 55 skb->mark != mark ||
55 iph->tos != tos) 56 iph->tos != tos) {
56 if (ip_route_me_harder(state->net, skb, RTN_UNSPEC)) 57 err = ip_route_me_harder(state->net, skb, RTN_UNSPEC);
57 ret = NF_DROP; 58 if (err < 0)
59 ret = NF_DROP_ERR(err);
60 }
58 } 61 }
59 return ret; 62 return ret;
60} 63}
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index a1f2830d8110..b5b47a26d4ec 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -476,12 +476,18 @@ u32 ip_idents_reserve(u32 hash, int segs)
476 atomic_t *p_id = ip_idents + hash % IP_IDENTS_SZ; 476 atomic_t *p_id = ip_idents + hash % IP_IDENTS_SZ;
477 u32 old = ACCESS_ONCE(*p_tstamp); 477 u32 old = ACCESS_ONCE(*p_tstamp);
478 u32 now = (u32)jiffies; 478 u32 now = (u32)jiffies;
479 u32 delta = 0; 479 u32 new, delta = 0;
480 480
481 if (old != now && cmpxchg(p_tstamp, old, now) == old) 481 if (old != now && cmpxchg(p_tstamp, old, now) == old)
482 delta = prandom_u32_max(now - old); 482 delta = prandom_u32_max(now - old);
483 483
484 return atomic_add_return(segs + delta, p_id) - segs; 484 /* Do not use atomic_add_return() as it makes UBSAN unhappy */
485 do {
486 old = (u32)atomic_read(p_id);
487 new = old + delta + segs;
488 } while (atomic_cmpxchg(p_id, old, new) != old);
489
490 return new - segs;
485} 491}
486EXPORT_SYMBOL(ip_idents_reserve); 492EXPORT_SYMBOL(ip_idents_reserve);
487 493
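The open-coded loop above replaces atomic_add_return() because that helper performs its addition on the signed int inside atomic_t, which UBSAN's signed-overflow checker flags once the IP ID counter wraps; copying the value into u32 variables makes the wraparound well defined before the result is published with cmpxchg. The same pattern in isolation (a sketch assuming linux/atomic.h, not an existing kernel helper):

    static inline u32 atomic_add_return_wrap(atomic_t *v, u32 delta)
    {
        u32 old, new;

        do {
            old = (u32)atomic_read(v);
            new = old + delta;              /* unsigned add: wrap is defined */
        } while (atomic_cmpxchg(v, old, new) != old);

        return new;
    }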
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 3ebf45b38bc3..08323bd95f2a 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -5885,7 +5885,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb)
5885 * so release it. 5885 * so release it.
5886 */ 5886 */
5887 if (req) { 5887 if (req) {
5888 tp->total_retrans = req->num_retrans; 5888 inet_csk(sk)->icsk_retransmits = 0;
5889 reqsk_fastopen_remove(sk, req, false); 5889 reqsk_fastopen_remove(sk, req, false);
5890 } else { 5890 } else {
5891 /* Make sure socket is routed, for correct metrics. */ 5891 /* Make sure socket is routed, for correct metrics. */
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index bdaef7fd6e47..5288cec4a2b2 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -2605,7 +2605,8 @@ int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs)
2605 * copying overhead: fragmentation, tunneling, mangling etc. 2605 * copying overhead: fragmentation, tunneling, mangling etc.
2606 */ 2606 */
2607 if (atomic_read(&sk->sk_wmem_alloc) > 2607 if (atomic_read(&sk->sk_wmem_alloc) >
2608 min(sk->sk_wmem_queued + (sk->sk_wmem_queued >> 2), sk->sk_sndbuf)) 2608 min_t(u32, sk->sk_wmem_queued + (sk->sk_wmem_queued >> 2),
2609 sk->sk_sndbuf))
2609 return -EAGAIN; 2610 return -EAGAIN;
2610 2611
2611 if (skb_still_in_host_queue(sk, skb)) 2612 if (skb_still_in_host_queue(sk, skb))
@@ -2830,7 +2831,7 @@ begin_fwd:
2830 if (tcp_retransmit_skb(sk, skb, segs)) 2831 if (tcp_retransmit_skb(sk, skb, segs))
2831 return; 2832 return;
2832 2833
2833 NET_INC_STATS(sock_net(sk), mib_idx); 2834 NET_ADD_STATS(sock_net(sk), mib_idx, tcp_skb_pcount(skb));
2834 2835
2835 if (tcp_in_cwnd_reduction(sk)) 2836 if (tcp_in_cwnd_reduction(sk))
2836 tp->prr_out += tcp_skb_pcount(skb); 2837 tp->prr_out += tcp_skb_pcount(skb);
@@ -3567,6 +3568,8 @@ int tcp_rtx_synack(const struct sock *sk, struct request_sock *req)
3567 if (!res) { 3568 if (!res) {
3568 __TCP_INC_STATS(sock_net(sk), TCP_MIB_RETRANSSEGS); 3569 __TCP_INC_STATS(sock_net(sk), TCP_MIB_RETRANSSEGS);
3569 __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPSYNRETRANS); 3570 __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPSYNRETRANS);
3571 if (unlikely(tcp_passive_fastopen(sk)))
3572 tcp_sk(sk)->total_retrans++;
3570 } 3573 }
3571 return res; 3574 return res;
3572} 3575}
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
index d84930b2dd95..f712b411f6ed 100644
--- a/net/ipv4/tcp_timer.c
+++ b/net/ipv4/tcp_timer.c
@@ -384,6 +384,7 @@ static void tcp_fastopen_synack_timer(struct sock *sk)
384 */ 384 */
385 inet_rtx_syn_ack(sk, req); 385 inet_rtx_syn_ack(sk, req);
386 req->num_timeout++; 386 req->num_timeout++;
387 icsk->icsk_retransmits++;
387 inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, 388 inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
388 TCP_TIMEOUT_INIT << req->num_timeout, TCP_RTO_MAX); 389 TCP_TIMEOUT_INIT << req->num_timeout, TCP_RTO_MAX);
389} 390}
diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c
index d90a11f14040..5bd3afdcc771 100644
--- a/net/ipv6/ip6_vti.c
+++ b/net/ipv6/ip6_vti.c
@@ -321,11 +321,9 @@ static int vti6_rcv(struct sk_buff *skb)
321 goto discard; 321 goto discard;
322 } 322 }
323 323
324 XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip6 = t;
325
326 rcu_read_unlock(); 324 rcu_read_unlock();
327 325
328 return xfrm6_rcv(skb); 326 return xfrm6_rcv_tnl(skb, t);
329 } 327 }
330 rcu_read_unlock(); 328 rcu_read_unlock();
331 return -EINVAL; 329 return -EINVAL;
@@ -340,6 +338,7 @@ static int vti6_rcv_cb(struct sk_buff *skb, int err)
340 struct net_device *dev; 338 struct net_device *dev;
341 struct pcpu_sw_netstats *tstats; 339 struct pcpu_sw_netstats *tstats;
342 struct xfrm_state *x; 340 struct xfrm_state *x;
341 struct xfrm_mode *inner_mode;
343 struct ip6_tnl *t = XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip6; 342 struct ip6_tnl *t = XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip6;
344 u32 orig_mark = skb->mark; 343 u32 orig_mark = skb->mark;
345 int ret; 344 int ret;
@@ -357,7 +356,19 @@ static int vti6_rcv_cb(struct sk_buff *skb, int err)
357 } 356 }
358 357
359 x = xfrm_input_state(skb); 358 x = xfrm_input_state(skb);
360 family = x->inner_mode->afinfo->family; 359
360 inner_mode = x->inner_mode;
361
362 if (x->sel.family == AF_UNSPEC) {
363 inner_mode = xfrm_ip2inner_mode(x, XFRM_MODE_SKB_CB(skb)->protocol);
364 if (inner_mode == NULL) {
365 XFRM_INC_STATS(dev_net(skb->dev),
366 LINUX_MIB_XFRMINSTATEMODEERROR);
367 return -EINVAL;
368 }
369 }
370
371 family = inner_mode->afinfo->family;
361 372
362 skb->mark = be32_to_cpu(t->parms.i_key); 373 skb->mark = be32_to_cpu(t->parms.i_key);
363 ret = xfrm_policy_check(NULL, XFRM_POLICY_IN, skb, family); 374 ret = xfrm_policy_check(NULL, XFRM_POLICY_IN, skb, family);
diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
index 6122f9c5cc49..fccb5dd91902 100644
--- a/net/ipv6/ip6mr.c
+++ b/net/ipv6/ip6mr.c
@@ -2239,6 +2239,7 @@ static int __ip6mr_fill_mroute(struct mr6_table *mrt, struct sk_buff *skb,
2239 struct rta_mfc_stats mfcs; 2239 struct rta_mfc_stats mfcs;
2240 struct nlattr *mp_attr; 2240 struct nlattr *mp_attr;
2241 struct rtnexthop *nhp; 2241 struct rtnexthop *nhp;
2242 unsigned long lastuse;
2242 int ct; 2243 int ct;
2243 2244
2244 /* If cache is unresolved, don't try to parse IIF and OIF */ 2245 /* If cache is unresolved, don't try to parse IIF and OIF */
@@ -2269,12 +2270,14 @@ static int __ip6mr_fill_mroute(struct mr6_table *mrt, struct sk_buff *skb,
2269 2270
2270 nla_nest_end(skb, mp_attr); 2271 nla_nest_end(skb, mp_attr);
2271 2272
2273 lastuse = READ_ONCE(c->mfc_un.res.lastuse);
2274 lastuse = time_after_eq(jiffies, lastuse) ? jiffies - lastuse : 0;
2275
2272 mfcs.mfcs_packets = c->mfc_un.res.pkt; 2276 mfcs.mfcs_packets = c->mfc_un.res.pkt;
2273 mfcs.mfcs_bytes = c->mfc_un.res.bytes; 2277 mfcs.mfcs_bytes = c->mfc_un.res.bytes;
2274 mfcs.mfcs_wrong_if = c->mfc_un.res.wrong_if; 2278 mfcs.mfcs_wrong_if = c->mfc_un.res.wrong_if;
2275 if (nla_put_64bit(skb, RTA_MFC_STATS, sizeof(mfcs), &mfcs, RTA_PAD) || 2279 if (nla_put_64bit(skb, RTA_MFC_STATS, sizeof(mfcs), &mfcs, RTA_PAD) ||
2276 nla_put_u64_64bit(skb, RTA_EXPIRES, 2280 nla_put_u64_64bit(skb, RTA_EXPIRES, jiffies_to_clock_t(lastuse),
2277 jiffies_to_clock_t(c->mfc_un.res.lastuse),
2278 RTA_PAD)) 2281 RTA_PAD))
2279 return -EMSGSIZE; 2282 return -EMSGSIZE;
2280 2283
diff --git a/net/ipv6/netfilter/nft_chain_route_ipv6.c b/net/ipv6/netfilter/nft_chain_route_ipv6.c
index 71d995ff3108..2535223ba956 100644
--- a/net/ipv6/netfilter/nft_chain_route_ipv6.c
+++ b/net/ipv6/netfilter/nft_chain_route_ipv6.c
@@ -31,6 +31,7 @@ static unsigned int nf_route_table_hook(void *priv,
31 struct in6_addr saddr, daddr; 31 struct in6_addr saddr, daddr;
32 u_int8_t hop_limit; 32 u_int8_t hop_limit;
33 u32 mark, flowlabel; 33 u32 mark, flowlabel;
34 int err;
34 35
35 /* malformed packet, drop it */ 36 /* malformed packet, drop it */
36 if (nft_set_pktinfo_ipv6(&pkt, skb, state) < 0) 37 if (nft_set_pktinfo_ipv6(&pkt, skb, state) < 0)
@@ -46,13 +47,16 @@ static unsigned int nf_route_table_hook(void *priv,
46 flowlabel = *((u32 *)ipv6_hdr(skb)); 47 flowlabel = *((u32 *)ipv6_hdr(skb));
47 48
48 ret = nft_do_chain(&pkt, priv); 49 ret = nft_do_chain(&pkt, priv);
49 if (ret != NF_DROP && ret != NF_QUEUE && 50 if (ret != NF_DROP && ret != NF_STOLEN &&
50 (memcmp(&ipv6_hdr(skb)->saddr, &saddr, sizeof(saddr)) || 51 (memcmp(&ipv6_hdr(skb)->saddr, &saddr, sizeof(saddr)) ||
51 memcmp(&ipv6_hdr(skb)->daddr, &daddr, sizeof(daddr)) || 52 memcmp(&ipv6_hdr(skb)->daddr, &daddr, sizeof(daddr)) ||
52 skb->mark != mark || 53 skb->mark != mark ||
53 ipv6_hdr(skb)->hop_limit != hop_limit || 54 ipv6_hdr(skb)->hop_limit != hop_limit ||
54 flowlabel != *((u_int32_t *)ipv6_hdr(skb)))) 55 flowlabel != *((u_int32_t *)ipv6_hdr(skb)))) {
55 return ip6_route_me_harder(state->net, skb) == 0 ? ret : NF_DROP; 56 err = ip6_route_me_harder(state->net, skb);
57 if (err < 0)
58 ret = NF_DROP_ERR(err);
59 }
56 60
57 return ret; 61 return ret;
58} 62}
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 49817555449e..e3a224b97905 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -1986,9 +1986,18 @@ static struct rt6_info *ip6_route_info_create(struct fib6_config *cfg)
1986 if (!(gwa_type & IPV6_ADDR_UNICAST)) 1986 if (!(gwa_type & IPV6_ADDR_UNICAST))
1987 goto out; 1987 goto out;
1988 1988
1989 if (cfg->fc_table) 1989 if (cfg->fc_table) {
1990 grt = ip6_nh_lookup_table(net, cfg, gw_addr); 1990 grt = ip6_nh_lookup_table(net, cfg, gw_addr);
1991 1991
1992 if (grt) {
1993 if (grt->rt6i_flags & RTF_GATEWAY ||
1994 (dev && dev != grt->dst.dev)) {
1995 ip6_rt_put(grt);
1996 grt = NULL;
1997 }
1998 }
1999 }
2000
1992 if (!grt) 2001 if (!grt)
1993 grt = rt6_lookup(net, gw_addr, NULL, 2002 grt = rt6_lookup(net, gw_addr, NULL,
1994 cfg->fc_ifindex, 1); 2003 cfg->fc_ifindex, 1);
diff --git a/net/ipv6/xfrm6_input.c b/net/ipv6/xfrm6_input.c
index 00a2d40677d6..b5789562aded 100644
--- a/net/ipv6/xfrm6_input.c
+++ b/net/ipv6/xfrm6_input.c
@@ -21,9 +21,10 @@ int xfrm6_extract_input(struct xfrm_state *x, struct sk_buff *skb)
21 return xfrm6_extract_header(skb); 21 return xfrm6_extract_header(skb);
22} 22}
23 23
24int xfrm6_rcv_spi(struct sk_buff *skb, int nexthdr, __be32 spi) 24int xfrm6_rcv_spi(struct sk_buff *skb, int nexthdr, __be32 spi,
25 struct ip6_tnl *t)
25{ 26{
26 XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip6 = NULL; 27 XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip6 = t;
27 XFRM_SPI_SKB_CB(skb)->family = AF_INET6; 28 XFRM_SPI_SKB_CB(skb)->family = AF_INET6;
28 XFRM_SPI_SKB_CB(skb)->daddroff = offsetof(struct ipv6hdr, daddr); 29 XFRM_SPI_SKB_CB(skb)->daddroff = offsetof(struct ipv6hdr, daddr);
29 return xfrm_input(skb, nexthdr, spi, 0); 30 return xfrm_input(skb, nexthdr, spi, 0);
@@ -49,13 +50,18 @@ int xfrm6_transport_finish(struct sk_buff *skb, int async)
49 return -1; 50 return -1;
50} 51}
51 52
52int xfrm6_rcv(struct sk_buff *skb) 53int xfrm6_rcv_tnl(struct sk_buff *skb, struct ip6_tnl *t)
53{ 54{
54 return xfrm6_rcv_spi(skb, skb_network_header(skb)[IP6CB(skb)->nhoff], 55 return xfrm6_rcv_spi(skb, skb_network_header(skb)[IP6CB(skb)->nhoff],
55 0); 56 0, t);
56} 57}
57EXPORT_SYMBOL(xfrm6_rcv); 58EXPORT_SYMBOL(xfrm6_rcv_tnl);
58 59
60int xfrm6_rcv(struct sk_buff *skb)
61{
62 return xfrm6_rcv_tnl(skb, NULL);
63}
64EXPORT_SYMBOL(xfrm6_rcv);
59int xfrm6_input_addr(struct sk_buff *skb, xfrm_address_t *daddr, 65int xfrm6_input_addr(struct sk_buff *skb, xfrm_address_t *daddr,
60 xfrm_address_t *saddr, u8 proto) 66 xfrm_address_t *saddr, u8 proto)
61{ 67{
diff --git a/net/ipv6/xfrm6_tunnel.c b/net/ipv6/xfrm6_tunnel.c
index 5743044cd660..e1c0bbe7996c 100644
--- a/net/ipv6/xfrm6_tunnel.c
+++ b/net/ipv6/xfrm6_tunnel.c
@@ -236,7 +236,7 @@ static int xfrm6_tunnel_rcv(struct sk_buff *skb)
236 __be32 spi; 236 __be32 spi;
237 237
238 spi = xfrm6_tunnel_spi_lookup(net, (const xfrm_address_t *)&iph->saddr); 238 spi = xfrm6_tunnel_spi_lookup(net, (const xfrm_address_t *)&iph->saddr);
239 return xfrm6_rcv_spi(skb, IPPROTO_IPV6, spi); 239 return xfrm6_rcv_spi(skb, IPPROTO_IPV6, spi, NULL);
240} 240}
241 241
242static int xfrm6_tunnel_err(struct sk_buff *skb, struct inet6_skb_parm *opt, 242static int xfrm6_tunnel_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
diff --git a/net/irda/af_irda.c b/net/irda/af_irda.c
index 8d2f7c9b491d..ccc244406fb9 100644
--- a/net/irda/af_irda.c
+++ b/net/irda/af_irda.c
@@ -832,7 +832,7 @@ static int irda_accept(struct socket *sock, struct socket *newsock, int flags)
832 struct sock *sk = sock->sk; 832 struct sock *sk = sock->sk;
833 struct irda_sock *new, *self = irda_sk(sk); 833 struct irda_sock *new, *self = irda_sk(sk);
834 struct sock *newsk; 834 struct sock *newsk;
835 struct sk_buff *skb; 835 struct sk_buff *skb = NULL;
836 int err; 836 int err;
837 837
838 err = irda_create(sock_net(sk), newsock, sk->sk_protocol, 0); 838 err = irda_create(sock_net(sk), newsock, sk->sk_protocol, 0);
@@ -900,7 +900,6 @@ static int irda_accept(struct socket *sock, struct socket *newsock, int flags)
900 err = -EPERM; /* value does not seem to make sense. -arnd */ 900 err = -EPERM; /* value does not seem to make sense. -arnd */
901 if (!new->tsap) { 901 if (!new->tsap) {
902 pr_debug("%s(), dup failed!\n", __func__); 902 pr_debug("%s(), dup failed!\n", __func__);
903 kfree_skb(skb);
904 goto out; 903 goto out;
905 } 904 }
906 905
@@ -919,7 +918,6 @@ static int irda_accept(struct socket *sock, struct socket *newsock, int flags)
919 /* Clean up the original one to keep it in listen state */ 918 /* Clean up the original one to keep it in listen state */
920 irttp_listen(self->tsap); 919 irttp_listen(self->tsap);
921 920
922 kfree_skb(skb);
923 sk->sk_ack_backlog--; 921 sk->sk_ack_backlog--;
924 922
925 newsock->state = SS_CONNECTED; 923 newsock->state = SS_CONNECTED;
@@ -927,6 +925,7 @@ static int irda_accept(struct socket *sock, struct socket *newsock, int flags)
927 irda_connect_response(new); 925 irda_connect_response(new);
928 err = 0; 926 err = 0;
929out: 927out:
928 kfree_skb(skb);
930 release_sock(sk); 929 release_sock(sk);
931 return err; 930 return err;
932} 931}
diff --git a/net/mac80211/agg-rx.c b/net/mac80211/agg-rx.c
index a9aff6079c42..afa94687d5e1 100644
--- a/net/mac80211/agg-rx.c
+++ b/net/mac80211/agg-rx.c
@@ -261,10 +261,16 @@ void __ieee80211_start_rx_ba_session(struct sta_info *sta,
261 .timeout = timeout, 261 .timeout = timeout,
262 .ssn = start_seq_num, 262 .ssn = start_seq_num,
263 }; 263 };
264
265 int i, ret = -EOPNOTSUPP; 264 int i, ret = -EOPNOTSUPP;
266 u16 status = WLAN_STATUS_REQUEST_DECLINED; 265 u16 status = WLAN_STATUS_REQUEST_DECLINED;
267 266
267 if (tid >= IEEE80211_FIRST_TSPEC_TSID) {
268 ht_dbg(sta->sdata,
269 "STA %pM requests BA session on unsupported tid %d\n",
270 sta->sta.addr, tid);
271 goto end_no_lock;
272 }
273
268 if (!sta->sta.ht_cap.ht_supported) { 274 if (!sta->sta.ht_cap.ht_supported) {
269 ht_dbg(sta->sdata, 275 ht_dbg(sta->sdata,
270 "STA %pM erroneously requests BA session on tid %d w/o QoS\n", 276 "STA %pM erroneously requests BA session on tid %d w/o QoS\n",
diff --git a/net/mac80211/agg-tx.c b/net/mac80211/agg-tx.c
index 5650c46bf91a..45319cc01121 100644
--- a/net/mac80211/agg-tx.c
+++ b/net/mac80211/agg-tx.c
@@ -584,6 +584,9 @@ int ieee80211_start_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid,
584 ieee80211_hw_check(&local->hw, TX_AMPDU_SETUP_IN_HW)) 584 ieee80211_hw_check(&local->hw, TX_AMPDU_SETUP_IN_HW))
585 return -EINVAL; 585 return -EINVAL;
586 586
587 if (WARN_ON(tid >= IEEE80211_FIRST_TSPEC_TSID))
588 return -EINVAL;
589
587 ht_dbg(sdata, "Open BA session requested for %pM tid %u\n", 590 ht_dbg(sdata, "Open BA session requested for %pM tid %u\n",
588 pubsta->addr, tid); 591 pubsta->addr, tid);
589 592
diff --git a/net/mac80211/mesh_hwmp.c b/net/mac80211/mesh_hwmp.c
index 8f9c3bde835f..faccef977670 100644
--- a/net/mac80211/mesh_hwmp.c
+++ b/net/mac80211/mesh_hwmp.c
@@ -746,6 +746,7 @@ static void hwmp_perr_frame_process(struct ieee80211_sub_if_data *sdata,
746 sta = next_hop_deref_protected(mpath); 746 sta = next_hop_deref_protected(mpath);
747 if (mpath->flags & MESH_PATH_ACTIVE && 747 if (mpath->flags & MESH_PATH_ACTIVE &&
748 ether_addr_equal(ta, sta->sta.addr) && 748 ether_addr_equal(ta, sta->sta.addr) &&
749 !(mpath->flags & MESH_PATH_FIXED) &&
749 (!(mpath->flags & MESH_PATH_SN_VALID) || 750 (!(mpath->flags & MESH_PATH_SN_VALID) ||
750 SN_GT(target_sn, mpath->sn) || target_sn == 0)) { 751 SN_GT(target_sn, mpath->sn) || target_sn == 0)) {
751 mpath->flags &= ~MESH_PATH_ACTIVE; 752 mpath->flags &= ~MESH_PATH_ACTIVE;
@@ -1012,7 +1013,7 @@ void mesh_path_start_discovery(struct ieee80211_sub_if_data *sdata)
1012 goto enddiscovery; 1013 goto enddiscovery;
1013 1014
1014 spin_lock_bh(&mpath->state_lock); 1015 spin_lock_bh(&mpath->state_lock);
1015 if (mpath->flags & MESH_PATH_DELETED) { 1016 if (mpath->flags & (MESH_PATH_DELETED | MESH_PATH_FIXED)) {
1016 spin_unlock_bh(&mpath->state_lock); 1017 spin_unlock_bh(&mpath->state_lock);
1017 goto enddiscovery; 1018 goto enddiscovery;
1018 } 1019 }
diff --git a/net/mac80211/mesh_pathtbl.c b/net/mac80211/mesh_pathtbl.c
index 6db2ddfa0695..f0e6175a9821 100644
--- a/net/mac80211/mesh_pathtbl.c
+++ b/net/mac80211/mesh_pathtbl.c
@@ -826,7 +826,7 @@ void mesh_path_fix_nexthop(struct mesh_path *mpath, struct sta_info *next_hop)
826 mpath->metric = 0; 826 mpath->metric = 0;
827 mpath->hop_count = 0; 827 mpath->hop_count = 0;
828 mpath->exp_time = 0; 828 mpath->exp_time = 0;
829 mpath->flags |= MESH_PATH_FIXED; 829 mpath->flags = MESH_PATH_FIXED | MESH_PATH_SN_VALID;
830 mesh_path_activate(mpath); 830 mesh_path_activate(mpath);
831 spin_unlock_bh(&mpath->state_lock); 831 spin_unlock_bh(&mpath->state_lock);
832 mesh_path_tx_pending(mpath); 832 mesh_path_tx_pending(mpath);
diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
index 76b737dcc36f..aa58df80ede0 100644
--- a/net/mac80211/sta_info.c
+++ b/net/mac80211/sta_info.c
@@ -1616,7 +1616,6 @@ ieee80211_sta_ps_deliver_response(struct sta_info *sta,
1616 1616
1617 sta_info_recalc_tim(sta); 1617 sta_info_recalc_tim(sta);
1618 } else { 1618 } else {
1619 unsigned long tids = sta->txq_buffered_tids & driver_release_tids;
1620 int tid; 1619 int tid;
1621 1620
1622 /* 1621 /*
@@ -1648,7 +1647,8 @@ ieee80211_sta_ps_deliver_response(struct sta_info *sta,
1648 for (tid = 0; tid < ARRAY_SIZE(sta->sta.txq); tid++) { 1647 for (tid = 0; tid < ARRAY_SIZE(sta->sta.txq); tid++) {
1649 struct txq_info *txqi = to_txq_info(sta->sta.txq[tid]); 1648 struct txq_info *txqi = to_txq_info(sta->sta.txq[tid]);
1650 1649
1651 if (!(tids & BIT(tid)) || txqi->tin.backlog_packets) 1650 if (!(driver_release_tids & BIT(tid)) ||
1651 txqi->tin.backlog_packets)
1652 continue; 1652 continue;
1653 1653
1654 sta_info_recalc_tim(sta); 1654 sta_info_recalc_tim(sta);
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index 502396694f47..18b285e06bc8 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -796,6 +796,36 @@ static __le16 ieee80211_tx_next_seq(struct sta_info *sta, int tid)
796 return ret; 796 return ret;
797} 797}
798 798
799static struct txq_info *ieee80211_get_txq(struct ieee80211_local *local,
800 struct ieee80211_vif *vif,
801 struct ieee80211_sta *pubsta,
802 struct sk_buff *skb)
803{
804 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
805 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
806 struct ieee80211_txq *txq = NULL;
807
808 if ((info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) ||
809 (info->control.flags & IEEE80211_TX_CTRL_PS_RESPONSE))
810 return NULL;
811
812 if (!ieee80211_is_data(hdr->frame_control))
813 return NULL;
814
815 if (pubsta) {
816 u8 tid = skb->priority & IEEE80211_QOS_CTL_TID_MASK;
817
818 txq = pubsta->txq[tid];
819 } else if (vif) {
820 txq = vif->txq;
821 }
822
823 if (!txq)
824 return NULL;
825
826 return to_txq_info(txq);
827}
828
799static ieee80211_tx_result debug_noinline 829static ieee80211_tx_result debug_noinline
800ieee80211_tx_h_sequence(struct ieee80211_tx_data *tx) 830ieee80211_tx_h_sequence(struct ieee80211_tx_data *tx)
801{ 831{
@@ -853,7 +883,8 @@ ieee80211_tx_h_sequence(struct ieee80211_tx_data *tx)
853 tid = *qc & IEEE80211_QOS_CTL_TID_MASK; 883 tid = *qc & IEEE80211_QOS_CTL_TID_MASK;
854 tx->sta->tx_stats.msdu[tid]++; 884 tx->sta->tx_stats.msdu[tid]++;
855 885
856 if (!tx->sta->sta.txq[0]) 886 if (!ieee80211_get_txq(tx->local, info->control.vif, &tx->sta->sta,
887 tx->skb))
857 hdr->seq_ctrl = ieee80211_tx_next_seq(tx->sta, tid); 888 hdr->seq_ctrl = ieee80211_tx_next_seq(tx->sta, tid);
858 889
859 return TX_CONTINUE; 890 return TX_CONTINUE;
@@ -1243,36 +1274,6 @@ ieee80211_tx_prepare(struct ieee80211_sub_if_data *sdata,
 	return TX_CONTINUE;
 }
 
-static struct txq_info *ieee80211_get_txq(struct ieee80211_local *local,
-					  struct ieee80211_vif *vif,
-					  struct ieee80211_sta *pubsta,
-					  struct sk_buff *skb)
-{
-	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
-	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
-	struct ieee80211_txq *txq = NULL;
-
-	if ((info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) ||
-	    (info->control.flags & IEEE80211_TX_CTRL_PS_RESPONSE))
-		return NULL;
-
-	if (!ieee80211_is_data(hdr->frame_control))
-		return NULL;
-
-	if (pubsta) {
-		u8 tid = skb->priority & IEEE80211_QOS_CTL_TID_MASK;
-
-		txq = pubsta->txq[tid];
-	} else if (vif) {
-		txq = vif->txq;
-	}
-
-	if (!txq)
-		return NULL;
-
-	return to_txq_info(txq);
-}
-
 static void ieee80211_set_skb_enqueue_time(struct sk_buff *skb)
 {
 	IEEE80211_SKB_CB(skb)->control.enqueue_time = codel_get_time();
@@ -1514,8 +1515,12 @@ out:
 	spin_unlock_bh(&fq->lock);
 
 	if (skb && skb_has_frag_list(skb) &&
-	    !ieee80211_hw_check(&local->hw, TX_FRAG_LIST))
-		skb_linearize(skb);
+	    !ieee80211_hw_check(&local->hw, TX_FRAG_LIST)) {
+		if (skb_linearize(skb)) {
+			ieee80211_free_txskb(&local->hw, skb);
+			return NULL;
+		}
+	}
 
 	return skb;
 }
@@ -3264,7 +3269,7 @@ static bool ieee80211_xmit_fast(struct ieee80211_sub_if_data *sdata,
 
 	if (hdr->frame_control & cpu_to_le16(IEEE80211_STYPE_QOS_DATA)) {
 		*ieee80211_get_qos_ctl(hdr) = tid;
-		if (!sta->sta.txq[0])
+		if (!ieee80211_get_txq(local, &sdata->vif, &sta->sta, skb))
 			hdr->seq_ctrl = ieee80211_tx_next_seq(sta, tid);
 	} else {
 		info->flags |= IEEE80211_TX_CTL_ASSIGN_SEQ;
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index dd2c43abf9e2..9934b0c93c1e 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -1035,9 +1035,9 @@ init_conntrack(struct net *net, struct nf_conn *tmpl,
 	if (IS_ERR(ct))
 		return (struct nf_conntrack_tuple_hash *)ct;
 
-	if (tmpl && nfct_synproxy(tmpl)) {
-		nfct_seqadj_ext_add(ct);
-		nfct_synproxy_ext_add(ct);
+	if (!nf_ct_add_synproxy(ct, tmpl)) {
+		nf_conntrack_free(ct);
+		return ERR_PTR(-ENOMEM);
 	}
 
 	timeout_ext = tmpl ? nf_ct_timeout_find(tmpl) : NULL;
diff --git a/net/netfilter/nf_nat_core.c b/net/netfilter/nf_nat_core.c
index de31818417b8..ecee105bbada 100644
--- a/net/netfilter/nf_nat_core.c
+++ b/net/netfilter/nf_nat_core.c
@@ -441,7 +441,8 @@ nf_nat_setup_info(struct nf_conn *ct,
 		ct->status |= IPS_DST_NAT;
 
 		if (nfct_help(ct))
-			nfct_seqadj_ext_add(ct);
+			if (!nfct_seqadj_ext_add(ct))
+				return NF_DROP;
 	}
 
 	if (maniptype == NF_NAT_MANIP_SRC) {
@@ -807,7 +808,7 @@ nfnetlink_parse_nat_setup(struct nf_conn *ct,
 	if (err < 0)
 		return err;
 
-	return nf_nat_setup_info(ct, &range, manip);
+	return nf_nat_setup_info(ct, &range, manip) == NF_DROP ? -ENOMEM : 0;
 }
 #else
 static int
diff --git a/net/netfilter/nf_tables_trace.c b/net/netfilter/nf_tables_trace.c
index 39eb1cc62e91..fa24a5b398b1 100644
--- a/net/netfilter/nf_tables_trace.c
+++ b/net/netfilter/nf_tables_trace.c
@@ -237,7 +237,7 @@ void nft_trace_notify(struct nft_traceinfo *info)
 		break;
 	case NFT_TRACETYPE_POLICY:
 		if (nla_put_be32(skb, NFTA_TRACE_POLICY,
-				 info->basechain->policy))
+				 htonl(info->basechain->policy)))
 			goto nla_put_failure;
 		break;
 	}
diff --git a/net/sctp/input.c b/net/sctp/input.c
index 69444d32ecda..1555fb8c68e0 100644
--- a/net/sctp/input.c
+++ b/net/sctp/input.c
@@ -796,27 +796,34 @@ struct sctp_hash_cmp_arg {
 static inline int sctp_hash_cmp(struct rhashtable_compare_arg *arg,
 				const void *ptr)
 {
+	struct sctp_transport *t = (struct sctp_transport *)ptr;
 	const struct sctp_hash_cmp_arg *x = arg->key;
-	const struct sctp_transport *t = ptr;
-	struct sctp_association *asoc = t->asoc;
-	const struct net *net = x->net;
+	struct sctp_association *asoc;
+	int err = 1;
 
 	if (!sctp_cmp_addr_exact(&t->ipaddr, x->paddr))
-		return 1;
-	if (!net_eq(sock_net(asoc->base.sk), net))
-		return 1;
+		return err;
+	if (!sctp_transport_hold(t))
+		return err;
+
+	asoc = t->asoc;
+	if (!net_eq(sock_net(asoc->base.sk), x->net))
+		goto out;
 	if (x->ep) {
 		if (x->ep != asoc->ep)
-			return 1;
+			goto out;
 	} else {
 		if (x->laddr->v4.sin_port != htons(asoc->base.bind_addr.port))
-			return 1;
+			goto out;
 		if (!sctp_bind_addr_match(&asoc->base.bind_addr,
 					  x->laddr, sctp_sk(asoc->base.sk)))
-			return 1;
+			goto out;
 	}
 
-	return 0;
+	err = 0;
+out:
+	sctp_transport_put(t);
+	return err;
 }
 
 static inline u32 sctp_hash_obj(const void *data, u32 len, u32 seed)
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index f02653a08993..4809f4d2cdcc 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -6978,7 +6978,7 @@ static int nl80211_channel_switch(struct sk_buff *skb, struct genl_info *info)
 
 	params.n_counter_offsets_presp = len / sizeof(u16);
 	if (rdev->wiphy.max_num_csa_counters &&
-	    (params.n_counter_offsets_beacon >
+	    (params.n_counter_offsets_presp >
 	     rdev->wiphy.max_num_csa_counters))
 		return -EINVAL;
 
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
index 9895a8c56d8c..a30f898dc1c5 100644
--- a/net/xfrm/xfrm_state.c
+++ b/net/xfrm/xfrm_state.c
@@ -332,6 +332,7 @@ static void xfrm_state_gc_destroy(struct xfrm_state *x)
 {
 	tasklet_hrtimer_cancel(&x->mtimer);
 	del_timer_sync(&x->rtimer);
+	kfree(x->aead);
 	kfree(x->aalg);
 	kfree(x->ealg);
 	kfree(x->calg);
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
index cb65d916a345..08892091cfe3 100644
--- a/net/xfrm/xfrm_user.c
+++ b/net/xfrm/xfrm_user.c
@@ -581,9 +581,12 @@ static struct xfrm_state *xfrm_state_construct(struct net *net,
 	if (err)
 		goto error;
 
-	if (attrs[XFRMA_SEC_CTX] &&
-	    security_xfrm_state_alloc(x, nla_data(attrs[XFRMA_SEC_CTX])))
-		goto error;
+	if (attrs[XFRMA_SEC_CTX]) {
+		err = security_xfrm_state_alloc(x,
+						nla_data(attrs[XFRMA_SEC_CTX]));
+		if (err)
+			goto error;
+	}
 
 	if ((err = xfrm_alloc_replay_state_esn(&x->replay_esn, &x->preplay_esn,
 					       attrs[XFRMA_REPLAY_ESN_VAL])))
diff --git a/scripts/recordmcount.c b/scripts/recordmcount.c
index 42396a74405d..a68f03133df9 100644
--- a/scripts/recordmcount.c
+++ b/scripts/recordmcount.c
@@ -363,6 +363,7 @@ is_mcounted_section_name(char const *const txtname)
 		strcmp(".sched.text", txtname) == 0 ||
 		strcmp(".spinlock.text", txtname) == 0 ||
 		strcmp(".irqentry.text", txtname) == 0 ||
+		strcmp(".softirqentry.text", txtname) == 0 ||
 		strcmp(".kprobes.text", txtname) == 0 ||
 		strcmp(".text.unlikely", txtname) == 0;
 }
diff --git a/scripts/recordmcount.pl b/scripts/recordmcount.pl
index 96e2486a6fc4..2d48011bc362 100755
--- a/scripts/recordmcount.pl
+++ b/scripts/recordmcount.pl
@@ -134,6 +134,7 @@ my %text_sections = (
      ".sched.text" => 1,
      ".spinlock.text" => 1,
      ".irqentry.text" => 1,
+     ".softirqentry.text" => 1,
      ".kprobes.text" => 1,
      ".text.unlikely" => 1,
 );
diff --git a/security/keys/encrypted-keys/encrypted.c b/security/keys/encrypted-keys/encrypted.c
index 5adbfc32242f..17a06105ccb6 100644
--- a/security/keys/encrypted-keys/encrypted.c
+++ b/security/keys/encrypted-keys/encrypted.c
@@ -29,6 +29,7 @@
 #include <linux/rcupdate.h>
 #include <linux/scatterlist.h>
 #include <linux/ctype.h>
+#include <crypto/aes.h>
 #include <crypto/hash.h>
 #include <crypto/sha.h>
 #include <crypto/skcipher.h>
@@ -478,6 +479,7 @@ static int derived_key_encrypt(struct encrypted_key_payload *epayload,
 	struct crypto_skcipher *tfm;
 	struct skcipher_request *req;
 	unsigned int encrypted_datalen;
+	u8 iv[AES_BLOCK_SIZE];
 	unsigned int padlen;
 	char pad[16];
 	int ret;
@@ -500,8 +502,8 @@ static int derived_key_encrypt(struct encrypted_key_payload *epayload,
 	sg_init_table(sg_out, 1);
 	sg_set_buf(sg_out, epayload->encrypted_data, encrypted_datalen);
 
-	skcipher_request_set_crypt(req, sg_in, sg_out, encrypted_datalen,
-				   epayload->iv);
+	memcpy(iv, epayload->iv, sizeof(iv));
+	skcipher_request_set_crypt(req, sg_in, sg_out, encrypted_datalen, iv);
 	ret = crypto_skcipher_encrypt(req);
 	tfm = crypto_skcipher_reqtfm(req);
 	skcipher_request_free(req);
@@ -581,6 +583,7 @@ static int derived_key_decrypt(struct encrypted_key_payload *epayload,
 	struct crypto_skcipher *tfm;
 	struct skcipher_request *req;
 	unsigned int encrypted_datalen;
+	u8 iv[AES_BLOCK_SIZE];
 	char pad[16];
 	int ret;
 
@@ -599,8 +602,8 @@ static int derived_key_decrypt(struct encrypted_key_payload *epayload,
 		   epayload->decrypted_datalen);
 	sg_set_buf(&sg_out[1], pad, sizeof pad);
 
-	skcipher_request_set_crypt(req, sg_in, sg_out, encrypted_datalen,
-				   epayload->iv);
+	memcpy(iv, epayload->iv, sizeof(iv));
+	skcipher_request_set_crypt(req, sg_in, sg_out, encrypted_datalen, iv);
 	ret = crypto_skcipher_decrypt(req);
 	tfm = crypto_skcipher_reqtfm(req);
 	skcipher_request_free(req);
diff --git a/tools/testing/radix-tree/Makefile b/tools/testing/radix-tree/Makefile
index 3b530467148e..9d0919ed52a4 100644
--- a/tools/testing/radix-tree/Makefile
+++ b/tools/testing/radix-tree/Makefile
@@ -1,5 +1,5 @@
 
-CFLAGS += -I. -g -Wall -D_LGPL_SOURCE
+CFLAGS += -I. -g -O2 -Wall -D_LGPL_SOURCE
 LDFLAGS += -lpthread -lurcu
 TARGETS = main
 OFILES = main.o radix-tree.o linux.o test.o tag_check.o find_next_bit.o \
diff --git a/tools/testing/radix-tree/multiorder.c b/tools/testing/radix-tree/multiorder.c
index 39d9b9568fe2..05d7bc488971 100644
--- a/tools/testing/radix-tree/multiorder.c
+++ b/tools/testing/radix-tree/multiorder.c
@@ -124,6 +124,8 @@ static void multiorder_check(unsigned long index, int order)
 	unsigned long i;
 	unsigned long min = index & ~((1UL << order) - 1);
 	unsigned long max = min + (1UL << order);
+	void **slot;
+	struct item *item2 = item_create(min);
 	RADIX_TREE(tree, GFP_KERNEL);
 
 	printf("Multiorder index %ld, order %d\n", index, order);
@@ -139,13 +141,19 @@ static void multiorder_check(unsigned long index, int order)
 		item_check_absent(&tree, i);
 	for (i = max; i < 2*max; i++)
 		item_check_absent(&tree, i);
+	for (i = min; i < max; i++)
+		assert(radix_tree_insert(&tree, i, item2) == -EEXIST);
+
+	slot = radix_tree_lookup_slot(&tree, index);
+	free(*slot);
+	radix_tree_replace_slot(slot, item2);
 	for (i = min; i < max; i++) {
-		static void *entry = (void *)
-					(0xA0 | RADIX_TREE_EXCEPTIONAL_ENTRY);
-		assert(radix_tree_insert(&tree, i, entry) == -EEXIST);
+		struct item *item = item_lookup(&tree, i);
+		assert(item != 0);
+		assert(item->index == min);
 	}
 
-	assert(item_delete(&tree, index) != 0);
+	assert(item_delete(&tree, min) != 0);
 
 	for (i = 0; i < 2*max; i++)
 		item_check_absent(&tree, i);