author		David S. Miller <davem@davemloft.net>	2016-10-02 21:17:07 -0400
committer	David S. Miller <davem@davemloft.net>	2016-10-02 22:20:41 -0400
commit		b50afd203a5ef1998c18d6519ad2b2c546d6af22 (patch)
tree		608e6845e78a4ffe623c7cdf6581e29e2d9be0a9
parent		d6169b0206db1c8c8d0e4c6b79fdf4b2fc6455f1 (diff)
parent		c8d2bc9bc39ebea8437fd974fdbc21847bb897a3 (diff)

Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

Three sets of overlapping changes.  Nothing serious.

Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--	.mailmap	| 1
-rw-r--r--	Documentation/devicetree/bindings/input/touchscreen/silead_gsl1680.txt	| 1
-rw-r--r--	MAINTAINERS	| 4
-rw-r--r--	Makefile	| 2
-rw-r--r--	arch/arm/boot/compressed/head.S	| 2
-rw-r--r--	arch/arm/include/asm/dma-mapping.h	| 2
-rw-r--r--	arch/arm/kernel/devtree.c	| 14
-rw-r--r--	arch/arm64/include/asm/debug-monitors.h	| 2
-rw-r--r--	arch/arm64/kernel/kgdb.c	| 36
-rw-r--r--	arch/arm64/kernel/smp.c	| 14
-rw-r--r--	arch/mips/Kconfig	| 1
-rw-r--r--	arch/mips/Kconfig.debug	| 36
-rw-r--r--	arch/mips/Makefile	| 4
-rw-r--r--	arch/mips/ath79/clock.c	| 2
-rw-r--r--	arch/mips/cavium-octeon/octeon-irq.c	| 6
-rw-r--r--	arch/mips/cavium-octeon/octeon-platform.c	| 2
-rw-r--r--	arch/mips/dec/int-handler.S	| 40
-rw-r--r--	arch/mips/include/asm/asmmacro.h	| 1
-rw-r--r--	arch/mips/include/asm/mach-cavium-octeon/mangle-port.h	| 4
-rw-r--r--	arch/mips/include/asm/mach-paravirt/kernel-entry-init.h	| 2
-rw-r--r--	arch/mips/include/asm/mips-cm.h	| 11
-rw-r--r--	arch/mips/include/asm/mipsregs.h	| 2
-rw-r--r--	arch/mips/include/asm/uprobes.h	| 1
-rw-r--r--	arch/mips/kernel/cpu-probe.c	| 53
-rw-r--r--	arch/mips/kernel/genex.S	| 3
-rw-r--r--	arch/mips/kernel/mips-r2-to-r6-emul.c	| 2
-rw-r--r--	arch/mips/kernel/process.c	| 8
-rw-r--r--	arch/mips/kernel/setup.c	| 9
-rw-r--r--	arch/mips/kernel/smp-cps.c	| 2
-rw-r--r--	arch/mips/kernel/smp.c	| 7
-rw-r--r--	arch/mips/kernel/uprobes.c	| 27
-rw-r--r--	arch/mips/kernel/vdso.c	| 8
-rw-r--r--	arch/mips/math-emu/dsemul.c	| 1
-rw-r--r--	arch/mips/mm/c-r4k.c	| 2
-rw-r--r--	arch/mips/mm/init.c	| 16
-rw-r--r--	arch/mips/mti-malta/malta-setup.c	| 8
-rw-r--r--	arch/powerpc/platforms/powernv/pci-ioda.c	| 11
-rw-r--r--	arch/sh/include/asm/atomic-llsc.h	| 2
-rw-r--r--	arch/sparc/include/asm/page_64.h	| 1
-rw-r--r--	arch/sparc/include/asm/smp_64.h	| 2
-rw-r--r--	arch/sparc/kernel/setup_64.c	| 26
-rw-r--r--	arch/sparc/kernel/smp_64.c	| 14
-rw-r--r--	arch/sparc/mm/fault_64.c	| 1
-rw-r--r--	arch/sparc/mm/init_64.c	| 22
-rw-r--r--	arch/sparc/mm/tlb.c	| 35
-rw-r--r--	arch/sparc/mm/tsb.c	| 18
-rw-r--r--	arch/x86/entry/entry_64.S	| 4
-rw-r--r--	arch/x86/entry/vdso/vdso2c.h	| 2
-rw-r--r--	arch/x86/events/intel/bts.c	| 5
-rw-r--r--	arch/x86/include/asm/tlbflush.h	| 2
-rw-r--r--	arch/x86/kernel/cpu/common.c	| 23
-rw-r--r--	arch/x86/kernel/setup.c	| 4
-rw-r--r--	arch/x86/mm/pageattr.c	| 21
-rw-r--r--	arch/x86/platform/efi/efi_64.c	| 2
-rw-r--r--	block/blk-mq.c	| 16
-rw-r--r--	block/blk-throttle.c	| 6
-rw-r--r--	crypto/rsa-pkcs1pad.c	| 41
-rw-r--r--	drivers/acpi/nfit/core.c	| 48
-rw-r--r--	drivers/base/regmap/regmap.c	| 6
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/amdgpu_device.c	| 2
-rw-r--r--	drivers/gpu/drm/nouveau/include/nvkm/core/device.h	| 1
-rw-r--r--	drivers/gpu/drm/nouveau/nouveau_bo.c	| 3
-rw-r--r--	drivers/gpu/drm/nouveau/nvkm/engine/device/pci.c	| 1
-rw-r--r--	drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c	| 1
-rw-r--r--	drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv04.c	| 3
-rw-r--r--	drivers/gpu/drm/radeon/si_dpm.c	| 6
-rw-r--r--	drivers/gpu/drm/udl/udl_fb.c	| 2
-rw-r--r--	drivers/i2c/busses/i2c-eg20t.c	| 18
-rw-r--r--	drivers/i2c/busses/i2c-qup.c	| 3
-rw-r--r--	drivers/i2c/muxes/i2c-mux-pca954x.c	| 2
-rw-r--r--	drivers/input/joydev.c	| 6
-rw-r--r--	drivers/input/touchscreen/silead.c	| 16
-rw-r--r--	drivers/irqchip/irq-gic-v3.c	| 7
-rw-r--r--	drivers/irqchip/irq-mips-gic.c	| 105
-rw-r--r--	drivers/mmc/host/dw_mmc.c	| 14
-rw-r--r--	drivers/mmc/host/dw_mmc.h	| 3
-rw-r--r--	drivers/mtd/nand/davinci_nand.c	| 3
-rw-r--r--	drivers/mtd/nand/mtk_ecc.c	| 12
-rw-r--r--	drivers/mtd/nand/mtk_nand.c	| 7
-rw-r--r--	drivers/mtd/nand/mxc_nand.c	| 2
-rw-r--r--	drivers/mtd/nand/omap2.c	| 2
-rw-r--r--	drivers/net/can/dev.c	| 27
-rw-r--r--	drivers/net/ethernet/broadcom/tg3.c	| 10
-rw-r--r--	drivers/net/ethernet/freescale/fec_main.c	| 15
-rw-r--r--	drivers/nvdimm/core.c	| 8
-rw-r--r--	drivers/nvdimm/nd.h	| 22
-rw-r--r--	drivers/nvdimm/region_devs.c	| 22
-rw-r--r--	drivers/nvme/host/rdma.c	| 2
-rw-r--r--	drivers/scsi/hosts.c	| 2
-rw-r--r--	drivers/scsi/scsi.c	| 1
-rw-r--r--	drivers/scsi/scsi_priv.h	| 1
-rw-r--r--	fs/btrfs/extent-tree.c	| 9
-rw-r--r--	fs/btrfs/ioctl.c	| 12
-rw-r--r--	fs/configfs/file.c	| 1
-rw-r--r--	fs/ocfs2/aops.c	| 10
-rw-r--r--	include/linux/can/dev.h	| 3
-rw-r--r--	include/linux/dma-mapping.h	| 2
-rw-r--r--	include/linux/mroute.h	| 2
-rw-r--r--	include/linux/mroute6.h	| 2
-rw-r--r--	include/linux/pagemap.h	| 1
-rw-r--r--	include/linux/property.h	| 2
-rw-r--r--	include/linux/swap.h	| 2
-rw-r--r--	include/net/sctp/structs.h	| 13
-rw-r--r--	include/scsi/scsi_host.h	| 5
-rw-r--r--	kernel/cgroup.c	| 29
-rw-r--r--	kernel/cpuset.c	| 19
-rw-r--r--	kernel/events/core.c	| 2
-rw-r--r--	kernel/irq/chip.c	| 8
-rw-r--r--	kernel/trace/trace.c	| 29
-rw-r--r--	lib/Kconfig.debug	| 2
-rw-r--r--	lib/radix-tree.c	| 8
-rw-r--r--	mm/filemap.c	| 114
-rw-r--r--	mm/huge_memory.c	| 3
-rw-r--r--	mm/ksm.c	| 3
-rw-r--r--	mm/memory.c	| 12
-rw-r--r--	mm/memory_hotplug.c	| 10
-rw-r--r--	mm/shmem.c	| 5
-rw-r--r--	mm/vmscan.c	| 19
-rw-r--r--	mm/workingset.c	| 10
-rw-r--r--	net/ipv4/ipmr.c	| 3
-rw-r--r--	net/ipv4/route.c	| 3
-rw-r--r--	net/ipv4/tcp_input.c	| 3
-rw-r--r--	net/ipv4/tcp_output.c	| 12
-rw-r--r--	net/ipv6/ip6_gre.c	| 1
-rw-r--r--	net/ipv6/ip6mr.c	| 5
-rw-r--r--	net/ipv6/route.c	| 4
-rw-r--r--	net/sched/act_ife.c	| 7
-rw-r--r--	net/sched/sch_qfq.c	| 3
-rw-r--r--	net/sched/sch_sfb.c	| 3
-rw-r--r--	net/sctp/chunk.c	| 11
-rw-r--r--	net/sctp/outqueue.c	| 12
-rw-r--r--	net/sctp/sctp_diag.c	| 58
-rw-r--r--	net/sctp/sm_make_chunk.c	| 15
-rw-r--r--	net/sctp/socket.c	| 10
-rw-r--r--	net/vmw_vsock/af_vsock.c	| 6
-rw-r--r--	scripts/recordmcount.c	| 1
-rwxr-xr-x	scripts/recordmcount.pl	| 1
-rw-r--r--	security/keys/encrypted-keys/encrypted.c	| 11
-rw-r--r--	tools/testing/nvdimm/test/nfit.c	| 3
-rw-r--r--	tools/testing/radix-tree/Makefile	| 2
-rw-r--r--	tools/testing/radix-tree/multiorder.c	| 16
141 files changed, 899 insertions(+), 605 deletions(-)
diff --git a/.mailmap b/.mailmap
index de22daefd9da..1dab0a156489 100644
--- a/.mailmap
+++ b/.mailmap
@@ -69,6 +69,7 @@ James Bottomley <jejb@mulgrave.(none)>
 James Bottomley <jejb@titanic.il.steeleye.com>
 James E Wilson <wilson@specifix.com>
 James Ketrenos <jketreno@io.(none)>
+Javi Merino <javi.merino@kernel.org> <javi.merino@arm.com>
 <javier@osg.samsung.com> <javier.martinez@collabora.co.uk>
 Jean Tourrilhes <jt@hpl.hp.com>
 Jeff Garzik <jgarzik@pretzel.yyz.us>
diff --git a/Documentation/devicetree/bindings/input/touchscreen/silead_gsl1680.txt b/Documentation/devicetree/bindings/input/touchscreen/silead_gsl1680.txt
index 1112e0d794e1..820fee4b77b6 100644
--- a/Documentation/devicetree/bindings/input/touchscreen/silead_gsl1680.txt
+++ b/Documentation/devicetree/bindings/input/touchscreen/silead_gsl1680.txt
@@ -13,6 +13,7 @@ Required properties:
 - touchscreen-size-y : See touchscreen.txt
 
 Optional properties:
+- firmware-name : File basename (string) for board specific firmware
 - touchscreen-inverted-x : See touchscreen.txt
 - touchscreen-inverted-y : See touchscreen.txt
 - touchscreen-swapped-x-y : See touchscreen.txt
diff --git a/MAINTAINERS b/MAINTAINERS
index 20de5f9af8ff..669909ed6f25 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -8753,7 +8753,7 @@ F:	drivers/oprofile/
 F:	include/linux/oprofile.h
 
 ORACLE CLUSTER FILESYSTEM 2 (OCFS2)
-M:	Mark Fasheh <mfasheh@suse.com>
+M:	Mark Fasheh <mfasheh@versity.com>
 M:	Joel Becker <jlbec@evilplan.org>
 L:	ocfs2-devel@oss.oracle.com (moderated for non-subscribers)
 W:	http://ocfs2.wiki.kernel.org
@@ -11641,7 +11641,7 @@ F:	Documentation/devicetree/bindings/thermal/
 THERMAL/CPU_COOLING
 M:	Amit Daniel Kachhap <amit.kachhap@gmail.com>
 M:	Viresh Kumar <viresh.kumar@linaro.org>
-M:	Javi Merino <javi.merino@arm.com>
+M:	Javi Merino <javi.merino@kernel.org>
 L:	linux-pm@vger.kernel.org
 S:	Supported
 F:	Documentation/thermal/cpu-cooling-api.txt
diff --git a/Makefile b/Makefile
index 74e22c2f408b..80b8671d5c46 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 4
 PATCHLEVEL = 8
 SUBLEVEL = 0
-EXTRAVERSION = -rc7
+EXTRAVERSION =
 NAME = Psychotic Stoned Sheep
 
 # *DOCUMENTATION*
diff --git a/arch/arm/boot/compressed/head.S b/arch/arm/boot/compressed/head.S
index af11c2f8f3b7..fc6d541549a2 100644
--- a/arch/arm/boot/compressed/head.S
+++ b/arch/arm/boot/compressed/head.S
@@ -779,7 +779,7 @@ __armv7_mmu_cache_on:
 		orrne	r0, r0, #1		@ MMU enabled
 		movne	r1, #0xfffffffd		@ domain 0 = client
 		bic	r6, r6, #1 << 31	@ 32-bit translation system
-		bic	r6, r6, #3 << 0		@ use only ttbr0
+		bic	r6, r6, #(7 << 0) | (1 << 4)	@ use only ttbr0
 		mcrne	p15, 0, r3, c2, c0, 0	@ load page table pointer
 		mcrne	p15, 0, r1, c3, c0, 0	@ load domain access control
 		mcrne	p15, 0, r6, c2, c0, 2	@ load ttb control
diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h
index d009f7911ffc..bf02dbd9ccda 100644
--- a/arch/arm/include/asm/dma-mapping.h
+++ b/arch/arm/include/asm/dma-mapping.h
@@ -111,7 +111,7 @@ static inline dma_addr_t virt_to_dma(struct device *dev, void *addr)
 /* The ARM override for dma_max_pfn() */
 static inline unsigned long dma_max_pfn(struct device *dev)
 {
-	return PHYS_PFN_OFFSET + dma_to_pfn(dev, *dev->dma_mask);
+	return dma_to_pfn(dev, *dev->dma_mask);
 }
 #define dma_max_pfn(dev) dma_max_pfn(dev)
 
diff --git a/arch/arm/kernel/devtree.c b/arch/arm/kernel/devtree.c
index 40ecd5f514a2..f676febbb270 100644
--- a/arch/arm/kernel/devtree.c
+++ b/arch/arm/kernel/devtree.c
@@ -88,6 +88,8 @@ void __init arm_dt_init_cpu_maps(void)
 		return;
 
 	for_each_child_of_node(cpus, cpu) {
+		const __be32 *cell;
+		int prop_bytes;
 		u32 hwid;
 
 		if (of_node_cmp(cpu->type, "cpu"))
@@ -99,7 +101,8 @@ void __init arm_dt_init_cpu_maps(void)
 		 * properties is considered invalid to build the
 		 * cpu_logical_map.
 		 */
-		if (of_property_read_u32(cpu, "reg", &hwid)) {
+		cell = of_get_property(cpu, "reg", &prop_bytes);
+		if (!cell || prop_bytes < sizeof(*cell)) {
 			pr_debug(" * %s missing reg property\n",
 				     cpu->full_name);
 			of_node_put(cpu);
@@ -107,10 +110,15 @@ void __init arm_dt_init_cpu_maps(void)
 		}
 
 		/*
-		 * 8 MSBs must be set to 0 in the DT since the reg property
+		 * Bits n:24 must be set to 0 in the DT since the reg property
 		 * defines the MPIDR[23:0].
 		 */
-		if (hwid & ~MPIDR_HWID_BITMASK) {
+		do {
+			hwid = be32_to_cpu(*cell++);
+			prop_bytes -= sizeof(*cell);
+		} while (!hwid && prop_bytes > 0);
+
+		if (prop_bytes || (hwid & ~MPIDR_HWID_BITMASK)) {
 			of_node_put(cpu);
 			return;
 		}
diff --git a/arch/arm64/include/asm/debug-monitors.h b/arch/arm64/include/asm/debug-monitors.h
index 4b6b3f72a215..b71420a12f26 100644
--- a/arch/arm64/include/asm/debug-monitors.h
+++ b/arch/arm64/include/asm/debug-monitors.h
@@ -61,8 +61,6 @@
 
 #define AARCH64_BREAK_KGDB_DYN_DBG	\
 	(AARCH64_BREAK_MON | (KGDB_DYN_DBG_BRK_IMM << 5))
-#define KGDB_DYN_BRK_INS_BYTE(x)	\
-	((AARCH64_BREAK_KGDB_DYN_DBG >> (8 * (x))) & 0xff)
 
 #define CACHE_FLUSH_IS_SAFE		1
 
diff --git a/arch/arm64/kernel/kgdb.c b/arch/arm64/kernel/kgdb.c
index 8c57f6496e56..e017a9493b92 100644
--- a/arch/arm64/kernel/kgdb.c
+++ b/arch/arm64/kernel/kgdb.c
@@ -19,10 +19,13 @@
  * along with this program.  If not, see <http://www.gnu.org/licenses/>.
  */
 
+#include <linux/bug.h>
 #include <linux/irq.h>
 #include <linux/kdebug.h>
 #include <linux/kgdb.h>
 #include <linux/kprobes.h>
+#include <asm/debug-monitors.h>
+#include <asm/insn.h>
 #include <asm/traps.h>
 
 struct dbg_reg_def_t dbg_reg_def[DBG_MAX_REG_NUM] = {
@@ -338,15 +341,24 @@ void kgdb_arch_exit(void)
 	unregister_die_notifier(&kgdb_notifier);
 }
 
-/*
- * ARM instructions are always in LE.
- * Break instruction is encoded in LE format
- */
-struct kgdb_arch arch_kgdb_ops = {
-	.gdb_bpt_instr = {
-		KGDB_DYN_BRK_INS_BYTE(0),
-		KGDB_DYN_BRK_INS_BYTE(1),
-		KGDB_DYN_BRK_INS_BYTE(2),
-		KGDB_DYN_BRK_INS_BYTE(3),
-	}
-};
+struct kgdb_arch arch_kgdb_ops;
+
+int kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt)
+{
+	int err;
+
+	BUILD_BUG_ON(AARCH64_INSN_SIZE != BREAK_INSTR_SIZE);
+
+	err = aarch64_insn_read((void *)bpt->bpt_addr, (u32 *)bpt->saved_instr);
+	if (err)
+		return err;
+
+	return aarch64_insn_write((void *)bpt->bpt_addr,
+			(u32)AARCH64_BREAK_KGDB_DYN_DBG);
+}
+
+int kgdb_arch_remove_breakpoint(struct kgdb_bkpt *bpt)
+{
+	return aarch64_insn_write((void *)bpt->bpt_addr,
+			*(u32 *)bpt->saved_instr);
+}
diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
index d93d43352504..3ff173e92582 100644
--- a/arch/arm64/kernel/smp.c
+++ b/arch/arm64/kernel/smp.c
@@ -201,12 +201,6 @@ int __cpu_up(unsigned int cpu, struct task_struct *idle)
 	return ret;
 }
 
-static void smp_store_cpu_info(unsigned int cpuid)
-{
-	store_cpu_topology(cpuid);
-	numa_store_cpu_info(cpuid);
-}
-
 /*
  * This is the secondary CPU boot entry. We're using this CPUs
  * idle thread stack, but a set of temporary page tables.
@@ -254,7 +248,7 @@ asmlinkage void secondary_start_kernel(void)
 	 */
 	notify_cpu_starting(cpu);
 
-	smp_store_cpu_info(cpu);
+	store_cpu_topology(cpu);
 
 	/*
 	 * OK, now it's safe to let the boot CPU continue.  Wait for
@@ -689,10 +683,13 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
 {
 	int err;
 	unsigned int cpu;
+	unsigned int this_cpu;
 
 	init_cpu_topology();
 
-	smp_store_cpu_info(smp_processor_id());
+	this_cpu = smp_processor_id();
+	store_cpu_topology(this_cpu);
+	numa_store_cpu_info(this_cpu);
 
 	/*
 	 * If UP is mandated by "nosmp" (which implies "maxcpus=0"), don't set
@@ -719,6 +716,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
 			continue;
 
 		set_cpu_present(cpu, true);
+		numa_store_cpu_info(cpu);
 	}
 }
 
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index 26388562e300..212ff92920d2 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -65,6 +65,7 @@ config MIPS
 	select ARCH_CLOCKSOURCE_DATA
 	select HANDLE_DOMAIN_IRQ
 	select HAVE_EXIT_THREAD
+	select HAVE_REGS_AND_STACK_ACCESS_API
 
 menu "Machine selection"
 
diff --git a/arch/mips/Kconfig.debug b/arch/mips/Kconfig.debug
index f0e314ceb8ba..7f975b20b20c 100644
--- a/arch/mips/Kconfig.debug
+++ b/arch/mips/Kconfig.debug
@@ -113,42 +113,6 @@ config SPINLOCK_TEST
 	help
 	  Add several files to the debugfs to test spinlock speed.
 
-if CPU_MIPSR6
-
-choice
-	prompt "Compact branch policy"
-	default MIPS_COMPACT_BRANCHES_OPTIMAL
-
-config MIPS_COMPACT_BRANCHES_NEVER
-	bool "Never (force delay slot branches)"
-	help
-	  Pass the -mcompact-branches=never flag to the compiler in order to
-	  force it to always emit branches with delay slots, and make no use
-	  of the compact branch instructions introduced by MIPSr6. This is
-	  useful if you suspect there may be an issue with compact branches in
-	  either the compiler or the CPU.
-
-config MIPS_COMPACT_BRANCHES_OPTIMAL
-	bool "Optimal (use where beneficial)"
-	help
-	  Pass the -mcompact-branches=optimal flag to the compiler in order for
-	  it to make use of compact branch instructions where it deems them
-	  beneficial, and use branches with delay slots elsewhere. This is the
-	  default compiler behaviour, and should be used unless you have a
-	  reason to choose otherwise.
-
-config MIPS_COMPACT_BRANCHES_ALWAYS
-	bool "Always (force compact branches)"
-	help
-	  Pass the -mcompact-branches=always flag to the compiler in order to
-	  force it to always emit compact branches, making no use of branch
-	  instructions with delay slots. This can result in more compact code
-	  which may be beneficial in some scenarios.
-
-endchoice
-
-endif # CPU_MIPSR6
-
 config SCACHE_DEBUGFS
 	bool "L2 cache debugfs entries"
 	depends on DEBUG_FS
diff --git a/arch/mips/Makefile b/arch/mips/Makefile
index efd7a9dc93c4..598ab2930fce 100644
--- a/arch/mips/Makefile
+++ b/arch/mips/Makefile
@@ -203,10 +203,6 @@ endif
 toolchain-virt := $(call cc-option-yn,$(mips-cflags) -mvirt)
 cflags-$(toolchain-virt) += -DTOOLCHAIN_SUPPORTS_VIRT
 
-cflags-$(CONFIG_MIPS_COMPACT_BRANCHES_NEVER) += -mcompact-branches=never
-cflags-$(CONFIG_MIPS_COMPACT_BRANCHES_OPTIMAL) += -mcompact-branches=optimal
-cflags-$(CONFIG_MIPS_COMPACT_BRANCHES_ALWAYS) += -mcompact-branches=always
-
 #
 # Firmware support
 #
diff --git a/arch/mips/ath79/clock.c b/arch/mips/ath79/clock.c
index 2e7378467c5c..cc3a1e33a600 100644
--- a/arch/mips/ath79/clock.c
+++ b/arch/mips/ath79/clock.c
@@ -96,7 +96,7 @@ static struct clk * __init ath79_reg_ffclk(const char *name,
 	struct clk *clk;
 
 	clk = clk_register_fixed_factor(NULL, name, parent_name, 0, mult, div);
-	if (!clk)
+	if (IS_ERR(clk))
 		panic("failed to allocate %s clock structure", name);
 
 	return clk;
diff --git a/arch/mips/cavium-octeon/octeon-irq.c b/arch/mips/cavium-octeon/octeon-irq.c
index 5a9b87b7993e..c1eb1ff7c800 100644
--- a/arch/mips/cavium-octeon/octeon-irq.c
+++ b/arch/mips/cavium-octeon/octeon-irq.c
@@ -1619,6 +1619,12 @@ static int __init octeon_irq_init_gpio(
 		return -ENOMEM;
 	}
 
+	/*
+	 * Clear the OF_POPULATED flag that was set by of_irq_init()
+	 * so that all GPIO devices will be probed.
+	 */
+	of_node_clear_flag(gpio_node, OF_POPULATED);
+
 	return 0;
 }
 /*
diff --git a/arch/mips/cavium-octeon/octeon-platform.c b/arch/mips/cavium-octeon/octeon-platform.c
index b31fbc9d6eae..37a932d9148c 100644
--- a/arch/mips/cavium-octeon/octeon-platform.c
+++ b/arch/mips/cavium-octeon/octeon-platform.c
@@ -1059,7 +1059,7 @@ static int __init octeon_publish_devices(void)
 {
 	return of_platform_bus_probe(NULL, octeon_ids, NULL);
 }
-device_initcall(octeon_publish_devices);
+arch_initcall(octeon_publish_devices);
 
 MODULE_AUTHOR("David Daney <ddaney@caviumnetworks.com>");
 MODULE_LICENSE("GPL");
diff --git a/arch/mips/dec/int-handler.S b/arch/mips/dec/int-handler.S
index d7b99180c6e1..1910223a9c02 100644
--- a/arch/mips/dec/int-handler.S
+++ b/arch/mips/dec/int-handler.S
@@ -146,7 +146,25 @@
 	/*
 	 * Find irq with highest priority
 	 */
-	PTR_LA	t1,cpu_mask_nr_tbl
+	# open coded PTR_LA t1, cpu_mask_nr_tbl
+#if (_MIPS_SZPTR == 32)
+	# open coded la t1, cpu_mask_nr_tbl
+	lui	t1, %hi(cpu_mask_nr_tbl)
+	addiu	t1, %lo(cpu_mask_nr_tbl)
+
+#endif
+#if (_MIPS_SZPTR == 64)
+	# open coded dla t1, cpu_mask_nr_tbl
+	.set	push
+	.set	noat
+	lui	t1, %highest(cpu_mask_nr_tbl)
+	lui	AT, %hi(cpu_mask_nr_tbl)
+	daddiu	t1, t1, %higher(cpu_mask_nr_tbl)
+	daddiu	AT, AT, %lo(cpu_mask_nr_tbl)
+	dsll	t1, 32
+	daddu	t1, t1, AT
+	.set	pop
+#endif
 1:	lw	t2,(t1)
 	nop
 	and	t2,t0
@@ -195,7 +213,25 @@
 	/*
 	 * Find irq with highest priority
 	 */
-	PTR_LA	t1,asic_mask_nr_tbl
+	# open coded PTR_LA t1,asic_mask_nr_tbl
+#if (_MIPS_SZPTR == 32)
+	# open coded la t1, asic_mask_nr_tbl
+	lui	t1, %hi(asic_mask_nr_tbl)
+	addiu	t1, %lo(asic_mask_nr_tbl)
+
+#endif
+#if (_MIPS_SZPTR == 64)
+	# open coded dla t1, asic_mask_nr_tbl
+	.set	push
+	.set	noat
+	lui	t1, %highest(asic_mask_nr_tbl)
+	lui	AT, %hi(asic_mask_nr_tbl)
+	daddiu	t1, t1, %higher(asic_mask_nr_tbl)
+	daddiu	AT, AT, %lo(asic_mask_nr_tbl)
+	dsll	t1, 32
+	daddu	t1, t1, AT
+	.set	pop
+#endif
 2:	lw	t2,(t1)
 	nop
 	and	t2,t0
diff --git a/arch/mips/include/asm/asmmacro.h b/arch/mips/include/asm/asmmacro.h
index 56584a659183..83054f79f72a 100644
--- a/arch/mips/include/asm/asmmacro.h
+++ b/arch/mips/include/asm/asmmacro.h
@@ -157,6 +157,7 @@
 	ldc1	$f28, THREAD_FPR28(\thread)
 	ldc1	$f30, THREAD_FPR30(\thread)
 	ctc1	\tmp, fcr31
+	.set	pop
 	.endm
 
 	.macro	fpu_restore_16odd thread
diff --git a/arch/mips/include/asm/mach-cavium-octeon/mangle-port.h b/arch/mips/include/asm/mach-cavium-octeon/mangle-port.h
index 0cf5ac1f7245..8ff2cbdf2c3e 100644
--- a/arch/mips/include/asm/mach-cavium-octeon/mangle-port.h
+++ b/arch/mips/include/asm/mach-cavium-octeon/mangle-port.h
@@ -15,8 +15,8 @@
 static inline bool __should_swizzle_bits(volatile void *a)
 {
 	extern const bool octeon_should_swizzle_table[];
+	u64 did = ((u64)(uintptr_t)a >> 40) & 0xff;
 
-	unsigned long did = ((unsigned long)a >> 40) & 0xff;
 	return octeon_should_swizzle_table[did];
 }
 
@@ -29,7 +29,7 @@ static inline bool __should_swizzle_bits(volatile void *a)
 
 #define __should_swizzle_bits(a)	false
 
-static inline bool __should_swizzle_addr(unsigned long p)
+static inline bool __should_swizzle_addr(u64 p)
 {
 	/* boot bus? */
 	return ((p >> 40) & 0xff) == 0;
diff --git a/arch/mips/include/asm/mach-paravirt/kernel-entry-init.h b/arch/mips/include/asm/mach-paravirt/kernel-entry-init.h
index 2f82bfa3a773..c9f5769dfc8f 100644
--- a/arch/mips/include/asm/mach-paravirt/kernel-entry-init.h
+++ b/arch/mips/include/asm/mach-paravirt/kernel-entry-init.h
@@ -11,11 +11,13 @@
 #define CP0_EBASE $15, 1
 
 	.macro	kernel_entry_setup
+#ifdef CONFIG_SMP
 	mfc0	t0, CP0_EBASE
 	andi	t0, t0, 0x3ff		# CPUNum
 	beqz	t0, 1f
 	# CPUs other than zero goto smp_bootstrap
 	j	smp_bootstrap
+#endif /* CONFIG_SMP */
 
 1:
 	.endm
diff --git a/arch/mips/include/asm/mips-cm.h b/arch/mips/include/asm/mips-cm.h
index 58e7874e9347..4fafeefe65c2 100644
--- a/arch/mips/include/asm/mips-cm.h
+++ b/arch/mips/include/asm/mips-cm.h
@@ -458,10 +458,21 @@ static inline int mips_cm_revision(void)
 static inline unsigned int mips_cm_max_vp_width(void)
 {
 	extern int smp_num_siblings;
+	uint32_t cfg;
 
 	if (mips_cm_revision() >= CM_REV_CM3)
 		return read_gcr_sys_config2() & CM_GCR_SYS_CONFIG2_MAXVPW_MSK;
 
+	if (mips_cm_present()) {
+		/*
+		 * We presume that all cores in the system will have the same
+		 * number of VP(E)s, and if that ever changes then this will
+		 * need revisiting.
+		 */
+		cfg = read_gcr_cl_config() & CM_GCR_Cx_CONFIG_PVPE_MSK;
+		return (cfg >> CM_GCR_Cx_CONFIG_PVPE_SHF) + 1;
+	}
+
 	if (IS_ENABLED(CONFIG_SMP))
 		return smp_num_siblings;
 
diff --git a/arch/mips/include/asm/mipsregs.h b/arch/mips/include/asm/mipsregs.h
index def9d8d13f6e..7dd2dd47909a 100644
--- a/arch/mips/include/asm/mipsregs.h
+++ b/arch/mips/include/asm/mipsregs.h
@@ -660,8 +660,6 @@
 
 #define MIPS_CONF7_IAR		(_ULCAST_(1) << 10)
 #define MIPS_CONF7_AR		(_ULCAST_(1) << 16)
-/* FTLB probability bits for R6 */
-#define MIPS_CONF7_FTLBP_SHIFT	(18)
 
 /* WatchLo* register definitions */
 #define MIPS_WATCHLO_IRW	(_ULCAST_(0x7) << 0)
diff --git a/arch/mips/include/asm/uprobes.h b/arch/mips/include/asm/uprobes.h
index 34c325c674c4..70a4a2f173ff 100644
--- a/arch/mips/include/asm/uprobes.h
+++ b/arch/mips/include/asm/uprobes.h
@@ -36,7 +36,6 @@ struct arch_uprobe {
 	unsigned long	resume_epc;
 	u32	insn[2];
 	u32	ixol[2];
-	union	mips_instruction orig_inst[MAX_UINSN_BYTES / 4];
 };
 
 struct arch_uprobe_task {
diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c
index a88d44247cc8..dd3175442c9e 100644
--- a/arch/mips/kernel/cpu-probe.c
+++ b/arch/mips/kernel/cpu-probe.c
@@ -352,7 +352,12 @@ __setup("nohtw", htw_disable);
 static int mips_ftlb_disabled;
 static int mips_has_ftlb_configured;
 
-static int set_ftlb_enable(struct cpuinfo_mips *c, int enable);
+enum ftlb_flags {
+	FTLB_EN		= 1 << 0,
+	FTLB_SET_PROB	= 1 << 1,
+};
+
+static int set_ftlb_enable(struct cpuinfo_mips *c, enum ftlb_flags flags);
 
 static int __init ftlb_disable(char *s)
 {
@@ -371,8 +376,6 @@ static int __init ftlb_disable(char *s)
 		return 1;
 	}
 
-	back_to_back_c0_hazard();
-
 	config4 = read_c0_config4();
 
 	/* Check that FTLB has been disabled */
@@ -531,7 +534,7 @@ static unsigned int calculate_ftlb_probability(struct cpuinfo_mips *c)
 		return 3;
 }
 
-static int set_ftlb_enable(struct cpuinfo_mips *c, int enable)
+static int set_ftlb_enable(struct cpuinfo_mips *c, enum ftlb_flags flags)
 {
 	unsigned int config;
 
@@ -542,33 +545,33 @@ static int set_ftlb_enable(struct cpuinfo_mips *c, int enable)
 	case CPU_P6600:
 		/* proAptiv & related cores use Config6 to enable the FTLB */
 		config = read_c0_config6();
-		/* Clear the old probability value */
-		config &= ~(3 << MIPS_CONF6_FTLBP_SHIFT);
-		if (enable)
-			/* Enable FTLB */
-			write_c0_config6(config |
-					 (calculate_ftlb_probability(c)
-					  << MIPS_CONF6_FTLBP_SHIFT)
-					 | MIPS_CONF6_FTLBEN);
+
+		if (flags & FTLB_EN)
+			config |= MIPS_CONF6_FTLBEN;
 		else
-			/* Disable FTLB */
-			write_c0_config6(config & ~MIPS_CONF6_FTLBEN);
+			config &= ~MIPS_CONF6_FTLBEN;
+
+		if (flags & FTLB_SET_PROB) {
+			config &= ~(3 << MIPS_CONF6_FTLBP_SHIFT);
+			config |= calculate_ftlb_probability(c)
+				  << MIPS_CONF6_FTLBP_SHIFT;
+		}
+
+		write_c0_config6(config);
+		back_to_back_c0_hazard();
 		break;
 	case CPU_I6400:
-		/* I6400 & related cores use Config7 to configure FTLB */
-		config = read_c0_config7();
-		/* Clear the old probability value */
-		config &= ~(3 << MIPS_CONF7_FTLBP_SHIFT);
-		write_c0_config7(config | (calculate_ftlb_probability(c)
-					   << MIPS_CONF7_FTLBP_SHIFT));
-		break;
+		/* There's no way to disable the FTLB */
+		if (!(flags & FTLB_EN))
+			return 1;
+		return 0;
 	case CPU_LOONGSON3:
 		/* Flush ITLB, DTLB, VTLB and FTLB */
 		write_c0_diag(LOONGSON_DIAG_ITLB | LOONGSON_DIAG_DTLB |
 			      LOONGSON_DIAG_VTLB | LOONGSON_DIAG_FTLB);
 		/* Loongson-3 cores use Config6 to enable the FTLB */
 		config = read_c0_config6();
-		if (enable)
+		if (flags & FTLB_EN)
 			/* Enable FTLB */
 			write_c0_config6(config & ~MIPS_CONF6_FTLBDIS);
 		else
@@ -788,6 +791,7 @@ static inline unsigned int decode_config4(struct cpuinfo_mips *c)
 			       PAGE_SIZE, config4);
 			/* Switch FTLB off */
 			set_ftlb_enable(c, 0);
+			mips_ftlb_disabled = 1;
 			break;
 		}
 		c->tlbsizeftlbsets = 1 <<
@@ -852,7 +856,7 @@ static void decode_configs(struct cpuinfo_mips *c)
 	c->scache.flags = MIPS_CACHE_NOT_PRESENT;
 
 	/* Enable FTLB if present and not disabled */
-	set_ftlb_enable(c, !mips_ftlb_disabled);
+	set_ftlb_enable(c, mips_ftlb_disabled ? 0 : FTLB_EN);
 
 	ok = decode_config0(c);		/* Read Config registers.  */
 	BUG_ON(!ok);		/* Arch spec violation!  */
@@ -902,6 +906,9 @@ static void decode_configs(struct cpuinfo_mips *c)
 		}
 	}
 
+	/* configure the FTLB write probability */
+	set_ftlb_enable(c, (mips_ftlb_disabled ? 0 : FTLB_EN) | FTLB_SET_PROB);
+
 	mips_probe_watch_registers(c);
 
 #ifndef CONFIG_MIPS_CPS
diff --git a/arch/mips/kernel/genex.S b/arch/mips/kernel/genex.S
index 17326a90d53c..dc0b29612891 100644
--- a/arch/mips/kernel/genex.S
+++ b/arch/mips/kernel/genex.S
@@ -142,9 +142,8 @@ LEAF(__r4k_wait)
 	PTR_LA	k1, __r4k_wait
 	ori	k0, 0x1f	/* 32 byte rollback region */
 	xori	k0, 0x1f
-	bne	k0, k1, 9f
+	bne	k0, k1, \handler
 	MTC0	k0, CP0_EPC
-9:
 	.set	pop
 	.endm
 
diff --git a/arch/mips/kernel/mips-r2-to-r6-emul.c b/arch/mips/kernel/mips-r2-to-r6-emul.c
index c3372cac6db2..0a7e10b5f9e3 100644
--- a/arch/mips/kernel/mips-r2-to-r6-emul.c
+++ b/arch/mips/kernel/mips-r2-to-r6-emul.c
@@ -1164,7 +1164,9 @@ fpu_emul:
 	regs->regs[31] = r31;
 	regs->cp0_epc = epc;
 	if (!used_math()) {	/* First time FPU user.  */
+		preempt_disable();
 		err = init_fpu();
+		preempt_enable();
 		set_used_math();
 	}
 	lose_fpu(1);	/* Save FPU state for the emulator. */
diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
index 7429ad09fbe3..d2d061520a23 100644
--- a/arch/mips/kernel/process.c
+++ b/arch/mips/kernel/process.c
@@ -605,14 +605,14 @@ int mips_set_process_fp_mode(struct task_struct *task, unsigned int value)
 		return -EOPNOTSUPP;
 
 	/* Avoid inadvertently triggering emulation */
-	if ((value & PR_FP_MODE_FR) && cpu_has_fpu &&
-	    !(current_cpu_data.fpu_id & MIPS_FPIR_F64))
+	if ((value & PR_FP_MODE_FR) && raw_cpu_has_fpu &&
+	    !(raw_current_cpu_data.fpu_id & MIPS_FPIR_F64))
 		return -EOPNOTSUPP;
-	if ((value & PR_FP_MODE_FRE) && cpu_has_fpu && !cpu_has_fre)
+	if ((value & PR_FP_MODE_FRE) && raw_cpu_has_fpu && !cpu_has_fre)
 		return -EOPNOTSUPP;
 
 	/* FR = 0 not supported in MIPS R6 */
-	if (!(value & PR_FP_MODE_FR) && cpu_has_fpu && cpu_has_mips_r6)
+	if (!(value & PR_FP_MODE_FR) && raw_cpu_has_fpu && cpu_has_mips_r6)
 		return -EOPNOTSUPP;
 
 	/* Proceed with the mode switch */
diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c
index 36cf8d65c47d..0d57909d9026 100644
--- a/arch/mips/kernel/setup.c
+++ b/arch/mips/kernel/setup.c
@@ -87,6 +87,13 @@ void __init add_memory_region(phys_addr_t start, phys_addr_t size, long type)
 	int x = boot_mem_map.nr_map;
 	int i;
 
+	/*
+	 * If the region reaches the top of the physical address space, adjust
+	 * the size slightly so that (start + size) doesn't overflow
+	 */
+	if (start + size - 1 == (phys_addr_t)ULLONG_MAX)
+		--size;
+
 	/* Sanity check */
 	if (start + size < start) {
 		pr_warn("Trying to add an invalid memory region, skipped\n");
@@ -757,7 +764,6 @@ static void __init arch_mem_init(char **cmdline_p)
 	device_tree_init();
 	sparse_init();
 	plat_swiotlb_setup();
-	paging_init();
 
 	dma_contiguous_reserve(PFN_PHYS(max_low_pfn));
 	/* Tell bootmem about cma reserved memblock section */
@@ -870,6 +876,7 @@ void __init setup_arch(char **cmdline_p)
 	prefill_possible_map();
 
 	cpu_cache_init();
+	paging_init();
 }
 
 unsigned long kernelsp[NR_CPUS];
diff --git a/arch/mips/kernel/smp-cps.c b/arch/mips/kernel/smp-cps.c
index e9d9fc6c754c..6183ad84cc73 100644
--- a/arch/mips/kernel/smp-cps.c
+++ b/arch/mips/kernel/smp-cps.c
@@ -513,7 +513,7 @@ static void cps_cpu_die(unsigned int cpu)
 	 * in which case the CPC will refuse to power down the core.
 	 */
 	do {
-		mips_cm_lock_other(core, vpe_id);
+		mips_cm_lock_other(core, 0);
 		mips_cpc_lock_other(core);
 		stat = read_cpc_co_stat_conf();
 		stat &= CPC_Cx_STAT_CONF_SEQSTATE_MSK;
diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c
index f95f094f36e4..b0baf48951fa 100644
--- a/arch/mips/kernel/smp.c
+++ b/arch/mips/kernel/smp.c
@@ -322,6 +322,9 @@ asmlinkage void start_secondary(void)
 	cpumask_set_cpu(cpu, &cpu_coherent_mask);
 	notify_cpu_starting(cpu);
 
+	cpumask_set_cpu(cpu, &cpu_callin_map);
+	synchronise_count_slave(cpu);
+
 	set_cpu_online(cpu, true);
 
 	set_cpu_sibling_map(cpu);
@@ -329,10 +332,6 @@ asmlinkage void start_secondary(void)
 
 	calculate_cpu_foreign_map();
 
-	cpumask_set_cpu(cpu, &cpu_callin_map);
-
-	synchronise_count_slave(cpu);
-
 	/*
 	 * irq will be enabled in ->smp_finish(), enabling it too early
 	 * is dangerous.
diff --git a/arch/mips/kernel/uprobes.c b/arch/mips/kernel/uprobes.c
index 8452d933a645..4c7c1558944a 100644
--- a/arch/mips/kernel/uprobes.c
+++ b/arch/mips/kernel/uprobes.c
@@ -157,7 +157,6 @@ bool is_trap_insn(uprobe_opcode_t *insn)
 int arch_uprobe_pre_xol(struct arch_uprobe *aup, struct pt_regs *regs)
 {
 	struct uprobe_task *utask = current->utask;
-	union mips_instruction insn;
 
 	/*
 	 * Now find the EPC where to resume after the breakpoint has been
@@ -168,10 +167,10 @@ int arch_uprobe_pre_xol(struct arch_uprobe *aup, struct pt_regs *regs)
 		unsigned long epc;
 
 		epc = regs->cp0_epc;
-		__compute_return_epc_for_insn(regs, insn);
+		__compute_return_epc_for_insn(regs,
+			(union mips_instruction) aup->insn[0]);
 		aup->resume_epc = regs->cp0_epc;
 	}
-
 	utask->autask.saved_trap_nr = current->thread.trap_nr;
 	current->thread.trap_nr = UPROBE_TRAP_NR;
 	regs->cp0_epc = current->utask->xol_vaddr;
@@ -222,7 +221,7 @@ int arch_uprobe_exception_notify(struct notifier_block *self,
 		return NOTIFY_DONE;
 
 	switch (val) {
-	case DIE_BREAK:
+	case DIE_UPROBE:
 		if (uprobe_pre_sstep_notifier(regs))
 			return NOTIFY_STOP;
 		break;
@@ -257,7 +256,7 @@ unsigned long arch_uretprobe_hijack_return_addr(
 	ra = regs->regs[31];
 
 	/* Replace the return address with the trampoline address */
-	regs->regs[31] = ra;
+	regs->regs[31] = trampoline_vaddr;
 
 	return ra;
 }
@@ -280,24 +279,6 @@ int __weak set_swbp(struct arch_uprobe *auprobe, struct mm_struct *mm,
 	return uprobe_write_opcode(mm, vaddr, UPROBE_SWBP_INSN);
 }
 
-/**
- * set_orig_insn - Restore the original instruction.
- * @mm: the probed process address space.
- * @auprobe: arch specific probepoint information.
- * @vaddr: the virtual address to insert the opcode.
- *
- * For mm @mm, restore the original opcode (opcode) at @vaddr.
- * Return 0 (success) or a negative errno.
- *
- * This overrides the weak version in kernel/events/uprobes.c.
- */
-int set_orig_insn(struct arch_uprobe *auprobe, struct mm_struct *mm,
-	unsigned long vaddr)
-{
-	return uprobe_write_opcode(mm, vaddr,
-			*(uprobe_opcode_t *)&auprobe->orig_inst[0].word);
-}
-
 void __weak arch_uprobe_copy_ixol(struct page *page, unsigned long vaddr,
 				  void *src, unsigned long len)
 {
diff --git a/arch/mips/kernel/vdso.c b/arch/mips/kernel/vdso.c
index 9abe447a4b48..f9dbfb14af33 100644
--- a/arch/mips/kernel/vdso.c
+++ b/arch/mips/kernel/vdso.c
@@ -39,16 +39,16 @@ static struct vm_special_mapping vdso_vvar_mapping = {
 static void __init init_vdso_image(struct mips_vdso_image *image)
 {
 	unsigned long num_pages, i;
+	unsigned long data_pfn;
 
 	BUG_ON(!PAGE_ALIGNED(image->data));
 	BUG_ON(!PAGE_ALIGNED(image->size));
 
 	num_pages = image->size / PAGE_SIZE;
 
-	for (i = 0; i < num_pages; i++) {
-		image->mapping.pages[i] =
-			virt_to_page(image->data + (i * PAGE_SIZE));
-	}
+	data_pfn = __phys_to_pfn(__pa_symbol(image->data));
+	for (i = 0; i < num_pages; i++)
+		image->mapping.pages[i] = pfn_to_page(data_pfn + i);
 }
 
 static int __init init_vdso(void)
diff --git a/arch/mips/math-emu/dsemul.c b/arch/mips/math-emu/dsemul.c
index 72a4642eee2c..4a094f7acb3d 100644
--- a/arch/mips/math-emu/dsemul.c
+++ b/arch/mips/math-emu/dsemul.c
@@ -298,5 +298,6 @@ bool do_dsemulret(struct pt_regs *xcp)
 	/* Set EPC to return to post-branch instruction */
 	xcp->cp0_epc = current->thread.bd_emu_cont_pc;
 	pr_debug("dsemulret to 0x%08lx\n", xcp->cp0_epc);
+	MIPS_FPU_EMU_INC_STATS(ds_emul);
 	return true;
 }
diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c
index cd72805b64a7..fa7d8d3790bf 100644
--- a/arch/mips/mm/c-r4k.c
+++ b/arch/mips/mm/c-r4k.c
@@ -800,7 +800,7 @@ static void r4k_flush_icache_range(unsigned long start, unsigned long end)
 	 * If address-based cache ops don't require an SMP call, then
 	 * use them exclusively for small flushes.
 	 */
-	size = start - end;
+	size = end - start;
 	cache_size = icache_size;
 	if (!cpu_has_ic_fills_f_dc) {
 		size *= 2;
diff --git a/arch/mips/mm/init.c b/arch/mips/mm/init.c
index a5509e7dcad2..72f7478ee068 100644
--- a/arch/mips/mm/init.c
+++ b/arch/mips/mm/init.c
@@ -261,7 +261,6 @@ unsigned __weak platform_maar_init(unsigned num_pairs)
 {
 	struct maar_config cfg[BOOT_MEM_MAP_MAX];
 	unsigned i, num_configured, num_cfg = 0;
-	phys_addr_t skip;
 
 	for (i = 0; i < boot_mem_map.nr_map; i++) {
 		switch (boot_mem_map.map[i].type) {
@@ -272,14 +271,14 @@ unsigned __weak platform_maar_init(unsigned num_pairs)
 			continue;
 		}
 
-		skip = 0x10000 - (boot_mem_map.map[i].addr & 0xffff);
-
+		/* Round lower up */
 		cfg[num_cfg].lower = boot_mem_map.map[i].addr;
-		cfg[num_cfg].lower += skip;
+		cfg[num_cfg].lower = (cfg[num_cfg].lower + 0xffff) & ~0xffff;
 
-		cfg[num_cfg].upper = cfg[num_cfg].lower;
-		cfg[num_cfg].upper += boot_mem_map.map[i].size - 1;
-		cfg[num_cfg].upper -= skip;
+		/* Round upper down */
+		cfg[num_cfg].upper = boot_mem_map.map[i].addr +
+					boot_mem_map.map[i].size;
+		cfg[num_cfg].upper = (cfg[num_cfg].upper & ~0xffff) - 1;
 
 		cfg[num_cfg].attrs = MIPS_MAAR_S;
 		num_cfg++;
@@ -441,6 +440,9 @@ static inline void mem_init_free_highmem(void)
#ifdef CONFIG_HIGHMEM
 	unsigned long tmp;
 
+	if (cpu_has_dc_aliases)
+		return;
+
 	for (tmp = highstart_pfn; tmp < highend_pfn; tmp++) {
 		struct page *page = pfn_to_page(tmp);
 
diff --git a/arch/mips/mti-malta/malta-setup.c b/arch/mips/mti-malta/malta-setup.c
index ec5b21678fad..7e7364b0501e 100644
--- a/arch/mips/mti-malta/malta-setup.c
+++ b/arch/mips/mti-malta/malta-setup.c
@@ -39,6 +39,9 @@
 #include <linux/console.h>
 #endif
 
+#define ROCIT_CONFIG_GEN0		0x1f403000
+#define ROCIT_CONFIG_GEN0_PCI_IOCU	BIT(7)
+
 extern void malta_be_init(void);
 extern int malta_be_handler(struct pt_regs *regs, int is_fixup);
 
@@ -107,6 +110,8 @@ static void __init fd_activate(void)
 static int __init plat_enable_iocoherency(void)
 {
 	int supported = 0;
+	u32 cfg;
+
 	if (mips_revision_sconid == MIPS_REVISION_SCON_BONITO) {
 		if (BONITO_PCICACHECTRL & BONITO_PCICACHECTRL_CPUCOH_PRES) {
 			BONITO_PCICACHECTRL |= BONITO_PCICACHECTRL_CPUCOH_EN;
@@ -129,7 +134,8 @@ static int __init plat_enable_iocoherency(void)
 	} else if (mips_cm_numiocu() != 0) {
 		/* Nothing special needs to be done to enable coherency */
 		pr_info("CMP IOCU detected\n");
-		if ((*(unsigned int *)0xbf403000 & 0x81) != 0x81) {
+		cfg = __raw_readl((u32 *)CKSEG1ADDR(ROCIT_CONFIG_GEN0));
+		if (!(cfg & ROCIT_CONFIG_GEN0_PCI_IOCU)) {
 			pr_crit("IOCU OPERATION DISABLED BY SWITCH - DEFAULTING TO SW IO COHERENCY\n");
 			return 0;
 		}
diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c
index bc0c91e84ca0..38a5c657ffd3 100644
--- a/arch/powerpc/platforms/powernv/pci-ioda.c
+++ b/arch/powerpc/platforms/powernv/pci-ioda.c
@@ -124,6 +124,13 @@ static inline bool pnv_pci_is_m64(struct pnv_phb *phb, struct resource *r)
 		r->start < (phb->ioda.m64_base + phb->ioda.m64_size));
 }
 
+static inline bool pnv_pci_is_m64_flags(unsigned long resource_flags)
+{
+	unsigned long flags = (IORESOURCE_MEM_64 | IORESOURCE_PREFETCH);
+
+	return (resource_flags & flags) == flags;
+}
+
 static struct pnv_ioda_pe *pnv_ioda_init_pe(struct pnv_phb *phb, int pe_no)
 {
 	phb->ioda.pe_array[pe_no].phb = phb;
@@ -2871,7 +2878,7 @@ static void pnv_pci_ioda_fixup_iov_resources(struct pci_dev *pdev)
 		res = &pdev->resource[i + PCI_IOV_RESOURCES];
 		if (!res->flags || res->parent)
 			continue;
-		if (!pnv_pci_is_m64(phb, res)) {
+		if (!pnv_pci_is_m64_flags(res->flags)) {
 			dev_warn(&pdev->dev, "Don't support SR-IOV with"
 				 " non M64 VF BAR%d: %pR. \n",
 				 i, res);
@@ -3096,7 +3103,7 @@ static resource_size_t pnv_pci_window_alignment(struct pci_bus *bus,
 	 * alignment for any 64-bit resource, PCIe doesn't care and
 	 * bridges only do 64-bit prefetchable anyway.
 	 */
-	if (phb->ioda.m64_segsize && (type & IORESOURCE_MEM_64))
+	if (phb->ioda.m64_segsize && pnv_pci_is_m64_flags(type))
 		return phb->ioda.m64_segsize;
 	if (type & IORESOURCE_MEM)
 		return phb->ioda.m32_segsize;
diff --git a/arch/sh/include/asm/atomic-llsc.h b/arch/sh/include/asm/atomic-llsc.h
index caea2c45f6c2..1d159ce50f5a 100644
--- a/arch/sh/include/asm/atomic-llsc.h
+++ b/arch/sh/include/asm/atomic-llsc.h
@@ -60,7 +60,7 @@ static inline int atomic_fetch_##op(int i, atomic_t *v)	\
"	movco.l	%0, @%3		\n"	\
"	bf	1b		\n"	\
"	synco			\n"	\
-	: "=&z" (temp), "=&z" (res)	\
+	: "=&z" (temp), "=&r" (res)	\
	: "r" (i), "r" (&v->counter)	\
	: "t");				\
					\
diff --git a/arch/sparc/include/asm/page_64.h b/arch/sparc/include/asm/page_64.h
index 8c2a8c937540..c1263fc390db 100644
--- a/arch/sparc/include/asm/page_64.h
+++ b/arch/sparc/include/asm/page_64.h
@@ -25,6 +25,7 @@
 #define HPAGE_MASK		(~(HPAGE_SIZE - 1UL))
 #define HUGETLB_PAGE_ORDER	(HPAGE_SHIFT - PAGE_SHIFT)
 #define HAVE_ARCH_HUGETLB_UNMAPPED_AREA
+#define REAL_HPAGE_PER_HPAGE	(_AC(1,UL) << (HPAGE_SHIFT - REAL_HPAGE_SHIFT))
 #endif
 
 #ifndef __ASSEMBLY__
diff --git a/arch/sparc/include/asm/smp_64.h b/arch/sparc/include/asm/smp_64.h
index 26d9e7726867..ce2233f7e662 100644
--- a/arch/sparc/include/asm/smp_64.h
+++ b/arch/sparc/include/asm/smp_64.h
@@ -43,6 +43,7 @@ void arch_send_call_function_ipi_mask(const struct cpumask *mask);
 int hard_smp_processor_id(void);
 #define raw_smp_processor_id() (current_thread_info()->cpu)
 
+void smp_fill_in_cpu_possible_map(void);
 void smp_fill_in_sib_core_maps(void);
 void cpu_play_dead(void);
 
@@ -72,6 +73,7 @@ void __cpu_die(unsigned int cpu);
 #define smp_fill_in_sib_core_maps() do { } while (0)
 #define smp_fetch_global_regs() do { } while (0)
 #define smp_fetch_global_pmu() do { } while (0)
+#define smp_fill_in_cpu_possible_map() do { } while (0)
 
 #endif /* !(CONFIG_SMP) */
 
diff --git a/arch/sparc/kernel/setup_64.c b/arch/sparc/kernel/setup_64.c
index 599f1207eed2..6b7331d198e9 100644
--- a/arch/sparc/kernel/setup_64.c
+++ b/arch/sparc/kernel/setup_64.c
@@ -31,6 +31,7 @@
 #include <linux/initrd.h>
 #include <linux/module.h>
 #include <linux/start_kernel.h>
+#include <linux/bootmem.h>
 
@@ -50,6 +51,8 @@
 #include <asm/elf.h>
 #include <asm/mdesc.h>
 #include <asm/cacheflush.h>
+#include <asm/dma.h>
+#include <asm/irq.h>
 
 #ifdef CONFIG_IP_PNP
 #include <net/ipconfig.h>
@@ -590,6 +593,22 @@ static void __init init_sparc64_elf_hwcap(void)
 	pause_patch();
 }
 
+void __init alloc_irqstack_bootmem(void)
+{
+	unsigned int i, node;
+
+	for_each_possible_cpu(i) {
+		node = cpu_to_node(i);
+
+		softirq_stack[i] = __alloc_bootmem_node(NODE_DATA(node),
+							THREAD_SIZE,
+							THREAD_SIZE, 0);
+		hardirq_stack[i] = __alloc_bootmem_node(NODE_DATA(node),
+							THREAD_SIZE,
+							THREAD_SIZE, 0);
+	}
+}
+
 void __init setup_arch(char **cmdline_p)
 {
 	/* Initialize PROM console and command line. */
@@ -650,6 +669,13 @@ void __init setup_arch(char **cmdline_p)
 
 	paging_init();
 	init_sparc64_elf_hwcap();
+	smp_fill_in_cpu_possible_map();
+	/*
+	 * Once the OF device tree and MDESC have been setup and nr_cpus has
+	 * been parsed, we know the list of possible cpus.  Therefore we can
+	 * allocate the IRQ stacks.
+	 */
+	alloc_irqstack_bootmem();
 }
 
 extern int stop_a_enabled;
diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c
index 8a6151a628ce..d3035ba6cd31 100644
--- a/arch/sparc/kernel/smp_64.c
+++ b/arch/sparc/kernel/smp_64.c
@@ -1227,6 +1227,20 @@ void __init smp_setup_processor_id(void)
 		xcall_deliver_impl = hypervisor_xcall_deliver;
 }
 
+void __init smp_fill_in_cpu_possible_map(void)
+{
+	int possible_cpus = num_possible_cpus();
+	int i;
+
+	if (possible_cpus > nr_cpu_ids)
+		possible_cpus = nr_cpu_ids;
+
+	for (i = 0; i < possible_cpus; i++)
+		set_cpu_possible(i, true);
+	for (; i < NR_CPUS; i++)
+		set_cpu_possible(i, false);
+}
+
 void smp_fill_in_sib_core_maps(void)
 {
 	unsigned int i;
diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c
index e16fdd28a931..3f291d8c57f7 100644
--- a/arch/sparc/mm/fault_64.c
+++ b/arch/sparc/mm/fault_64.c
@@ -484,6 +484,7 @@ good_area:
 		tsb_grow(mm, MM_TSB_BASE, mm_rss);
 #if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
 	mm_rss = mm->context.hugetlb_pte_count + mm->context.thp_pte_count;
+	mm_rss *= REAL_HPAGE_PER_HPAGE;
 	if (unlikely(mm_rss >
 		     mm->context.tsb_block[MM_TSB_HUGE].tsb_rss_limit)) {
 		if (mm->context.tsb_block[MM_TSB_HUGE].tsb)
diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
index 65457c9f1365..7ac6b62fb7c1 100644
--- a/arch/sparc/mm/init_64.c
+++ b/arch/sparc/mm/init_64.c
@@ -1160,7 +1160,7 @@ int __node_distance(int from, int to)
 	return numa_latency[from][to];
 }
 
-static int find_best_numa_node_for_mlgroup(struct mdesc_mlgroup *grp)
+static int __init find_best_numa_node_for_mlgroup(struct mdesc_mlgroup *grp)
 {
 	int i;
 
@@ -1173,8 +1173,8 @@ static int find_best_numa_node_for_mlgroup(struct mdesc_mlgroup *grp)
 	return i;
 }
 
-static void find_numa_latencies_for_group(struct mdesc_handle *md, u64 grp,
-					  int index)
+static void __init find_numa_latencies_for_group(struct mdesc_handle *md,
+						 u64 grp, int index)
 {
 	u64 arc;
 
@@ -2081,7 +2081,6 @@ void __init paging_init(void)
 {
 	unsigned long end_pfn, shift, phys_base;
 	unsigned long real_end, i;
-	int node;
 
 	setup_page_offset();
 
@@ -2250,21 +2249,6 @@ void __init paging_init(void)
 	/* Setup bootmem... */
 	last_valid_pfn = end_pfn = bootmem_init(phys_base);
 
-	/* Once the OF device tree and MDESC have been setup, we know
-	 * the list of possible cpus.  Therefore we can allocate the
-	 * IRQ stacks.
-	 */
-	for_each_possible_cpu(i) {
-		node = cpu_to_node(i);
-
-		softirq_stack[i] = __alloc_bootmem_node(NODE_DATA(node),
-					THREAD_SIZE,
-					THREAD_SIZE, 0);
-		hardirq_stack[i] = __alloc_bootmem_node(NODE_DATA(node),
-					THREAD_SIZE,
-					THREAD_SIZE, 0);
-	}
-
 	kernel_physical_mapping_init();
 
 	{
diff --git a/arch/sparc/mm/tlb.c b/arch/sparc/mm/tlb.c
index 3659d37b4d81..c56a195c9071 100644
--- a/arch/sparc/mm/tlb.c
+++ b/arch/sparc/mm/tlb.c
@@ -174,10 +174,25 @@ void set_pmd_at(struct mm_struct *mm, unsigned long addr,
 			return;
 
 	if ((pmd_val(pmd) ^ pmd_val(orig)) & _PAGE_PMD_HUGE) {
-		if (pmd_val(pmd) & _PAGE_PMD_HUGE)
-			mm->context.thp_pte_count++;
-		else
-			mm->context.thp_pte_count--;
+		/*
+		 * Note that this routine only sets pmds for THP pages.
+		 * Hugetlb pages are handled elsewhere.  We need to check
+		 * for huge zero page.  Huge zero pages are like hugetlb
+		 * pages in that there is no RSS, but there is the need
+		 * for TSB entries.  So, huge zero page counts go into
+		 * hugetlb_pte_count.
+		 */
+		if (pmd_val(pmd) & _PAGE_PMD_HUGE) {
+			if (is_huge_zero_page(pmd_page(pmd)))
+				mm->context.hugetlb_pte_count++;
+			else
+				mm->context.thp_pte_count++;
+		} else {
+			if (is_huge_zero_page(pmd_page(orig)))
+				mm->context.hugetlb_pte_count--;
+			else
+				mm->context.thp_pte_count--;
+		}
 
 		/* Do not try to allocate the TSB hash table if we
 		 * don't have one already.  We have various locks held
@@ -204,6 +219,9 @@ void set_pmd_at(struct mm_struct *mm, unsigned long addr,
 	}
 }
 
+/*
+ * This routine is only called when splitting a THP
+ */
 void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
 		     pmd_t *pmdp)
 {
@@ -213,6 +231,15 @@ void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
 
 	set_pmd_at(vma->vm_mm, address, pmdp, entry);
 	flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
+
+	/*
+	 * set_pmd_at() will not be called in a way to decrement
+	 * thp_pte_count when splitting a THP, so do it now.
+	 * Sanity check pmd before doing the actual decrement.
+	 */
+	if ((pmd_val(entry) & _PAGE_PMD_HUGE) &&
+	    !is_huge_zero_page(pmd_page(entry)))
+		(vma->vm_mm)->context.thp_pte_count--;
 }
 
 void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
diff --git a/arch/sparc/mm/tsb.c b/arch/sparc/mm/tsb.c
index 6725ed45580e..f2b77112e9d8 100644
--- a/arch/sparc/mm/tsb.c
+++ b/arch/sparc/mm/tsb.c
@@ -469,8 +469,10 @@ retry_tsb_alloc:
 
 int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
 {
+	unsigned long mm_rss = get_mm_rss(mm);
 #if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
-	unsigned long total_huge_pte_count;
+	unsigned long saved_hugetlb_pte_count;
+	unsigned long saved_thp_pte_count;
 #endif
 	unsigned int i;
 
@@ -483,10 +485,12 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
 	 * will re-increment the counters as the parent PTEs are
 	 * copied into the child address space.
 	 */
-	total_huge_pte_count = mm->context.hugetlb_pte_count +
-			 mm->context.thp_pte_count;
+	saved_hugetlb_pte_count = mm->context.hugetlb_pte_count;
+	saved_thp_pte_count = mm->context.thp_pte_count;
 	mm->context.hugetlb_pte_count = 0;
 	mm->context.thp_pte_count = 0;
+
+	mm_rss -= saved_thp_pte_count * (HPAGE_SIZE / PAGE_SIZE);
 #endif
 
 	/* copy_mm() copies over the parent's mm_struct before calling
@@ -499,11 +503,13 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
 	/* If this is fork, inherit the parent's TSB size.  We would
 	 * grow it to that size on the first page fault anyways.
 	 */
-	tsb_grow(mm, MM_TSB_BASE, get_mm_rss(mm));
+	tsb_grow(mm, MM_TSB_BASE, mm_rss);
 
 #if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
-	if (unlikely(total_huge_pte_count))
-		tsb_grow(mm, MM_TSB_HUGE, total_huge_pte_count);
+	if (unlikely(saved_hugetlb_pte_count + saved_thp_pte_count))
+		tsb_grow(mm, MM_TSB_HUGE,
+			 (saved_hugetlb_pte_count + saved_thp_pte_count) *
+			 REAL_HPAGE_PER_HPAGE);
 #endif
 
 	if (unlikely(!mm->context.tsb_block[MM_TSB_BASE].tsb))
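
The TSB sizing above leans on sparc64 backing each huge page with REAL_HPAGE_PER_HPAGE real TLB entries, and on THP pages being counted in RSS in base-page units while hugetlb pages are not counted at all. A worked sketch of the accounting, with the geometry constants assumed here rather than taken from the kernel headers:

    #include <stdio.h>

    /* assumed sparc64 geometry: 8K base pages, 8M huge pages,
     * each huge page mapped by two real 4M hugepage TLB entries */
    #define PAGE_SIZE		(8UL * 1024)
    #define HPAGE_SIZE		(8UL * 1024 * 1024)
    #define REAL_HPAGE_PER_HPAGE	2

    int main(void)
    {
    	unsigned long mm_rss = 4096;		/* base-page RSS from get_mm_rss() */
    	unsigned long thp = 2, hugetlb = 1;	/* saved per-mm huge counters */

    	/* THP pages sit in RSS as base pages but are mapped through the
    	 * huge TSB, so remove them before sizing MM_TSB_BASE. */
    	mm_rss -= thp * (HPAGE_SIZE / PAGE_SIZE);

    	/* Every huge mapping needs REAL_HPAGE_PER_HPAGE huge-TSB entries. */
    	unsigned long huge_entries = (hugetlb + thp) * REAL_HPAGE_PER_HPAGE;

    	printf("base TSB sized for %lu entries, huge TSB for %lu\n",
    	       mm_rss, huge_entries);	/* 2048 and 6 with these inputs */
    	return 0;
    }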
diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
index d172c619c449..02fff3ebfb87 100644
--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
@@ -1002,7 +1002,6 @@ ENTRY(error_entry)
 	testb	$3, CS+8(%rsp)
 	jz	.Lerror_kernelspace
 
-.Lerror_entry_from_usermode_swapgs:
 	/*
 	 * We entered from user mode or we're pretending to have entered
 	 * from user mode due to an IRET fault.
@@ -1045,7 +1044,8 @@ ENTRY(error_entry)
 	 * gsbase and proceed.  We'll fix up the exception and land in
 	 * .Lgs_change's error handler with kernel gsbase.
 	 */
-	jmp	.Lerror_entry_from_usermode_swapgs
+	SWAPGS
+	jmp	.Lerror_entry_done
 
 .Lbstep_iret:
 	/* Fix truncated RIP */
diff --git a/arch/x86/entry/vdso/vdso2c.h b/arch/x86/entry/vdso/vdso2c.h
index 4f741192846d..3dab75f2a673 100644
--- a/arch/x86/entry/vdso/vdso2c.h
+++ b/arch/x86/entry/vdso/vdso2c.h
@@ -22,7 +22,7 @@ static void BITSFUNC(go)(void *raw_addr, size_t raw_len,
 
 	ELF(Phdr) *pt = (ELF(Phdr) *)(raw_addr + GET_LE(&hdr->e_phoff));
 
-	if (hdr->e_type != ET_DYN)
+	if (GET_LE(&hdr->e_type) != ET_DYN)
 		fail("input is not a shared object\n");
 
 	/* Walk the segment table. */
diff --git a/arch/x86/events/intel/bts.c b/arch/x86/events/intel/bts.c
index bdcd6510992c..982c9e31daca 100644
--- a/arch/x86/events/intel/bts.c
+++ b/arch/x86/events/intel/bts.c
@@ -455,7 +455,7 @@ int intel_bts_interrupt(void)
 	 * The only surefire way of knowing if this NMI is ours is by checking
 	 * the write ptr against the PMI threshold.
 	 */
-	if (ds->bts_index >= ds->bts_interrupt_threshold)
+	if (ds && (ds->bts_index >= ds->bts_interrupt_threshold))
 		handled = 1;
 
 	/*
@@ -584,7 +584,8 @@ static __init int bts_init(void)
 	if (!boot_cpu_has(X86_FEATURE_DTES64) || !x86_pmu.bts)
 		return -ENODEV;
 
-	bts_pmu.capabilities	= PERF_PMU_CAP_AUX_NO_SG | PERF_PMU_CAP_ITRACE;
+	bts_pmu.capabilities	= PERF_PMU_CAP_AUX_NO_SG | PERF_PMU_CAP_ITRACE |
+				  PERF_PMU_CAP_EXCLUSIVE;
 	bts_pmu.task_ctx_nr	= perf_sw_context;
 	bts_pmu.event_init	= bts_event_init;
 	bts_pmu.add		= bts_event_add;
diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
index 6fa85944af83..dee8a70382ba 100644
--- a/arch/x86/include/asm/tlbflush.h
+++ b/arch/x86/include/asm/tlbflush.h
@@ -81,7 +81,7 @@ DECLARE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate);
 /* Initialize cr4 shadow for this CPU. */
 static inline void cr4_init_shadow(void)
 {
-	this_cpu_write(cpu_tlbstate.cr4, __read_cr4());
+	this_cpu_write(cpu_tlbstate.cr4, __read_cr4_safe());
 }
 
 /* Set in this cpu's CR4. */
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 809eda03c527..bcc9ccc220c9 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -804,21 +804,20 @@ static void __init early_identify_cpu(struct cpuinfo_x86 *c)
 	identify_cpu_without_cpuid(c);
 
 	/* cyrix could have cpuid enabled via c_identify()*/
-	if (!have_cpuid_p())
-		return;
+	if (have_cpuid_p()) {
+		cpu_detect(c);
+		get_cpu_vendor(c);
+		get_cpu_cap(c);
 
-	cpu_detect(c);
-	get_cpu_vendor(c);
-	get_cpu_cap(c);
-
-	if (this_cpu->c_early_init)
-		this_cpu->c_early_init(c);
+		if (this_cpu->c_early_init)
+			this_cpu->c_early_init(c);
 
-	c->cpu_index = 0;
-	filter_cpuid_features(c, false);
+		c->cpu_index = 0;
+		filter_cpuid_features(c, false);
 
-	if (this_cpu->c_bsp_init)
-		this_cpu->c_bsp_init(c);
+		if (this_cpu->c_bsp_init)
+			this_cpu->c_bsp_init(c);
+	}
 
 	setup_force_cpu_cap(X86_FEATURE_ALWAYS);
 	fpu__init_system(c);
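
The common.c hunk swaps an early return for a guarded block so the trailing setup always runs, even on CPUID-less CPUs. A sketch of just that control-flow transformation; the helper functions here are stand-ins, not the kernel's:

    #include <stdbool.h>
    #include <stdio.h>

    static bool have_cpuid(void) { return false; }	/* pretend ancient CPU */
    static void detect_with_cpuid(void) { puts("cpuid probing"); }

    static void early_identify(void)
    {
    	/* Before: `if (!have_cpuid()) return;` skipped everything below. */
    	if (have_cpuid())
    		detect_with_cpuid();

    	/* Must run whether or not CPUID exists. */
    	puts("force X86_FEATURE_ALWAYS, init FPU");
    }

    int main(void) { early_identify(); return 0; }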
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index 0fa60f5f5a16..98c9cd6f3b5d 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -1137,9 +1137,7 @@ void __init setup_arch(char **cmdline_p)
 	 * auditing all the early-boot CR4 manipulation would be needed to
 	 * rule it out.
 	 */
-	if (boot_cpu_data.cpuid_level >= 0)
-		/* A CPU has %cr4 if and only if it has CPUID. */
-		mmu_cr4_features = __read_cr4();
+	mmu_cr4_features = __read_cr4_safe();
 
 	memblock_set_current_limit(get_max_mapped());
 
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index 849dc09fa4f0..e3353c97d086 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -917,11 +917,11 @@ static void populate_pte(struct cpa_data *cpa,
 	}
 }
 
-static int populate_pmd(struct cpa_data *cpa,
-			unsigned long start, unsigned long end,
-			unsigned num_pages, pud_t *pud, pgprot_t pgprot)
+static long populate_pmd(struct cpa_data *cpa,
+			 unsigned long start, unsigned long end,
+			 unsigned num_pages, pud_t *pud, pgprot_t pgprot)
 {
-	unsigned int cur_pages = 0;
+	long cur_pages = 0;
 	pmd_t *pmd;
 	pgprot_t pmd_pgprot;
 
@@ -991,12 +991,12 @@ static int populate_pmd(struct cpa_data *cpa,
 	return num_pages;
 }
 
-static int populate_pud(struct cpa_data *cpa, unsigned long start, pgd_t *pgd,
-			pgprot_t pgprot)
+static long populate_pud(struct cpa_data *cpa, unsigned long start, pgd_t *pgd,
+			 pgprot_t pgprot)
 {
 	pud_t *pud;
 	unsigned long end;
-	int cur_pages = 0;
+	long cur_pages = 0;
 	pgprot_t pud_pgprot;
 
 	end = start + (cpa->numpages << PAGE_SHIFT);
@@ -1052,7 +1052,7 @@ static int populate_pud(struct cpa_data *cpa, unsigned long start, pgd_t *pgd,
 
 	/* Map trailing leftover */
 	if (start < end) {
-		int tmp;
+		long tmp;
 
 		pud = pud_offset(pgd, start);
 		if (pud_none(*pud))
@@ -1078,7 +1078,7 @@ static int populate_pgd(struct cpa_data *cpa, unsigned long addr)
 	pgprot_t pgprot = __pgprot(_KERNPG_TABLE);
 	pud_t *pud = NULL;	/* shut up gcc */
 	pgd_t *pgd_entry;
-	int ret;
+	long ret;
 
 	pgd_entry = cpa->pgd + pgd_index(addr);
 
@@ -1327,7 +1327,8 @@ static int cpa_process_alias(struct cpa_data *cpa)
 
 static int __change_page_attr_set_clr(struct cpa_data *cpa, int checkalias)
 {
-	int ret, numpages = cpa->numpages;
+	unsigned long numpages = cpa->numpages;
+	int ret;
 
 	while (numpages) {
 		/*
diff --git a/arch/x86/platform/efi/efi_64.c b/arch/x86/platform/efi/efi_64.c
index 677e29e29473..8dd3784eb075 100644
--- a/arch/x86/platform/efi/efi_64.c
+++ b/arch/x86/platform/efi/efi_64.c
@@ -245,7 +245,7 @@ int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages)
 	 * text and allocate a new stack because we can't rely on the
 	 * stack pointer being < 4GB.
 	 */
-	if (!IS_ENABLED(CONFIG_EFI_MIXED))
+	if (!IS_ENABLED(CONFIG_EFI_MIXED) || efi_is_native())
 		return 0;
 
 	/*
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 13f5a6c1de76..c207fa9870eb 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -296,17 +296,29 @@ struct request *blk_mq_alloc_request_hctx(struct request_queue *q, int rw,
 	if (ret)
 		return ERR_PTR(ret);
 
+	/*
+	 * Check if the hardware context is actually mapped to anything.
+	 * If not tell the caller that it should skip this queue.
+	 */
 	hctx = q->queue_hw_ctx[hctx_idx];
+	if (!blk_mq_hw_queue_mapped(hctx)) {
+		ret = -EXDEV;
+		goto out_queue_exit;
+	}
 	ctx = __blk_mq_get_ctx(q, cpumask_first(hctx->cpumask));
 
 	blk_mq_set_alloc_data(&alloc_data, q, flags, ctx, hctx);
 	rq = __blk_mq_alloc_request(&alloc_data, rw, 0);
 	if (!rq) {
-		blk_queue_exit(q);
-		return ERR_PTR(-EWOULDBLOCK);
+		ret = -EWOULDBLOCK;
+		goto out_queue_exit;
 	}
 
 	return rq;
+
+out_queue_exit:
+	blk_queue_exit(q);
+	return ERR_PTR(ret);
 }
 EXPORT_SYMBOL_GPL(blk_mq_alloc_request_hctx);
 
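
The blk-mq hunk folds two duplicated failure exits into one out_queue_exit label, so every error path drops the queue reference in exactly one place. A generic sketch of the idiom with illustrative resource names:

    #include <errno.h>
    #include <stdio.h>

    static int enter_queue(void) { return 0; }
    static void exit_queue(void) { puts("queue ref dropped"); }
    static int hw_ctx_mapped(int idx) { return idx == 0; }
    static int alloc_request(void) { return -1; }	/* pretend allocation fails */

    static int alloc_request_hctx(int hctx_idx)
    {
    	int ret = enter_queue();
    	if (ret)
    		return ret;

    	if (!hw_ctx_mapped(hctx_idx)) {
    		ret = -EXDEV;
    		goto out_queue_exit;
    	}

    	if (alloc_request() < 0) {
    		ret = -EWOULDBLOCK;
    		goto out_queue_exit;
    	}

    	return 0;

    out_queue_exit:
    	exit_queue();	/* single undo point for every error path */
    	return ret;
    }

    int main(void) { printf("ret=%d\n", alloc_request_hctx(1)); return 0; }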
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index f1aba26f4719..a3ea8260c94c 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -780,9 +780,11 @@ static bool tg_may_dispatch(struct throtl_grp *tg, struct bio *bio,
 	/*
 	 * If previous slice expired, start a new one otherwise renew/extend
 	 * existing slice to make sure it is at least throtl_slice interval
-	 * long since now.
+	 * long since now. New slice is started only for empty throttle group.
+	 * If there is queued bio, that means there should be an active
+	 * slice and it should be extended instead.
 	 */
-	if (throtl_slice_used(tg, rw))
+	if (throtl_slice_used(tg, rw) && !(tg->service_queue.nr_queued[rw]))
 		throtl_start_new_slice(tg, rw);
 	else {
 		if (time_before(tg->slice_end[rw], jiffies + throtl_slice))
diff --git a/crypto/rsa-pkcs1pad.c b/crypto/rsa-pkcs1pad.c
index 877019a6d3ea..8baab4307f7b 100644
--- a/crypto/rsa-pkcs1pad.c
+++ b/crypto/rsa-pkcs1pad.c
@@ -298,41 +298,48 @@ static int pkcs1pad_decrypt_complete(struct akcipher_request *req, int err)
 	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
 	struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm);
 	struct pkcs1pad_request *req_ctx = akcipher_request_ctx(req);
+	unsigned int dst_len;
 	unsigned int pos;
-
-	if (err == -EOVERFLOW)
-		/* Decrypted value had no leading 0 byte */
-		err = -EINVAL;
+	u8 *out_buf;
 
 	if (err)
 		goto done;
 
-	if (req_ctx->child_req.dst_len != ctx->key_size - 1) {
-		err = -EINVAL;
+	err = -EINVAL;
+	dst_len = req_ctx->child_req.dst_len;
+	if (dst_len < ctx->key_size - 1)
 		goto done;
+
+	out_buf = req_ctx->out_buf;
+	if (dst_len == ctx->key_size) {
+		if (out_buf[0] != 0x00)
+			/* Decrypted value had no leading 0 byte */
+			goto done;
+
+		dst_len--;
+		out_buf++;
 	}
 
-	if (req_ctx->out_buf[0] != 0x02) {
-		err = -EINVAL;
+	if (out_buf[0] != 0x02)
 		goto done;
-	}
-	for (pos = 1; pos < req_ctx->child_req.dst_len; pos++)
-		if (req_ctx->out_buf[pos] == 0x00)
+
+	for (pos = 1; pos < dst_len; pos++)
+		if (out_buf[pos] == 0x00)
 			break;
-	if (pos < 9 || pos == req_ctx->child_req.dst_len) {
-		err = -EINVAL;
+	if (pos < 9 || pos == dst_len)
 		goto done;
-	}
 	pos++;
 
-	if (req->dst_len < req_ctx->child_req.dst_len - pos)
+	err = 0;
+
+	if (req->dst_len < dst_len - pos)
 		err = -EOVERFLOW;
-	req->dst_len = req_ctx->child_req.dst_len - pos;
+	req->dst_len = dst_len - pos;
 
 	if (!err)
 		sg_copy_from_buffer(req->dst,
 				    sg_nents_for_len(req->dst, req->dst_len),
-				    req_ctx->out_buf + pos, req->dst_len);
+				    out_buf + pos, req->dst_len);
 
 done:
 	kzfree(req_ctx->out_buf);
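
The pkcs1pad change tolerates an RSA output that still carries its leading zero octet. A stand-alone parser for the EM = 0x00 || 0x02 || PS || 0x00 || M layout, following the same steps as the new kernel code (buffer handling simplified; key_size is the modulus length in bytes):

    #include <stddef.h>
    #include <stdio.h>

    /* Returns the message offset within the (possibly stripped) buffer,
     * or -1 on bad padding; *msg_len receives the message length. */
    static int pkcs1_v15_unpad(const unsigned char *buf, size_t len,
    			   size_t key_size, size_t *msg_len)
    {
    	size_t pos;

    	if (len < key_size - 1)
    		return -1;
    	if (len == key_size) {		/* leading zero not yet stripped */
    		if (buf[0] != 0x00)
    			return -1;
    		buf++;
    		len--;
    	}
    	if (buf[0] != 0x02)
    		return -1;
    	for (pos = 1; pos < len; pos++)
    		if (buf[pos] == 0x00)
    			break;
    	if (pos < 9 || pos == len)	/* PS must be >= 8 nonzero octets */
    		return -1;
    	pos++;
    	*msg_len = len - pos;
    	return (int)pos;
    }

    int main(void)
    {
    	unsigned char em[] = { 0x00, 0x02, 1,2,3,4,5,6,7,8, 0x00, 'h','i' };
    	size_t mlen;
    	int off = pkcs1_v15_unpad(em, sizeof(em), sizeof(em), &mlen);
    	printf("offset=%d msg_len=%zu\n", off, mlen);	/* offset=10 msg_len=2 */
    	return 0;
    }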
diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c
index 80cc7c089a15..e1d5ea6d5e40 100644
--- a/drivers/acpi/nfit/core.c
+++ b/drivers/acpi/nfit/core.c
@@ -94,54 +94,50 @@ static struct acpi_device *to_acpi_dev(struct acpi_nfit_desc *acpi_desc)
 	return to_acpi_device(acpi_desc->dev);
 }
 
-static int xlat_status(void *buf, unsigned int cmd)
+static int xlat_status(void *buf, unsigned int cmd, u32 status)
 {
 	struct nd_cmd_clear_error *clear_err;
 	struct nd_cmd_ars_status *ars_status;
-	struct nd_cmd_ars_start *ars_start;
-	struct nd_cmd_ars_cap *ars_cap;
 	u16 flags;
 
 	switch (cmd) {
 	case ND_CMD_ARS_CAP:
-		ars_cap = buf;
-		if ((ars_cap->status & 0xffff) == NFIT_ARS_CAP_NONE)
+		if ((status & 0xffff) == NFIT_ARS_CAP_NONE)
 			return -ENOTTY;
 
 		/* Command failed */
-		if (ars_cap->status & 0xffff)
+		if (status & 0xffff)
 			return -EIO;
 
 		/* No supported scan types for this range */
 		flags = ND_ARS_PERSISTENT | ND_ARS_VOLATILE;
-		if ((ars_cap->status >> 16 & flags) == 0)
+		if ((status >> 16 & flags) == 0)
 			return -ENOTTY;
 		break;
 	case ND_CMD_ARS_START:
-		ars_start = buf;
 		/* ARS is in progress */
-		if ((ars_start->status & 0xffff) == NFIT_ARS_START_BUSY)
+		if ((status & 0xffff) == NFIT_ARS_START_BUSY)
 			return -EBUSY;
 
 		/* Command failed */
-		if (ars_start->status & 0xffff)
+		if (status & 0xffff)
 			return -EIO;
 		break;
 	case ND_CMD_ARS_STATUS:
 		ars_status = buf;
 		/* Command failed */
-		if (ars_status->status & 0xffff)
+		if (status & 0xffff)
 			return -EIO;
 		/* Check extended status (Upper two bytes) */
-		if (ars_status->status == NFIT_ARS_STATUS_DONE)
+		if (status == NFIT_ARS_STATUS_DONE)
 			return 0;
 
 		/* ARS is in progress */
-		if (ars_status->status == NFIT_ARS_STATUS_BUSY)
+		if (status == NFIT_ARS_STATUS_BUSY)
 			return -EBUSY;
 
 		/* No ARS performed for the current boot */
-		if (ars_status->status == NFIT_ARS_STATUS_NONE)
+		if (status == NFIT_ARS_STATUS_NONE)
 			return -EAGAIN;
 
 		/*
@@ -149,19 +145,19 @@ static int xlat_status(void *buf, unsigned int cmd)
 		 * agent wants the scan to stop.  If we didn't overflow
 		 * then just continue with the returned results.
 		 */
-		if (ars_status->status == NFIT_ARS_STATUS_INTR) {
+		if (status == NFIT_ARS_STATUS_INTR) {
 			if (ars_status->flags & NFIT_ARS_F_OVERFLOW)
 				return -ENOSPC;
 			return 0;
 		}
 
 		/* Unknown status */
-		if (ars_status->status >> 16)
+		if (status >> 16)
 			return -EIO;
 		break;
 	case ND_CMD_CLEAR_ERROR:
 		clear_err = buf;
-		if (clear_err->status & 0xffff)
+		if (status & 0xffff)
 			return -EIO;
 		if (!clear_err->cleared)
 			return -EIO;
@@ -172,6 +168,9 @@ static int xlat_status(void *buf, unsigned int cmd)
 		break;
 	}
 
+	/* all other non-zero status results in an error */
+	if (status)
+		return -EIO;
 	return 0;
 }
 
@@ -186,10 +185,10 @@ static int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc,
 	struct nd_cmd_pkg *call_pkg = NULL;
 	const char *cmd_name, *dimm_name;
 	unsigned long cmd_mask, dsm_mask;
+	u32 offset, fw_status = 0;
 	acpi_handle handle;
 	unsigned int func;
 	const u8 *uuid;
-	u32 offset;
 	int rc, i;
 
 	func = cmd;
@@ -317,6 +316,15 @@ static int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc,
 				out_obj->buffer.pointer + offset, out_size);
 		offset += out_size;
 	}
+
+	/*
+	 * Set fw_status for all the commands with a known format to be
+	 * later interpreted by xlat_status().
+	 */
+	if (i >= 1 && ((cmd >= ND_CMD_ARS_CAP && cmd <= ND_CMD_CLEAR_ERROR)
+			|| (cmd >= ND_CMD_SMART && cmd <= ND_CMD_VENDOR)))
+		fw_status = *(u32 *) out_obj->buffer.pointer;
+
 	if (offset + in_buf.buffer.length < buf_len) {
 		if (i >= 1) {
 			/*
@@ -325,7 +333,7 @@ static int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc,
 			 */
 			rc = buf_len - offset - in_buf.buffer.length;
 			if (cmd_rc)
-				*cmd_rc = xlat_status(buf, cmd);
+				*cmd_rc = xlat_status(buf, cmd, fw_status);
 		} else {
 			dev_err(dev, "%s:%s underrun cmd: %s buf_len: %d out_len: %d\n",
 					__func__, dimm_name, cmd_name, buf_len,
@@ -335,7 +343,7 @@ static int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc,
 	} else {
 		rc = 0;
 		if (cmd_rc)
-			*cmd_rc = xlat_status(buf, cmd, fw_status);
 	}
 
 out:
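
xlat_status() now receives the raw 32-bit firmware status word instead of peeking at per-command structs; the low 16 bits carry the command status and the upper 16 the extended status. A small decoding sketch with made-up status values:

    #include <stdint.h>
    #include <stdio.h>

    static void decode_fw_status(uint32_t status)
    {
    	uint16_t cmd_status = status & 0xffff;	/* command result */
    	uint16_t ext_status = status >> 16;	/* command-specific flags */

    	if (cmd_status)
    		printf("command failed: %#x\n", cmd_status);
    	else
    		printf("ok, extended status %#x\n", ext_status);
    }

    int main(void)
    {
    	decode_fw_status(0x00000000);	/* success */
    	decode_fw_status(0x00010000);	/* success plus an extended flag */
    	decode_fw_status(0x00000002);	/* failure code 2 */
    	return 0;
    }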
diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c
index 25d26bb18970..e964d068874d 100644
--- a/drivers/base/regmap/regmap.c
+++ b/drivers/base/regmap/regmap.c
@@ -1475,7 +1475,11 @@ int _regmap_raw_write(struct regmap *map, unsigned int reg,
 
 		kfree(buf);
 	} else if (ret != 0 && !map->cache_bypass && map->format.parse_val) {
-		regcache_drop_region(map, reg, reg + 1);
+		/* regcache_drop_region() takes lock that we already have,
+		 * thus call map->cache_ops->drop() directly
+		 */
+		if (map->cache_ops && map->cache_ops->drop)
+			map->cache_ops->drop(map, reg, reg + 1);
 	}
 
 	trace_regmap_hw_write_done(map, reg, val_len / map->format.val_bytes);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index df7ab2458e50..39c01b942ee4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -1708,11 +1708,11 @@ void amdgpu_device_fini(struct amdgpu_device *adev)
 
 	DRM_INFO("amdgpu: finishing device.\n");
 	adev->shutdown = true;
+	drm_crtc_force_disable_all(adev->ddev);
 	/* evict vram memory */
 	amdgpu_bo_evict_vram(adev);
 	amdgpu_ib_pool_fini(adev);
 	amdgpu_fence_driver_fini(adev);
-	drm_crtc_force_disable_all(adev->ddev);
 	amdgpu_fbdev_fini(adev);
 	r = amdgpu_fini(adev);
 	kfree(adev->ip_block_status);
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/device.h b/drivers/gpu/drm/nouveau/include/nvkm/core/device.h
index 7ea8aa7ca408..6bc712f32c8b 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/device.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/device.h
@@ -175,6 +175,7 @@ struct nvkm_device_func {
 	void (*fini)(struct nvkm_device *, bool suspend);
 	resource_size_t (*resource_addr)(struct nvkm_device *, unsigned bar);
 	resource_size_t (*resource_size)(struct nvkm_device *, unsigned bar);
+	bool cpu_coherent;
 };
 
 struct nvkm_device_quirk {
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 6190035edfea..864323b19cf7 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -209,7 +209,8 @@ nouveau_bo_new(struct drm_device *dev, int size, int align,
 	nvbo->tile_flags = tile_flags;
 	nvbo->bo.bdev = &drm->ttm.bdev;
 
-	nvbo->force_coherent = flags & TTM_PL_FLAG_UNCACHED;
+	if (!nvxx_device(&drm->device)->func->cpu_coherent)
+		nvbo->force_coherent = flags & TTM_PL_FLAG_UNCACHED;
 
 	nvbo->page_shift = 12;
 	if (drm->client.vm) {
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/pci.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/pci.c
index b1b693219db3..62ad0300cfa5 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/pci.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/pci.c
@@ -1614,6 +1614,7 @@ nvkm_device_pci_func = {
 	.fini = nvkm_device_pci_fini,
 	.resource_addr = nvkm_device_pci_resource_addr,
 	.resource_size = nvkm_device_pci_resource_size,
+	.cpu_coherent = !IS_ENABLED(CONFIG_ARM),
 };
 
 int
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c
index 939682f18788..9b638bd905ff 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c
@@ -245,6 +245,7 @@ nvkm_device_tegra_func = {
 	.fini = nvkm_device_tegra_fini,
 	.resource_addr = nvkm_device_tegra_resource_addr,
 	.resource_size = nvkm_device_tegra_resource_size,
+	.cpu_coherent = false,
 };
 
 int
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv04.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv04.c
index edec30fd3ecd..0a7b6ed5ed28 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv04.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv04.c
@@ -37,7 +37,10 @@ nv04_fifo_dma_object_dtor(struct nvkm_fifo_chan *base, int cookie)
 {
 	struct nv04_fifo_chan *chan = nv04_fifo_chan(base);
 	struct nvkm_instmem *imem = chan->fifo->base.engine.subdev.device->imem;
+
+	mutex_lock(&chan->fifo->base.engine.subdev.mutex);
 	nvkm_ramht_remove(imem->ramht, cookie);
+	mutex_unlock(&chan->fifo->base.engine.subdev.mutex);
 }
 
 static int
diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c
index e6abc09b67e3..1f78ec2548ec 100644
--- a/drivers/gpu/drm/radeon/si_dpm.c
+++ b/drivers/gpu/drm/radeon/si_dpm.c
@@ -3015,6 +3015,12 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev,
 		if (rdev->pdev->device == 0x6811 &&
 		    rdev->pdev->revision == 0x81)
 			max_mclk = 120000;
+		/* limit sclk/mclk on Jet parts for stability */
+		if (rdev->pdev->device == 0x6665 &&
+		    rdev->pdev->revision == 0xc3) {
+			max_sclk = 75000;
+			max_mclk = 80000;
+		}
 
 	if (rps->vce_active) {
 		rps->evclk = rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].evclk;
diff --git a/drivers/gpu/drm/udl/udl_fb.c b/drivers/gpu/drm/udl/udl_fb.c
index 9688bfa92ccd..611b6b9bb3cb 100644
--- a/drivers/gpu/drm/udl/udl_fb.c
+++ b/drivers/gpu/drm/udl/udl_fb.c
@@ -122,7 +122,7 @@ int udl_handle_damage(struct udl_framebuffer *fb, int x, int y,
 		return 0;
 	cmd = urb->transfer_buffer;
 
-	for (i = y; i < height ; i++) {
+	for (i = y; i < y + height ; i++) {
 		const int line_offset = fb->base.pitches[0] * i;
 		const int byte_offset = line_offset + (x * bpp);
 		const int dev_byte_offset = (fb->base.width * bpp * i) + (x * bpp);
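
The udl fix corrects the damage-rectangle bound: the rows to repaint are [y, y + height), not [y, height). A tiny demonstration of why the old bound could send nothing at all:

    #include <stdio.h>

    /* Iterate only the damaged rows: [y, y + height). */
    static void repaint(int y, int height)
    {
    	for (int i = y; i < y + height; i++)
    		printf("send line %d\n", i);
    }

    int main(void)
    {
    	repaint(3, 2);	/* rows 3 and 4; `i < height` would have sent none */
    	return 0;
    }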
diff --git a/drivers/i2c/busses/i2c-eg20t.c b/drivers/i2c/busses/i2c-eg20t.c
index 137125b5eae7..5ce71ce7b6c4 100644
--- a/drivers/i2c/busses/i2c-eg20t.c
+++ b/drivers/i2c/busses/i2c-eg20t.c
@@ -773,13 +773,6 @@ static int pch_i2c_probe(struct pci_dev *pdev,
 	/* Set the number of I2C channel instance */
 	adap_info->ch_num = id->driver_data;
 
-	ret = request_irq(pdev->irq, pch_i2c_handler, IRQF_SHARED,
-		  KBUILD_MODNAME, adap_info);
-	if (ret) {
-		pch_pci_err(pdev, "request_irq FAILED\n");
-		goto err_request_irq;
-	}
-
 	for (i = 0; i < adap_info->ch_num; i++) {
 		pch_adap = &adap_info->pch_data[i].pch_adapter;
 		adap_info->pch_i2c_suspended = false;
@@ -797,6 +790,17 @@ static int pch_i2c_probe(struct pci_dev *pdev,
 
 		pch_adap->dev.of_node = pdev->dev.of_node;
 		pch_adap->dev.parent = &pdev->dev;
+	}
+
+	ret = request_irq(pdev->irq, pch_i2c_handler, IRQF_SHARED,
+			  KBUILD_MODNAME, adap_info);
+	if (ret) {
+		pch_pci_err(pdev, "request_irq FAILED\n");
+		goto err_request_irq;
+	}
+
+	for (i = 0; i < adap_info->ch_num; i++) {
+		pch_adap = &adap_info->pch_data[i].pch_adapter;
 
 		pch_i2c_init(&adap_info->pch_data[i]);
 
diff --git a/drivers/i2c/busses/i2c-qup.c b/drivers/i2c/busses/i2c-qup.c
index 501bd15cb78e..a8497cfdae6f 100644
--- a/drivers/i2c/busses/i2c-qup.c
+++ b/drivers/i2c/busses/i2c-qup.c
@@ -1599,7 +1599,8 @@ static int qup_i2c_pm_resume_runtime(struct device *device)
 #ifdef CONFIG_PM_SLEEP
 static int qup_i2c_suspend(struct device *device)
 {
-	qup_i2c_pm_suspend_runtime(device);
+	if (!pm_runtime_suspended(device))
+		return qup_i2c_pm_suspend_runtime(device);
 	return 0;
 }
 
diff --git a/drivers/i2c/muxes/i2c-mux-pca954x.c b/drivers/i2c/muxes/i2c-mux-pca954x.c
index 528e755c468f..3278ebf1cc5c 100644
--- a/drivers/i2c/muxes/i2c-mux-pca954x.c
+++ b/drivers/i2c/muxes/i2c-mux-pca954x.c
@@ -164,7 +164,7 @@ static int pca954x_select_chan(struct i2c_mux_core *muxc, u32 chan)
 	/* Only select the channel if its different from the last channel */
 	if (data->last_chan != regval) {
 		ret = pca954x_reg_write(muxc->parent, client, regval);
-		data->last_chan = regval;
+		data->last_chan = ret ? 0 : regval;
 	}
 
 	return ret;
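
The pca954x fix keeps a software cache honest: when the hardware write fails, fall back to a "don't know" value so the next call retries instead of trusting a channel the mux never latched. A sketch of the pattern with stand-in names:

    #include <stdio.h>

    static int last_chan;	/* 0 means "unknown/deselected" */

    static int reg_write(int regval) { (void)regval; return -1; }	/* pretend failure */

    static int select_chan(int regval)
    {
    	int ret = 0;

    	if (last_chan != regval) {
    		ret = reg_write(regval);
    		/* Invalidate the cache on failure so a retry happens. */
    		last_chan = ret ? 0 : regval;
    	}
    	return ret;
    }

    int main(void)
    {
    	select_chan(4);
    	printf("cached=%d\n", last_chan);	/* 0, not 4 */
    	return 0;
    }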
diff --git a/drivers/input/joydev.c b/drivers/input/joydev.c
index 5d11fea3c8ec..f3135ae22df4 100644
--- a/drivers/input/joydev.c
+++ b/drivers/input/joydev.c
@@ -950,6 +950,12 @@ static const struct input_device_id joydev_ids[] = {
 		.flags = INPUT_DEVICE_ID_MATCH_EVBIT |
 				INPUT_DEVICE_ID_MATCH_ABSBIT,
 		.evbit = { BIT_MASK(EV_ABS) },
+		.absbit = { BIT_MASK(ABS_Z) },
+	},
+	{
+		.flags = INPUT_DEVICE_ID_MATCH_EVBIT |
+				INPUT_DEVICE_ID_MATCH_ABSBIT,
+		.evbit = { BIT_MASK(EV_ABS) },
 		.absbit = { BIT_MASK(ABS_WHEEL) },
 	},
 	{
diff --git a/drivers/input/touchscreen/silead.c b/drivers/input/touchscreen/silead.c
index b2744a64e933..f502c8488be8 100644
--- a/drivers/input/touchscreen/silead.c
+++ b/drivers/input/touchscreen/silead.c
@@ -390,9 +390,10 @@ static void silead_ts_read_props(struct i2c_client *client)
 		data->max_fingers = 5; /* Most devices handle up-to 5 fingers */
 	}
 
-	error = device_property_read_string(dev, "touchscreen-fw-name", &str);
+	error = device_property_read_string(dev, "firmware-name", &str);
 	if (!error)
-		snprintf(data->fw_name, sizeof(data->fw_name), "%s", str);
+		snprintf(data->fw_name, sizeof(data->fw_name),
+			 "silead/%s", str);
 	else
 		dev_dbg(dev, "Firmware file name read error. Using default.");
 }
@@ -410,14 +411,14 @@ static int silead_ts_set_default_fw_name(struct silead_ts_data *data,
 		if (!acpi_id)
 			return -ENODEV;
 
-		snprintf(data->fw_name, sizeof(data->fw_name), "%s.fw",
-			 acpi_id->id);
+		snprintf(data->fw_name, sizeof(data->fw_name),
+			 "silead/%s.fw", acpi_id->id);
 
 		for (i = 0; i < strlen(data->fw_name); i++)
 			data->fw_name[i] = tolower(data->fw_name[i]);
 	} else {
-		snprintf(data->fw_name, sizeof(data->fw_name), "%s.fw",
-			 id->name);
+		snprintf(data->fw_name, sizeof(data->fw_name),
+			 "silead/%s.fw", id->name);
 	}
 
 	return 0;
@@ -426,7 +427,8 @@ static int silead_ts_set_default_fw_name(struct silead_ts_data *data,
 static int silead_ts_set_default_fw_name(struct silead_ts_data *data,
 					 const struct i2c_device_id *id)
 {
-	snprintf(data->fw_name, sizeof(data->fw_name), "%s.fw", id->name);
+	snprintf(data->fw_name, sizeof(data->fw_name),
+		 "silead/%s.fw", id->name);
 	return 0;
 }
 #endif
diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
index ede5672ab34d..da6c0ba61d4f 100644
--- a/drivers/irqchip/irq-gic-v3.c
+++ b/drivers/irqchip/irq-gic-v3.c
@@ -548,7 +548,7 @@ static int gic_starting_cpu(unsigned int cpu)
 static u16 gic_compute_target_list(int *base_cpu, const struct cpumask *mask,
 				   unsigned long cluster_id)
 {
-	int cpu = *base_cpu;
+	int next_cpu, cpu = *base_cpu;
 	unsigned long mpidr = cpu_logical_map(cpu);
 	u16 tlist = 0;
 
@@ -562,9 +562,10 @@ static u16 gic_compute_target_list(int *base_cpu, const struct cpumask *mask,
 
 		tlist |= 1 << (mpidr & 0xf);
 
-		cpu = cpumask_next(cpu, mask);
-		if (cpu >= nr_cpu_ids)
+		next_cpu = cpumask_next(cpu, mask);
+		if (next_cpu >= nr_cpu_ids)
 			goto out;
+		cpu = next_cpu;
 
 		mpidr = cpu_logical_map(cpu);
 
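
The GICv3 fix stops the loop variable from being clobbered by an out-of-range probe: after the loop, the caller still needs `cpu` to hold the last valid id, so the cpumask_next() result goes into a temporary first. A self-contained demonstration with a stand-in for cpumask_next():

    #include <stdio.h>

    #define NR 4

    static int mask[NR] = { 1, 0, 1, 0 };	/* cpus 0 and 2 are set */

    static int mask_next(int cpu)		/* stand-in for cpumask_next() */
    {
    	for (int i = cpu + 1; i < NR; i++)
    		if (mask[i])
    			return i;
    	return NR;			/* past the end, like nr_cpu_ids */
    }

    int main(void)
    {
    	int cpu = 0, next_cpu;

    	for (;;) {
    		printf("targeting cpu %d\n", cpu);
    		next_cpu = mask_next(cpu);
    		if (next_cpu >= NR)
    			break;		/* cpu still holds the last valid id */
    		cpu = next_cpu;
    	}
    	printf("resume scan after cpu %d\n", cpu);	/* 2, not the bogus 4 */
    	return 0;
    }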
diff --git a/drivers/irqchip/irq-mips-gic.c b/drivers/irqchip/irq-mips-gic.c
index 83f498393a7f..6185696405d5 100644
--- a/drivers/irqchip/irq-mips-gic.c
+++ b/drivers/irqchip/irq-mips-gic.c
@@ -638,27 +638,6 @@ static int gic_local_irq_domain_map(struct irq_domain *d, unsigned int virq,
 	if (!gic_local_irq_is_routable(intr))
 		return -EPERM;
 
-	/*
-	 * HACK: These are all really percpu interrupts, but the rest
-	 * of the MIPS kernel code does not use the percpu IRQ API for
-	 * the CP0 timer and performance counter interrupts.
-	 */
-	switch (intr) {
-	case GIC_LOCAL_INT_TIMER:
-	case GIC_LOCAL_INT_PERFCTR:
-	case GIC_LOCAL_INT_FDC:
-		irq_set_chip_and_handler(virq,
-					 &gic_all_vpes_local_irq_controller,
-					 handle_percpu_irq);
-		break;
-	default:
-		irq_set_chip_and_handler(virq,
-					 &gic_local_irq_controller,
-					 handle_percpu_devid_irq);
-		irq_set_percpu_devid(virq);
-		break;
-	}
-
 	spin_lock_irqsave(&gic_lock, flags);
 	for (i = 0; i < gic_vpes; i++) {
 		u32 val = GIC_MAP_TO_PIN_MSK | gic_cpu_pin;
@@ -724,16 +703,42 @@ static int gic_shared_irq_domain_map(struct irq_domain *d, unsigned int virq,
 	return 0;
 }
 
-static int gic_irq_domain_map(struct irq_domain *d, unsigned int virq,
-			      irq_hw_number_t hw)
+static int gic_setup_dev_chip(struct irq_domain *d, unsigned int virq,
+			      unsigned int hwirq)
 {
-	if (GIC_HWIRQ_TO_LOCAL(hw) < GIC_NUM_LOCAL_INTRS)
-		return gic_local_irq_domain_map(d, virq, hw);
+	struct irq_chip *chip;
+	int err;
+
+	if (hwirq >= GIC_SHARED_HWIRQ_BASE) {
+		err = irq_domain_set_hwirq_and_chip(d, virq, hwirq,
+						    &gic_level_irq_controller,
+						    NULL);
+	} else {
+		switch (GIC_HWIRQ_TO_LOCAL(hwirq)) {
+		case GIC_LOCAL_INT_TIMER:
+		case GIC_LOCAL_INT_PERFCTR:
+		case GIC_LOCAL_INT_FDC:
+			/*
+			 * HACK: These are all really percpu interrupts, but
+			 * the rest of the MIPS kernel code does not use the
+			 * percpu IRQ API for them.
+			 */
+			chip = &gic_all_vpes_local_irq_controller;
+			irq_set_handler(virq, handle_percpu_irq);
+			break;
+
+		default:
+			chip = &gic_local_irq_controller;
+			irq_set_handler(virq, handle_percpu_devid_irq);
+			irq_set_percpu_devid(virq);
+			break;
+		}
 
-	irq_set_chip_and_handler(virq, &gic_level_irq_controller,
-				 handle_level_irq);
+		err = irq_domain_set_hwirq_and_chip(d, virq, hwirq,
+						    chip, NULL);
+	}
 
-	return gic_shared_irq_domain_map(d, virq, hw, 0);
+	return err;
 }
 
 static int gic_irq_domain_alloc(struct irq_domain *d, unsigned int virq,
@@ -744,15 +749,12 @@ static int gic_irq_domain_alloc(struct irq_domain *d, unsigned int virq,
 	int cpu, ret, i;
 
 	if (spec->type == GIC_DEVICE) {
-		/* verify that it doesn't conflict with an IPI irq */
-		if (test_bit(spec->hwirq, ipi_resrv))
+		/* verify that shared irqs don't conflict with an IPI irq */
+		if ((spec->hwirq >= GIC_SHARED_HWIRQ_BASE) &&
+		    test_bit(GIC_HWIRQ_TO_SHARED(spec->hwirq), ipi_resrv))
 			return -EBUSY;
 
-		hwirq = GIC_SHARED_TO_HWIRQ(spec->hwirq);
-
-		return irq_domain_set_hwirq_and_chip(d, virq, hwirq,
-						     &gic_level_irq_controller,
-						     NULL);
+		return gic_setup_dev_chip(d, virq, spec->hwirq);
 	} else {
 		base_hwirq = find_first_bit(ipi_resrv, gic_shared_intrs);
 		if (base_hwirq == gic_shared_intrs) {
@@ -821,7 +823,6 @@ int gic_irq_domain_match(struct irq_domain *d, struct device_node *node,
 }
 
 static const struct irq_domain_ops gic_irq_domain_ops = {
-	.map = gic_irq_domain_map,
 	.alloc = gic_irq_domain_alloc,
 	.free = gic_irq_domain_free,
 	.match = gic_irq_domain_match,
@@ -852,29 +853,20 @@ static int gic_dev_domain_alloc(struct irq_domain *d, unsigned int virq,
 	struct irq_fwspec *fwspec = arg;
 	struct gic_irq_spec spec = {
 		.type = GIC_DEVICE,
-		.hwirq = fwspec->param[1],
 	};
 	int i, ret;
-	bool is_shared = fwspec->param[0] == GIC_SHARED;
 
-	if (is_shared) {
-		ret = irq_domain_alloc_irqs_parent(d, virq, nr_irqs, &spec);
-		if (ret)
-			return ret;
-	}
-
-	for (i = 0; i < nr_irqs; i++) {
-		irq_hw_number_t hwirq;
+	if (fwspec->param[0] == GIC_SHARED)
+		spec.hwirq = GIC_SHARED_TO_HWIRQ(fwspec->param[1]);
+	else
+		spec.hwirq = GIC_LOCAL_TO_HWIRQ(fwspec->param[1]);
 
-		if (is_shared)
-			hwirq = GIC_SHARED_TO_HWIRQ(spec.hwirq + i);
-		else
-			hwirq = GIC_LOCAL_TO_HWIRQ(spec.hwirq + i);
+	ret = irq_domain_alloc_irqs_parent(d, virq, nr_irqs, &spec);
+	if (ret)
+		return ret;
 
-		ret = irq_domain_set_hwirq_and_chip(d, virq + i,
-						    hwirq,
-						    &gic_level_irq_controller,
-						    NULL);
+	for (i = 0; i < nr_irqs; i++) {
+		ret = gic_setup_dev_chip(d, virq + i, spec.hwirq + i);
 		if (ret)
 			goto error;
 	}
@@ -896,7 +888,10 @@ static void gic_dev_domain_activate(struct irq_domain *domain,
 static void gic_dev_domain_activate(struct irq_domain *domain,
 				    struct irq_data *d)
 {
-	gic_shared_irq_domain_map(domain, d->irq, d->hwirq, 0);
+	if (GIC_HWIRQ_TO_LOCAL(d->hwirq) < GIC_NUM_LOCAL_INTRS)
+		gic_local_irq_domain_map(domain, d->irq, d->hwirq);
+	else
+		gic_shared_irq_domain_map(domain, d->irq, d->hwirq, 0);
 }
 
 static struct irq_domain_ops gic_dev_domain_ops = {
diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c
index 32380d5d4f6b..767af2026f8b 100644
--- a/drivers/mmc/host/dw_mmc.c
+++ b/drivers/mmc/host/dw_mmc.c
@@ -1112,11 +1112,12 @@ static void dw_mci_setup_bus(struct dw_mci_slot *slot, bool force_clkinit)
 
 		div = (host->bus_hz != clock) ? DIV_ROUND_UP(div, 2) : 0;
 
-		dev_info(&slot->mmc->class_dev,
-			 "Bus speed (slot %d) = %dHz (slot req %dHz, actual %dHZ div = %d)\n",
-			 slot->id, host->bus_hz, clock,
-			 div ? ((host->bus_hz / div) >> 1) :
-			 host->bus_hz, div);
+		if (clock != slot->__clk_old || force_clkinit)
+			dev_info(&slot->mmc->class_dev,
+				 "Bus speed (slot %d) = %dHz (slot req %dHz, actual %dHZ div = %d)\n",
+				 slot->id, host->bus_hz, clock,
+				 div ? ((host->bus_hz / div) >> 1) :
+				 host->bus_hz, div);
 
 		/* disable clock */
 		mci_writel(host, CLKENA, 0);
@@ -1139,6 +1140,9 @@ static void dw_mci_setup_bus(struct dw_mci_slot *slot, bool force_clkinit)
 
 		/* inform CIU */
 		mci_send_cmd(slot, sdmmc_cmd_bits, 0);
+
+		/* keep the last clock value that was requested from core */
+		slot->__clk_old = clock;
 	}
 
 	host->current_speed = clock;
diff --git a/drivers/mmc/host/dw_mmc.h b/drivers/mmc/host/dw_mmc.h
index 9e740bc232a8..e8cd2dec3263 100644
--- a/drivers/mmc/host/dw_mmc.h
+++ b/drivers/mmc/host/dw_mmc.h
@@ -249,6 +249,8 @@ extern int dw_mci_resume(struct dw_mci *host);
  * @queue_node: List node for placing this node in the @queue list of
  *	&struct dw_mci.
  * @clock: Clock rate configured by set_ios(). Protected by host->lock.
+ * @__clk_old: The last clock value that was requested from core.
+ *	Keeping track of this helps us to avoid spamming the console.
  * @flags: Random state bits associated with the slot.
  * @id: Number of this slot.
  * @sdio_id: Number of this slot in the SDIO interrupt registers.
@@ -263,6 +265,7 @@ struct dw_mci_slot {
 	struct list_head	queue_node;
 
 	unsigned int		clock;
+	unsigned int		__clk_old;
 
 	unsigned long		flags;
 #define DW_MMC_CARD_PRESENT	0
diff --git a/drivers/mtd/nand/davinci_nand.c b/drivers/mtd/nand/davinci_nand.c
index cc07ba0f044d..27fa8b87cd5f 100644
--- a/drivers/mtd/nand/davinci_nand.c
+++ b/drivers/mtd/nand/davinci_nand.c
@@ -240,6 +240,9 @@ static void nand_davinci_hwctl_4bit(struct mtd_info *mtd, int mode)
 	unsigned long flags;
 	u32 val;
 
+	/* Reset ECC hardware */
+	davinci_nand_readl(info, NAND_4BIT_ECC1_OFFSET);
+
 	spin_lock_irqsave(&davinci_nand_lock, flags);
 
 	/* Start 4-bit ECC calculation for read/write */
diff --git a/drivers/mtd/nand/mtk_ecc.c b/drivers/mtd/nand/mtk_ecc.c
index 25a4fbd4d24a..d54f666417e1 100644
--- a/drivers/mtd/nand/mtk_ecc.c
+++ b/drivers/mtd/nand/mtk_ecc.c
@@ -366,7 +366,8 @@ int mtk_ecc_encode(struct mtk_ecc *ecc, struct mtk_ecc_config *config,
 		   u8 *data, u32 bytes)
 {
 	dma_addr_t addr;
-	u32 *p, len, i;
+	u8 *p;
+	u32 len, i, val;
 	int ret = 0;
 
 	addr = dma_map_single(ecc->dev, data, bytes, DMA_TO_DEVICE);
@@ -392,11 +393,14 @@ int mtk_ecc_encode(struct mtk_ecc *ecc, struct mtk_ecc_config *config,
 
 	/* Program ECC bytes to OOB: per sector oob = FDM + ECC + SPARE */
 	len = (config->strength * ECC_PARITY_BITS + 7) >> 3;
-	p = (u32 *)(data + bytes);
+	p = data + bytes;
 
 	/* write the parity bytes generated by the ECC back to the OOB region */
-	for (i = 0; i < len; i++)
-		p[i] = readl(ecc->regs + ECC_ENCPAR(i));
+	for (i = 0; i < len; i++) {
+		if ((i % 4) == 0)
+			val = readl(ecc->regs + ECC_ENCPAR(i / 4));
+		p[i] = (val >> ((i % 4) * 8)) & 0xff;
+	}
 timeout:
 
 	dma_unmap_single(ecc->dev, addr, bytes, DMA_TO_DEVICE);
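
The mtk_ecc fix copies a parity length that need not be a multiple of four by reading one 32-bit register per word and peeling bytes out of it, instead of storing whole words through a cast pointer (which wrote past the buffer and assumed alignment). A user-space demonstration of the byte extraction, with a made-up register file:

    #include <stdint.h>
    #include <stdio.h>

    /* Pretend register file: one parity word per ECC_ENCPAR(i) slot. */
    static const uint32_t encpar[2] = { 0x44332211, 0x00006655 };

    int main(void)
    {
    	uint8_t oob[6];		/* length is not a multiple of 4 */
    	uint32_t val = 0;

    	for (unsigned i = 0; i < sizeof(oob); i++) {
    		if ((i % 4) == 0)
    			val = encpar[i / 4];	/* one read per 32-bit word */
    		oob[i] = (val >> ((i % 4) * 8)) & 0xff;
    	}

    	for (unsigned i = 0; i < sizeof(oob); i++)
    		printf("%02x ", oob[i]);	/* 11 22 33 44 55 66 */
    	printf("\n");
    	return 0;
    }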
diff --git a/drivers/mtd/nand/mtk_nand.c b/drivers/mtd/nand/mtk_nand.c
index ddaa2acb9dd7..5223a2182ee4 100644
--- a/drivers/mtd/nand/mtk_nand.c
+++ b/drivers/mtd/nand/mtk_nand.c
@@ -93,6 +93,9 @@
 #define		NFI_FSM_MASK		(0xf << 16)
 #define NFI_ADDRCNTR		(0x70)
 #define		CNTR_MASK		GENMASK(16, 12)
+#define		ADDRCNTR_SEC_SHIFT	(12)
+#define		ADDRCNTR_SEC(val) \
+		(((val) & CNTR_MASK) >> ADDRCNTR_SEC_SHIFT)
 #define NFI_STRADDR		(0x80)
 #define NFI_BYTELEN		(0x84)
 #define NFI_CSEL		(0x90)
@@ -699,7 +702,7 @@ static int mtk_nfc_do_write_page(struct mtd_info *mtd, struct nand_chip *chip,
 	}
 
 	ret = readl_poll_timeout_atomic(nfc->regs + NFI_ADDRCNTR, reg,
-					(reg & CNTR_MASK) >= chip->ecc.steps,
+					ADDRCNTR_SEC(reg) >= chip->ecc.steps,
					10, MTK_TIMEOUT);
 	if (ret)
 		dev_err(dev, "hwecc write timeout\n");
@@ -902,7 +905,7 @@ static int mtk_nfc_read_subpage(struct mtd_info *mtd, struct nand_chip *chip,
 		dev_warn(nfc->dev, "read ahb/dma done timeout\n");
 
 	rc = readl_poll_timeout_atomic(nfc->regs + NFI_BYTELEN, reg,
-				       (reg & CNTR_MASK) >= sectors, 10,
+				       ADDRCNTR_SEC(reg) >= sectors, 10,
 				       MTK_TIMEOUT);
 	if (rc < 0) {
 		dev_err(nfc->dev, "subpage done timeout\n");
diff --git a/drivers/mtd/nand/mxc_nand.c b/drivers/mtd/nand/mxc_nand.c
index 5173fadc9a4e..57cbe2b83849 100644
--- a/drivers/mtd/nand/mxc_nand.c
+++ b/drivers/mtd/nand/mxc_nand.c
@@ -943,7 +943,7 @@ static int mxc_v2_ooblayout_free(struct mtd_info *mtd, int section,
 	struct nand_chip *nand_chip = mtd_to_nand(mtd);
 	int stepsize = nand_chip->ecc.bytes == 9 ? 16 : 26;
 
-	if (section > nand_chip->ecc.steps)
+	if (section >= nand_chip->ecc.steps)
 		return -ERANGE;
 
 	if (!section) {
diff --git a/drivers/mtd/nand/omap2.c b/drivers/mtd/nand/omap2.c
index a59361c36f40..5513bfd9cdc9 100644
--- a/drivers/mtd/nand/omap2.c
+++ b/drivers/mtd/nand/omap2.c
@@ -2169,7 +2169,7 @@ scan_tail:
 	return 0;
 
 return_error:
-	if (info->dma)
+	if (!IS_ERR_OR_NULL(info->dma))
 		dma_release_channel(info->dma);
 	if (nand_chip->ecc.priv) {
 		nand_bch_free(nand_chip->ecc.priv);
diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
index e21f7cc5ae4d..8d6208c0b400 100644
--- a/drivers/net/can/dev.c
+++ b/drivers/net/can/dev.c
@@ -21,6 +21,7 @@
21#include <linux/slab.h> 21#include <linux/slab.h>
22#include <linux/netdevice.h> 22#include <linux/netdevice.h>
23#include <linux/if_arp.h> 23#include <linux/if_arp.h>
24#include <linux/workqueue.h>
24#include <linux/can.h> 25#include <linux/can.h>
25#include <linux/can/dev.h> 26#include <linux/can/dev.h>
26#include <linux/can/skb.h> 27#include <linux/can/skb.h>
@@ -501,9 +502,8 @@ EXPORT_SYMBOL_GPL(can_free_echo_skb);
501/* 502/*
502 * CAN device restart for bus-off recovery 503 * CAN device restart for bus-off recovery
503 */ 504 */
504static void can_restart(unsigned long data) 505static void can_restart(struct net_device *dev)
505{ 506{
506 struct net_device *dev = (struct net_device *)data;
507 struct can_priv *priv = netdev_priv(dev); 507 struct can_priv *priv = netdev_priv(dev);
508 struct net_device_stats *stats = &dev->stats; 508 struct net_device_stats *stats = &dev->stats;
509 struct sk_buff *skb; 509 struct sk_buff *skb;
@@ -543,6 +543,14 @@ restart:
543 netdev_err(dev, "Error %d during restart", err); 543 netdev_err(dev, "Error %d during restart", err);
544} 544}
545 545
546static void can_restart_work(struct work_struct *work)
547{
548 struct delayed_work *dwork = to_delayed_work(work);
549 struct can_priv *priv = container_of(dwork, struct can_priv, restart_work);
550
551 can_restart(priv->dev);
552}
553
546int can_restart_now(struct net_device *dev) 554int can_restart_now(struct net_device *dev)
547{ 555{
548 struct can_priv *priv = netdev_priv(dev); 556 struct can_priv *priv = netdev_priv(dev);
@@ -556,8 +564,8 @@ int can_restart_now(struct net_device *dev)
556 if (priv->state != CAN_STATE_BUS_OFF) 564 if (priv->state != CAN_STATE_BUS_OFF)
557 return -EBUSY; 565 return -EBUSY;
558 566
559 /* Runs as soon as possible in the timer context */ 567 cancel_delayed_work_sync(&priv->restart_work);
560 mod_timer(&priv->restart_timer, jiffies); 568 can_restart(dev);
561 569
562 return 0; 570 return 0;
563} 571}
@@ -578,8 +586,8 @@ void can_bus_off(struct net_device *dev)
578 netif_carrier_off(dev); 586 netif_carrier_off(dev);
579 587
580 if (priv->restart_ms) 588 if (priv->restart_ms)
581 mod_timer(&priv->restart_timer, 589 schedule_delayed_work(&priv->restart_work,
582 jiffies + (priv->restart_ms * HZ) / 1000); 590 msecs_to_jiffies(priv->restart_ms));
583} 591}
584EXPORT_SYMBOL_GPL(can_bus_off); 592EXPORT_SYMBOL_GPL(can_bus_off);
585 593
@@ -688,6 +696,7 @@ struct net_device *alloc_candev(int sizeof_priv, unsigned int echo_skb_max)
688 return NULL; 696 return NULL;
689 697
690 priv = netdev_priv(dev); 698 priv = netdev_priv(dev);
699 priv->dev = dev;
691 700
692 if (echo_skb_max) { 701 if (echo_skb_max) {
693 priv->echo_skb_max = echo_skb_max; 702 priv->echo_skb_max = echo_skb_max;
@@ -697,7 +706,7 @@ struct net_device *alloc_candev(int sizeof_priv, unsigned int echo_skb_max)
697 706
698 priv->state = CAN_STATE_STOPPED; 707 priv->state = CAN_STATE_STOPPED;
699 708
700 init_timer(&priv->restart_timer); 709 INIT_DELAYED_WORK(&priv->restart_work, can_restart_work);
701 710
702 return dev; 711 return dev;
703} 712}
@@ -778,8 +787,6 @@ int open_candev(struct net_device *dev)
778 if (!netif_carrier_ok(dev)) 787 if (!netif_carrier_ok(dev))
779 netif_carrier_on(dev); 788 netif_carrier_on(dev);
780 789
781 setup_timer(&priv->restart_timer, can_restart, (unsigned long)dev);
782
783 return 0; 790 return 0;
784} 791}
785EXPORT_SYMBOL_GPL(open_candev); 792EXPORT_SYMBOL_GPL(open_candev);
@@ -794,7 +801,7 @@ void close_candev(struct net_device *dev)
794{ 801{
795 struct can_priv *priv = netdev_priv(dev); 802 struct can_priv *priv = netdev_priv(dev);
796 803
797 del_timer_sync(&priv->restart_timer); 804 cancel_delayed_work_sync(&priv->restart_work);
798 can_flush_echo_skb(dev); 805 can_flush_echo_skb(dev);
799} 806}
800EXPORT_SYMBOL_GPL(close_candev); 807EXPORT_SYMBOL_GPL(close_candev);
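
The can/dev.c hunks above convert bus-off restart handling from a timer to a delayed work item, so can_restart() runs in process context where it may sleep. A minimal sketch of the timer-to-workqueue pattern, with hypothetical names; the real driver hangs everything off struct can_priv:

	#include <linux/workqueue.h>
	#include <linux/jiffies.h>

	struct foo_priv {
		struct delayed_work restart_work;
		int restart_ms;
	};

	static void foo_restart_work(struct work_struct *work)
	{
		struct delayed_work *dwork = to_delayed_work(work);
		struct foo_priv *priv = container_of(dwork, struct foo_priv,
						     restart_work);

		/* process context: sleeping is allowed here */
		(void)priv;
	}

	static void foo_setup(struct foo_priv *priv)
	{
		INIT_DELAYED_WORK(&priv->restart_work, foo_restart_work);
	}

	static void foo_arm(struct foo_priv *priv)
	{
		schedule_delayed_work(&priv->restart_work,
				      msecs_to_jiffies(priv->restart_ms));
	}

	static void foo_teardown(struct foo_priv *priv)
	{
		/* wait for a running instance before freeing priv */
		cancel_delayed_work_sync(&priv->restart_work);
	}
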
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index 2726f032f2d4..a927a730da10 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -18134,14 +18134,14 @@ static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
18134 18134
18135 rtnl_lock(); 18135 rtnl_lock();
18136 18136
18137 /* We needn't recover from permanent error */
18138 if (state == pci_channel_io_frozen)
18139 tp->pcierr_recovery = true;
18140
18141 /* We probably don't have netdev yet */ 18137 /* We probably don't have netdev yet */
18142 if (!netdev || !netif_running(netdev)) 18138 if (!netdev || !netif_running(netdev))
18143 goto done; 18139 goto done;
18144 18140
18141 /* We needn't recover from permanent error */
18142 if (state == pci_channel_io_frozen)
18143 tp->pcierr_recovery = true;
18144
18145 tg3_phy_stop(tp); 18145 tg3_phy_stop(tp);
18146 18146
18147 tg3_netif_stop(tp); 18147 tg3_netif_stop(tp);
@@ -18238,7 +18238,7 @@ static void tg3_io_resume(struct pci_dev *pdev)
18238 18238
18239 rtnl_lock(); 18239 rtnl_lock();
18240 18240
18241 if (!netif_running(netdev)) 18241 if (!netdev || !netif_running(netdev))
18242 goto done; 18242 goto done;
18243 18243
18244 tg3_full_lock(tp, 0); 18244 tg3_full_lock(tp, 0);
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index fb5c63881340..1fa2d87c2fc9 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -89,10 +89,10 @@ static struct platform_device_id fec_devtype[] = {
89 .driver_data = 0, 89 .driver_data = 0,
90 }, { 90 }, {
91 .name = "imx25-fec", 91 .name = "imx25-fec",
92 .driver_data = FEC_QUIRK_USE_GASKET | FEC_QUIRK_HAS_RACC, 92 .driver_data = FEC_QUIRK_USE_GASKET,
93 }, { 93 }, {
94 .name = "imx27-fec", 94 .name = "imx27-fec",
95 .driver_data = FEC_QUIRK_HAS_RACC, 95 .driver_data = 0,
96 }, { 96 }, {
97 .name = "imx28-fec", 97 .name = "imx28-fec",
98 .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_SWAP_FRAME | 98 .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_SWAP_FRAME |
@@ -180,6 +180,7 @@ MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address");
180/* FEC receive acceleration */ 180/* FEC receive acceleration */
181#define FEC_RACC_IPDIS (1 << 1) 181#define FEC_RACC_IPDIS (1 << 1)
182#define FEC_RACC_PRODIS (1 << 2) 182#define FEC_RACC_PRODIS (1 << 2)
183#define FEC_RACC_SHIFT16 BIT(7)
183#define FEC_RACC_OPTIONS (FEC_RACC_IPDIS | FEC_RACC_PRODIS) 184#define FEC_RACC_OPTIONS (FEC_RACC_IPDIS | FEC_RACC_PRODIS)
184 185
185/* 186/*
@@ -945,9 +946,11 @@ fec_restart(struct net_device *ndev)
945 946
946#if !defined(CONFIG_M5272) 947#if !defined(CONFIG_M5272)
947 if (fep->quirks & FEC_QUIRK_HAS_RACC) { 948 if (fep->quirks & FEC_QUIRK_HAS_RACC) {
948 /* set RX checksum */
949 val = readl(fep->hwp + FEC_RACC); 949 val = readl(fep->hwp + FEC_RACC);
950 /* align IP header */
951 val |= FEC_RACC_SHIFT16;
950 if (fep->csum_flags & FLAG_RX_CSUM_ENABLED) 952 if (fep->csum_flags & FLAG_RX_CSUM_ENABLED)
953 /* set RX checksum */
951 val |= FEC_RACC_OPTIONS; 954 val |= FEC_RACC_OPTIONS;
952 else 955 else
953 val &= ~FEC_RACC_OPTIONS; 956 val &= ~FEC_RACC_OPTIONS;
@@ -1428,6 +1431,12 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
1428 prefetch(skb->data - NET_IP_ALIGN); 1431 prefetch(skb->data - NET_IP_ALIGN);
1429 skb_put(skb, pkt_len - 4); 1432 skb_put(skb, pkt_len - 4);
1430 data = skb->data; 1433 data = skb->data;
1434
1435#if !defined(CONFIG_M5272)
1436 if (fep->quirks & FEC_QUIRK_HAS_RACC)
1437 data = skb_pull_inline(skb, 2);
1438#endif
1439
1431 if (!is_copybreak && need_swap) 1440 if (!is_copybreak && need_swap)
1432 swap_buffer(data, pkt_len); 1441 swap_buffer(data, pkt_len);
1433 1442
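
The FEC hunks enable RACC SHIFT16, which has the MAC prepend two pad bytes to each received frame; after the 14-byte Ethernet header the IP header then lands 4-byte aligned, and the rx path drops the pad with skb_pull_inline(skb, 2). A sketch of that receive-side adjustment (illustrative only):

	#include <linux/skbuff.h>

	/* Sketch: skip the two hardware pad bytes so skb->data points
	 * at the Ethernet header again; 2 + 14 = 16 leaves the IP
	 * header that follows 4-byte aligned.
	 */
	static void *rx_strip_racc_pad(struct sk_buff *skb)
	{
		return skb_pull_inline(skb, 2);
	}
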
diff --git a/drivers/nvdimm/core.c b/drivers/nvdimm/core.c
index 715583f69d28..4d7bbd2df5c0 100644
--- a/drivers/nvdimm/core.c
+++ b/drivers/nvdimm/core.c
@@ -99,8 +99,11 @@ static struct nvdimm_map *alloc_nvdimm_map(struct device *dev,
99 nvdimm_map->size = size; 99 nvdimm_map->size = size;
100 kref_init(&nvdimm_map->kref); 100 kref_init(&nvdimm_map->kref);
101 101
102 if (!request_mem_region(offset, size, dev_name(&nvdimm_bus->dev))) 102 if (!request_mem_region(offset, size, dev_name(&nvdimm_bus->dev))) {
103 dev_err(&nvdimm_bus->dev, "failed to request %pa + %zd for %s\n",
104 &offset, size, dev_name(dev));
103 goto err_request_region; 105 goto err_request_region;
106 }
104 107
105 if (flags) 108 if (flags)
106 nvdimm_map->mem = memremap(offset, size, flags); 109 nvdimm_map->mem = memremap(offset, size, flags);
@@ -171,6 +174,9 @@ void *devm_nvdimm_memremap(struct device *dev, resource_size_t offset,
171 kref_get(&nvdimm_map->kref); 174 kref_get(&nvdimm_map->kref);
172 nvdimm_bus_unlock(dev); 175 nvdimm_bus_unlock(dev);
173 176
177 if (!nvdimm_map)
178 return NULL;
179
174 if (devm_add_action_or_reset(dev, nvdimm_map_put, nvdimm_map)) 180 if (devm_add_action_or_reset(dev, nvdimm_map_put, nvdimm_map))
175 return NULL; 181 return NULL;
176 182
diff --git a/drivers/nvdimm/nd.h b/drivers/nvdimm/nd.h
index 8024a0ef86d3..0b78a8211f4a 100644
--- a/drivers/nvdimm/nd.h
+++ b/drivers/nvdimm/nd.h
@@ -52,10 +52,28 @@ struct nvdimm_drvdata {
52struct nd_region_data { 52struct nd_region_data {
53 int ns_count; 53 int ns_count;
54 int ns_active; 54 int ns_active;
55 unsigned int flush_mask; 55 unsigned int hints_shift;
56 void __iomem *flush_wpq[0][0]; 56 void __iomem *flush_wpq[0];
57}; 57};
58 58
59static inline void __iomem *ndrd_get_flush_wpq(struct nd_region_data *ndrd,
60 int dimm, int hint)
61{
62 unsigned int num = 1 << ndrd->hints_shift;
63 unsigned int mask = num - 1;
64
65 return ndrd->flush_wpq[dimm * num + (hint & mask)];
66}
67
68static inline void ndrd_set_flush_wpq(struct nd_region_data *ndrd, int dimm,
69 int hint, void __iomem *flush)
70{
71 unsigned int num = 1 << ndrd->hints_shift;
72 unsigned int mask = num - 1;
73
74 ndrd->flush_wpq[dimm * num + (hint & mask)] = flush;
75}
76
59static inline struct nd_namespace_index *to_namespace_index( 77static inline struct nd_namespace_index *to_namespace_index(
60 struct nvdimm_drvdata *ndd, int i) 78 struct nvdimm_drvdata *ndd, int i)
61{ 79{
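
The new nd.h helpers flatten the old two-dimensional flush_wpq array into one flexible array indexed by (dimm, hint), with hints_shift giving the per-DIMM stride as a power of two. A worked example of the index math, assuming hints_shift == 2 (not patch code):

	/* Sketch: with hints_shift = 2 each DIMM owns 1 << 2 = 4 slots,
	 * so (dimm = 1, hint = 5) wraps via the mask to 1 * 4 + 1 = 5.
	 */
	static unsigned int demo_flush_slot(int dimm, int hint)
	{
		unsigned int num  = 1 << 2;	/* hints per dimm */
		unsigned int mask = num - 1;

		return dimm * num + (hint & mask);
	}
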
diff --git a/drivers/nvdimm/region_devs.c b/drivers/nvdimm/region_devs.c
index e8d5ba7b29af..4c0ac4abb629 100644
--- a/drivers/nvdimm/region_devs.c
+++ b/drivers/nvdimm/region_devs.c
@@ -38,7 +38,7 @@ static int nvdimm_map_flush(struct device *dev, struct nvdimm *nvdimm, int dimm,
38 38
39 dev_dbg(dev, "%s: map %d flush address%s\n", nvdimm_name(nvdimm), 39 dev_dbg(dev, "%s: map %d flush address%s\n", nvdimm_name(nvdimm),
40 nvdimm->num_flush, nvdimm->num_flush == 1 ? "" : "es"); 40 nvdimm->num_flush, nvdimm->num_flush == 1 ? "" : "es");
41 for (i = 0; i < nvdimm->num_flush; i++) { 41 for (i = 0; i < (1 << ndrd->hints_shift); i++) {
42 struct resource *res = &nvdimm->flush_wpq[i]; 42 struct resource *res = &nvdimm->flush_wpq[i];
43 unsigned long pfn = PHYS_PFN(res->start); 43 unsigned long pfn = PHYS_PFN(res->start);
44 void __iomem *flush_page; 44 void __iomem *flush_page;
@@ -54,14 +54,15 @@ static int nvdimm_map_flush(struct device *dev, struct nvdimm *nvdimm, int dimm,
54 54
55 if (j < i) 55 if (j < i)
56 flush_page = (void __iomem *) ((unsigned long) 56 flush_page = (void __iomem *) ((unsigned long)
57 ndrd->flush_wpq[dimm][j] & PAGE_MASK); 57 ndrd_get_flush_wpq(ndrd, dimm, j)
58 & PAGE_MASK);
58 else 59 else
59 flush_page = devm_nvdimm_ioremap(dev, 60 flush_page = devm_nvdimm_ioremap(dev,
60 PHYS_PFN(pfn), PAGE_SIZE); 61 PFN_PHYS(pfn), PAGE_SIZE);
61 if (!flush_page) 62 if (!flush_page)
62 return -ENXIO; 63 return -ENXIO;
63 ndrd->flush_wpq[dimm][i] = flush_page 64 ndrd_set_flush_wpq(ndrd, dimm, i, flush_page
64 + (res->start & ~PAGE_MASK); 65 + (res->start & ~PAGE_MASK));
65 } 66 }
66 67
67 return 0; 68 return 0;
@@ -93,7 +94,10 @@ int nd_region_activate(struct nd_region *nd_region)
93 return -ENOMEM; 94 return -ENOMEM;
94 dev_set_drvdata(dev, ndrd); 95 dev_set_drvdata(dev, ndrd);
95 96
96 ndrd->flush_mask = (1 << ilog2(num_flush)) - 1; 97 if (!num_flush)
98 return 0;
99
100 ndrd->hints_shift = ilog2(num_flush);
97 for (i = 0; i < nd_region->ndr_mappings; i++) { 101 for (i = 0; i < nd_region->ndr_mappings; i++) {
98 struct nd_mapping *nd_mapping = &nd_region->mapping[i]; 102 struct nd_mapping *nd_mapping = &nd_region->mapping[i];
99 struct nvdimm *nvdimm = nd_mapping->nvdimm; 103 struct nvdimm *nvdimm = nd_mapping->nvdimm;
@@ -900,8 +904,8 @@ void nvdimm_flush(struct nd_region *nd_region)
900 */ 904 */
901 wmb(); 905 wmb();
902 for (i = 0; i < nd_region->ndr_mappings; i++) 906 for (i = 0; i < nd_region->ndr_mappings; i++)
903 if (ndrd->flush_wpq[i][0]) 907 if (ndrd_get_flush_wpq(ndrd, i, 0))
904 writeq(1, ndrd->flush_wpq[i][idx & ndrd->flush_mask]); 908 writeq(1, ndrd_get_flush_wpq(ndrd, i, idx));
905 wmb(); 909 wmb();
906} 910}
907EXPORT_SYMBOL_GPL(nvdimm_flush); 911EXPORT_SYMBOL_GPL(nvdimm_flush);
@@ -925,7 +929,7 @@ int nvdimm_has_flush(struct nd_region *nd_region)
925 929
926 for (i = 0; i < nd_region->ndr_mappings; i++) 930 for (i = 0; i < nd_region->ndr_mappings; i++)
927 /* flush hints present, flushing required */ 931 /* flush hints present, flushing required */
928 if (ndrd->flush_wpq[i][0]) 932 if (ndrd_get_flush_wpq(ndrd, i, 0))
929 return 1; 933 return 1;
930 934
931 /* 935 /*
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index c2c2c28e6eb5..fbdb2267e460 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -561,7 +561,6 @@ static int nvme_rdma_init_queue(struct nvme_rdma_ctrl *ctrl,
561 561
562 queue = &ctrl->queues[idx]; 562 queue = &ctrl->queues[idx];
563 queue->ctrl = ctrl; 563 queue->ctrl = ctrl;
564 queue->flags = 0;
565 init_completion(&queue->cm_done); 564 init_completion(&queue->cm_done);
566 565
567 if (idx > 0) 566 if (idx > 0)
@@ -595,6 +594,7 @@ static int nvme_rdma_init_queue(struct nvme_rdma_ctrl *ctrl,
595 goto out_destroy_cm_id; 594 goto out_destroy_cm_id;
596 } 595 }
597 596
597 clear_bit(NVME_RDMA_Q_DELETING, &queue->flags);
598 set_bit(NVME_RDMA_Q_CONNECTED, &queue->flags); 598 set_bit(NVME_RDMA_Q_CONNECTED, &queue->flags);
599 599
600 return 0; 600 return 0;
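
The rdma hunk stops zeroing queue->flags wholesale on re-init and clears only the DELETING bit, since a blanket assignment can wipe state that other paths still own. A sketch of the bit-at-a-time pattern (names hypothetical):

	#include <linux/bitops.h>

	enum { MY_Q_CONNECTED = 0, MY_Q_DELETING = 1 };

	/* Sketch: touch only the bits this path owns. */
	static void my_queue_mark_live(unsigned long *flags)
	{
		clear_bit(MY_Q_DELETING, flags);
		set_bit(MY_Q_CONNECTED, flags);
	}
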
diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
index ba9af4a2bd2a..ec6381e57eb7 100644
--- a/drivers/scsi/hosts.c
+++ b/drivers/scsi/hosts.c
@@ -486,6 +486,8 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
486 else 486 else
487 shost->dma_boundary = 0xffffffff; 487 shost->dma_boundary = 0xffffffff;
488 488
489 shost->use_blk_mq = scsi_use_blk_mq;
490
489 device_initialize(&shost->shost_gendev); 491 device_initialize(&shost->shost_gendev);
490 dev_set_name(&shost->shost_gendev, "host%d", shost->host_no); 492 dev_set_name(&shost->shost_gendev, "host%d", shost->host_no);
491 shost->shost_gendev.bus = &scsi_bus_type; 493 shost->shost_gendev.bus = &scsi_bus_type;
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
index 1f36aca44394..1deb6adc411f 100644
--- a/drivers/scsi/scsi.c
+++ b/drivers/scsi/scsi.c
@@ -1160,7 +1160,6 @@ bool scsi_use_blk_mq = true;
1160bool scsi_use_blk_mq = false; 1160bool scsi_use_blk_mq = false;
1161#endif 1161#endif
1162module_param_named(use_blk_mq, scsi_use_blk_mq, bool, S_IWUSR | S_IRUGO); 1162module_param_named(use_blk_mq, scsi_use_blk_mq, bool, S_IWUSR | S_IRUGO);
1163EXPORT_SYMBOL_GPL(scsi_use_blk_mq);
1164 1163
1165static int __init init_scsi(void) 1164static int __init init_scsi(void)
1166{ 1165{
diff --git a/drivers/scsi/scsi_priv.h b/drivers/scsi/scsi_priv.h
index 57a4b9973320..85c8a51bc563 100644
--- a/drivers/scsi/scsi_priv.h
+++ b/drivers/scsi/scsi_priv.h
@@ -29,6 +29,7 @@ extern int scsi_init_hosts(void);
29extern void scsi_exit_hosts(void); 29extern void scsi_exit_hosts(void);
30 30
31/* scsi.c */ 31/* scsi.c */
32extern bool scsi_use_blk_mq;
32extern int scsi_setup_command_freelist(struct Scsi_Host *shost); 33extern int scsi_setup_command_freelist(struct Scsi_Host *shost);
33extern void scsi_destroy_command_freelist(struct Scsi_Host *shost); 34extern void scsi_destroy_command_freelist(struct Scsi_Host *shost);
34#ifdef CONFIG_SCSI_LOGGING 35#ifdef CONFIG_SCSI_LOGGING
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 38c2df84cabd..665da8f66ff1 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -4271,13 +4271,10 @@ int btrfs_check_data_free_space(struct inode *inode, u64 start, u64 len)
4271 if (ret < 0) 4271 if (ret < 0)
4272 return ret; 4272 return ret;
4273 4273
4274 /* 4274 /* Use new btrfs_qgroup_reserve_data to reserve precious data space. */
4275 * Use new btrfs_qgroup_reserve_data to reserve precious data space
4276 *
4277 * TODO: Find a good method to avoid reserve data space for NOCOW
4278 * range, but don't impact performance on quota disable case.
4279 */
4280 ret = btrfs_qgroup_reserve_data(inode, start, len); 4275 ret = btrfs_qgroup_reserve_data(inode, start, len);
4276 if (ret)
4277 btrfs_free_reserved_data_space_noquota(inode, start, len);
4281 return ret; 4278 return ret;
4282} 4279}
4283 4280
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index b2a2da5893af..7fd939bfbd99 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -1634,6 +1634,9 @@ static noinline int btrfs_ioctl_snap_create_transid(struct file *file,
1634 int namelen; 1634 int namelen;
1635 int ret = 0; 1635 int ret = 0;
1636 1636
1637 if (!S_ISDIR(file_inode(file)->i_mode))
1638 return -ENOTDIR;
1639
1637 ret = mnt_want_write_file(file); 1640 ret = mnt_want_write_file(file);
1638 if (ret) 1641 if (ret)
1639 goto out; 1642 goto out;
@@ -1691,6 +1694,9 @@ static noinline int btrfs_ioctl_snap_create(struct file *file,
1691 struct btrfs_ioctl_vol_args *vol_args; 1694 struct btrfs_ioctl_vol_args *vol_args;
1692 int ret; 1695 int ret;
1693 1696
1697 if (!S_ISDIR(file_inode(file)->i_mode))
1698 return -ENOTDIR;
1699
1694 vol_args = memdup_user(arg, sizeof(*vol_args)); 1700 vol_args = memdup_user(arg, sizeof(*vol_args));
1695 if (IS_ERR(vol_args)) 1701 if (IS_ERR(vol_args))
1696 return PTR_ERR(vol_args); 1702 return PTR_ERR(vol_args);
@@ -1714,6 +1720,9 @@ static noinline int btrfs_ioctl_snap_create_v2(struct file *file,
1714 bool readonly = false; 1720 bool readonly = false;
1715 struct btrfs_qgroup_inherit *inherit = NULL; 1721 struct btrfs_qgroup_inherit *inherit = NULL;
1716 1722
1723 if (!S_ISDIR(file_inode(file)->i_mode))
1724 return -ENOTDIR;
1725
1717 vol_args = memdup_user(arg, sizeof(*vol_args)); 1726 vol_args = memdup_user(arg, sizeof(*vol_args));
1718 if (IS_ERR(vol_args)) 1727 if (IS_ERR(vol_args))
1719 return PTR_ERR(vol_args); 1728 return PTR_ERR(vol_args);
@@ -2357,6 +2366,9 @@ static noinline int btrfs_ioctl_snap_destroy(struct file *file,
2357 int ret; 2366 int ret;
2358 int err = 0; 2367 int err = 0;
2359 2368
2369 if (!S_ISDIR(dir->i_mode))
2370 return -ENOTDIR;
2371
2360 vol_args = memdup_user(arg, sizeof(*vol_args)); 2372 vol_args = memdup_user(arg, sizeof(*vol_args));
2361 if (IS_ERR(vol_args)) 2373 if (IS_ERR(vol_args))
2362 return PTR_ERR(vol_args); 2374 return PTR_ERR(vol_args);
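
Each of the four btrfs ioctls above now rejects a non-directory fd before copying arguments or taking write access, so there is nothing to unwind on the error path. A sketch of that early-exit shape (illustrative):

	#include <linux/fs.h>
	#include <linux/errno.h>

	/* Sketch: validate the fd type before any side effects. */
	static long my_dir_ioctl(struct file *file, void __user *arg)
	{
		if (!S_ISDIR(file_inode(file)->i_mode))
			return -ENOTDIR;	/* fail fast */

		/* ... copy args, mnt_want_write_file(), real work ... */
		return 0;
	}
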
diff --git a/fs/configfs/file.c b/fs/configfs/file.c
index c30cf49b69d2..2c6312db8516 100644
--- a/fs/configfs/file.c
+++ b/fs/configfs/file.c
@@ -333,6 +333,7 @@ configfs_write_bin_file(struct file *file, const char __user *buf,
333 if (bin_attr->cb_max_size && 333 if (bin_attr->cb_max_size &&
334 *ppos + count > bin_attr->cb_max_size) { 334 *ppos + count > bin_attr->cb_max_size) {
335 len = -EFBIG; 335 len = -EFBIG;
336 goto out;
336 } 337 }
337 338
338 tbuf = vmalloc(*ppos + count); 339 tbuf = vmalloc(*ppos + count);
diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c
index 98d36548153d..bbb4b3e5b4ff 100644
--- a/fs/ocfs2/aops.c
+++ b/fs/ocfs2/aops.c
@@ -1842,6 +1842,16 @@ out_commit:
1842 ocfs2_commit_trans(osb, handle); 1842 ocfs2_commit_trans(osb, handle);
1843 1843
1844out: 1844out:
1845 /*
1846 * The mmapped page won't be unlocked in ocfs2_free_write_ctxt(),
1847 * even in case of error here like ENOSPC and ENOMEM. So, we need
1848 * to unlock the target page manually to prevent deadlocks when
1849 * retrying on ENOSPC, or when returning non-VM_FAULT_LOCKED
1850 * to VM code.
1851 */
1852 if (wc->w_target_locked)
1853 unlock_page(mmap_page);
1854
1845 ocfs2_free_write_ctxt(inode, wc); 1855 ocfs2_free_write_ctxt(inode, wc);
1846 1856
1847 if (data_ac) { 1857 if (data_ac) {
diff --git a/include/linux/can/dev.h b/include/linux/can/dev.h
index 5261751f6bd4..5f5270941ba0 100644
--- a/include/linux/can/dev.h
+++ b/include/linux/can/dev.h
@@ -32,6 +32,7 @@ enum can_mode {
32 * CAN common private data 32 * CAN common private data
33 */ 33 */
34struct can_priv { 34struct can_priv {
35 struct net_device *dev;
35 struct can_device_stats can_stats; 36 struct can_device_stats can_stats;
36 37
37 struct can_bittiming bittiming, data_bittiming; 38 struct can_bittiming bittiming, data_bittiming;
@@ -47,7 +48,7 @@ struct can_priv {
47 u32 ctrlmode_static; /* static enabled options for driver/hardware */ 48 u32 ctrlmode_static; /* static enabled options for driver/hardware */
48 49
49 int restart_ms; 50 int restart_ms;
50 struct timer_list restart_timer; 51 struct delayed_work restart_work;
51 52
52 int (*do_set_bittiming)(struct net_device *dev); 53 int (*do_set_bittiming)(struct net_device *dev);
53 int (*do_set_data_bittiming)(struct net_device *dev); 54 int (*do_set_data_bittiming)(struct net_device *dev);
diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
index 66533e18276c..dc69df04abc1 100644
--- a/include/linux/dma-mapping.h
+++ b/include/linux/dma-mapping.h
@@ -718,7 +718,7 @@ static inline int dma_mmap_wc(struct device *dev,
718#define dma_mmap_writecombine dma_mmap_wc 718#define dma_mmap_writecombine dma_mmap_wc
719#endif 719#endif
720 720
721#ifdef CONFIG_NEED_DMA_MAP_STATE 721#if defined(CONFIG_NEED_DMA_MAP_STATE) || defined(CONFIG_DMA_API_DEBUG)
722#define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME) dma_addr_t ADDR_NAME 722#define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME) dma_addr_t ADDR_NAME
723#define DEFINE_DMA_UNMAP_LEN(LEN_NAME) __u32 LEN_NAME 723#define DEFINE_DMA_UNMAP_LEN(LEN_NAME) __u32 LEN_NAME
724#define dma_unmap_addr(PTR, ADDR_NAME) ((PTR)->ADDR_NAME) 724#define dma_unmap_addr(PTR, ADDR_NAME) ((PTR)->ADDR_NAME)
diff --git a/include/linux/mroute.h b/include/linux/mroute.h
index d351fd3e1049..e5fb81376e92 100644
--- a/include/linux/mroute.h
+++ b/include/linux/mroute.h
@@ -120,5 +120,5 @@ struct mfc_cache {
120struct rtmsg; 120struct rtmsg;
121int ipmr_get_route(struct net *net, struct sk_buff *skb, 121int ipmr_get_route(struct net *net, struct sk_buff *skb,
122 __be32 saddr, __be32 daddr, 122 __be32 saddr, __be32 daddr,
123 struct rtmsg *rtm, int nowait); 123 struct rtmsg *rtm, int nowait, u32 portid);
124#endif 124#endif
diff --git a/include/linux/mroute6.h b/include/linux/mroute6.h
index 3987b64040c5..19a1c0c2993b 100644
--- a/include/linux/mroute6.h
+++ b/include/linux/mroute6.h
@@ -116,7 +116,7 @@ struct mfc6_cache {
116 116
117struct rtmsg; 117struct rtmsg;
118extern int ip6mr_get_route(struct net *net, struct sk_buff *skb, 118extern int ip6mr_get_route(struct net *net, struct sk_buff *skb,
119 struct rtmsg *rtm, int nowait); 119 struct rtmsg *rtm, int nowait, u32 portid);
120 120
121#ifdef CONFIG_IPV6_MROUTE 121#ifdef CONFIG_IPV6_MROUTE
122extern struct sock *mroute6_socket(struct net *net, struct sk_buff *skb); 122extern struct sock *mroute6_socket(struct net *net, struct sk_buff *skb);
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index 7e3d53753612..01e84436cddf 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -620,6 +620,7 @@ static inline int fault_in_multipages_readable(const char __user *uaddr,
620 return __get_user(c, end); 620 return __get_user(c, end);
621 } 621 }
622 622
623 (void)c;
623 return 0; 624 return 0;
624} 625}
625 626
diff --git a/include/linux/property.h b/include/linux/property.h
index 3a2f9ae25c86..856e50b2140c 100644
--- a/include/linux/property.h
+++ b/include/linux/property.h
@@ -190,7 +190,7 @@ struct property_entry {
190 .length = ARRAY_SIZE(_val_) * sizeof(_type_), \ 190 .length = ARRAY_SIZE(_val_) * sizeof(_type_), \
191 .is_array = true, \ 191 .is_array = true, \
192 .is_string = false, \ 192 .is_string = false, \
193 { .pointer = { _type_##_data = _val_ } }, \ 193 { .pointer = { ._type_##_data = _val_ } }, \
194} 194}
195 195
196#define PROPERTY_ENTRY_U8_ARRAY(_name_, _val_) \ 196#define PROPERTY_ENTRY_U8_ARRAY(_name_, _val_) \
diff --git a/include/linux/swap.h b/include/linux/swap.h
index b17cc4830fa6..4a529c984a3f 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -257,6 +257,7 @@ static inline void workingset_node_pages_inc(struct radix_tree_node *node)
257 257
258static inline void workingset_node_pages_dec(struct radix_tree_node *node) 258static inline void workingset_node_pages_dec(struct radix_tree_node *node)
259{ 259{
260 VM_BUG_ON(!workingset_node_pages(node));
260 node->count--; 261 node->count--;
261} 262}
262 263
@@ -272,6 +273,7 @@ static inline void workingset_node_shadows_inc(struct radix_tree_node *node)
272 273
273static inline void workingset_node_shadows_dec(struct radix_tree_node *node) 274static inline void workingset_node_shadows_dec(struct radix_tree_node *node)
274{ 275{
276 VM_BUG_ON(!workingset_node_shadows(node));
275 node->count -= 1U << RADIX_TREE_COUNT_SHIFT; 277 node->count -= 1U << RADIX_TREE_COUNT_SHIFT;
276} 278}
277 279
diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
index 8693dc452a7f..11c3bf262a85 100644
--- a/include/net/sctp/structs.h
+++ b/include/net/sctp/structs.h
@@ -555,6 +555,9 @@ struct sctp_chunk {
555 555
556 atomic_t refcnt; 556 atomic_t refcnt;
557 557
 558 /* How many times this chunk has been sent, for prsctp RTX policy */
559 int sent_count;
560
558 /* This is our link to the per-transport transmitted list. */ 561 /* This is our link to the per-transport transmitted list. */
559 struct list_head transmitted_list; 562 struct list_head transmitted_list;
560 563
@@ -604,16 +607,6 @@ struct sctp_chunk {
604 /* This needs to be recoverable for SCTP_SEND_FAILED events. */ 607 /* This needs to be recoverable for SCTP_SEND_FAILED events. */
605 struct sctp_sndrcvinfo sinfo; 608 struct sctp_sndrcvinfo sinfo;
606 609
607 /* We use this field to record param for prsctp policies,
608 * for TTL policy, it is the time_to_drop of this chunk,
609 * for RTX policy, it is the max_sent_count of this chunk,
610 * for PRIO policy, it is the priority of this chunk.
611 */
612 unsigned long prsctp_param;
613
614 /* How many times this chunk have been sent, for prsctp RTX policy */
615 int sent_count;
616
617 /* Which association does this belong to? */ 610 /* Which association does this belong to? */
618 struct sctp_association *asoc; 611 struct sctp_association *asoc;
619 612
diff --git a/include/scsi/scsi_host.h b/include/scsi/scsi_host.h
index 0dee7afa93d6..7e4cd53139ed 100644
--- a/include/scsi/scsi_host.h
+++ b/include/scsi/scsi_host.h
@@ -771,12 +771,9 @@ static inline int scsi_host_in_recovery(struct Scsi_Host *shost)
771 shost->tmf_in_progress; 771 shost->tmf_in_progress;
772} 772}
773 773
774extern bool scsi_use_blk_mq;
775
776static inline bool shost_use_blk_mq(struct Scsi_Host *shost) 774static inline bool shost_use_blk_mq(struct Scsi_Host *shost)
777{ 775{
778 return scsi_use_blk_mq; 776 return shost->use_blk_mq;
779
780} 777}
781 778
782extern int scsi_queue_work(struct Scsi_Host *, struct work_struct *); 779extern int scsi_queue_work(struct Scsi_Host *, struct work_struct *);
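
Taken together, the SCSI hunks stop consulting the writable use_blk_mq module parameter at runtime and instead snapshot it into each host at allocation, so flipping the parameter later only affects hosts created afterwards. A sketch of the snapshot-at-alloc pattern (names hypothetical):

	#include <linux/module.h>
	#include <linux/slab.h>

	static bool use_feature = true;
	module_param(use_feature, bool, 0644);	/* writable via sysfs */

	struct my_host {
		unsigned int use_feature:1;
	};

	static struct my_host *my_host_alloc(void)
	{
		struct my_host *h = kzalloc(sizeof(*h), GFP_KERNEL);

		if (h)
			h->use_feature = use_feature;	/* snapshot once */
		return h;
	}
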
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index 5e8dab5bf9ad..d6b729beba49 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -3446,9 +3446,28 @@ static ssize_t cgroup_subtree_control_write(struct kernfs_open_file *of,
3446 * Except for the root, subtree_control must be zero for a cgroup 3446 * Except for the root, subtree_control must be zero for a cgroup
3447 * with tasks so that child cgroups don't compete against tasks. 3447 * with tasks so that child cgroups don't compete against tasks.
3448 */ 3448 */
3449 if (enable && cgroup_parent(cgrp) && !list_empty(&cgrp->cset_links)) { 3449 if (enable && cgroup_parent(cgrp)) {
3450 ret = -EBUSY; 3450 struct cgrp_cset_link *link;
3451 goto out_unlock; 3451
3452 /*
3453 * Because namespaces pin csets too, @cgrp->cset_links
3454 * might not be empty even when @cgrp is empty. Walk and
3455 * verify each cset.
3456 */
3457 spin_lock_irq(&css_set_lock);
3458
3459 ret = 0;
3460 list_for_each_entry(link, &cgrp->cset_links, cset_link) {
3461 if (css_set_populated(link->cset)) {
3462 ret = -EBUSY;
3463 break;
3464 }
3465 }
3466
3467 spin_unlock_irq(&css_set_lock);
3468
3469 if (ret)
3470 goto out_unlock;
3452 } 3471 }
3453 3472
3454 /* save and update control masks and prepare csses */ 3473 /* save and update control masks and prepare csses */
@@ -3899,7 +3918,9 @@ void cgroup_file_notify(struct cgroup_file *cfile)
3899 * cgroup_task_count - count the number of tasks in a cgroup. 3918 * cgroup_task_count - count the number of tasks in a cgroup.
3900 * @cgrp: the cgroup in question 3919 * @cgrp: the cgroup in question
3901 * 3920 *
3902 * Return the number of tasks in the cgroup. 3921 * Return the number of tasks in the cgroup. The returned number can be
3922 * higher than the actual number of tasks due to css_set references from
3923 * namespace roots and temporary usages.
3903 */ 3924 */
3904static int cgroup_task_count(const struct cgroup *cgrp) 3925static int cgroup_task_count(const struct cgroup *cgrp)
3905{ 3926{
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index c27e53326bef..2b4c20ab5bbe 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -325,8 +325,7 @@ static struct file_system_type cpuset_fs_type = {
325/* 325/*
326 * Return in pmask the portion of a cpusets's cpus_allowed that 326 * Return in pmask the portion of a cpusets's cpus_allowed that
327 * are online. If none are online, walk up the cpuset hierarchy 327 * are online. If none are online, walk up the cpuset hierarchy
328 * until we find one that does have some online cpus. The top 328 * until we find one that does have some online cpus.
329 * cpuset always has some cpus online.
330 * 329 *
331 * One way or another, we guarantee to return some non-empty subset 330 * One way or another, we guarantee to return some non-empty subset
332 * of cpu_online_mask. 331 * of cpu_online_mask.
@@ -335,8 +334,20 @@ static struct file_system_type cpuset_fs_type = {
335 */ 334 */
336static void guarantee_online_cpus(struct cpuset *cs, struct cpumask *pmask) 335static void guarantee_online_cpus(struct cpuset *cs, struct cpumask *pmask)
337{ 336{
338 while (!cpumask_intersects(cs->effective_cpus, cpu_online_mask)) 337 while (!cpumask_intersects(cs->effective_cpus, cpu_online_mask)) {
339 cs = parent_cs(cs); 338 cs = parent_cs(cs);
339 if (unlikely(!cs)) {
340 /*
341 * The top cpuset doesn't have any online cpu as a
342 * consequence of a race between cpuset_hotplug_work
343 * and cpu hotplug notifier. But we know the top
 344 * cpuset's effective_cpus is on its way to be
345 * identical to cpu_online_mask.
346 */
347 cpumask_copy(pmask, cpu_online_mask);
348 return;
349 }
350 }
340 cpumask_and(pmask, cs->effective_cpus, cpu_online_mask); 351 cpumask_and(pmask, cs->effective_cpus, cpu_online_mask);
341} 352}
342 353
@@ -2074,7 +2085,7 @@ static void cpuset_bind(struct cgroup_subsys_state *root_css)
2074 * which could have been changed by cpuset just after it inherits the 2085 * which could have been changed by cpuset just after it inherits the
2075 * state from the parent and before it sits on the cgroup's task list. 2086 * state from the parent and before it sits on the cgroup's task list.
2076 */ 2087 */
2077void cpuset_fork(struct task_struct *task) 2088static void cpuset_fork(struct task_struct *task)
2078{ 2089{
2079 if (task_css_is_root(task, cpuset_cgrp_id)) 2090 if (task_css_is_root(task, cpuset_cgrp_id))
2080 return; 2091 return;
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 9fc3be0c7d01..5c02f6764868 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -3929,7 +3929,7 @@ static void exclusive_event_destroy(struct perf_event *event)
3929 3929
3930static bool exclusive_event_match(struct perf_event *e1, struct perf_event *e2) 3930static bool exclusive_event_match(struct perf_event *e1, struct perf_event *e2)
3931{ 3931{
3932 if ((e1->pmu->capabilities & PERF_PMU_CAP_EXCLUSIVE) && 3932 if ((e1->pmu == e2->pmu) &&
3933 (e1->cpu == e2->cpu || 3933 (e1->cpu == e2->cpu ||
3934 e1->cpu == -1 || 3934 e1->cpu == -1 ||
3935 e2->cpu == -1)) 3935 e2->cpu == -1))
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
index 637389088b3f..26ba5654d9d5 100644
--- a/kernel/irq/chip.c
+++ b/kernel/irq/chip.c
@@ -820,6 +820,8 @@ __irq_do_set_handler(struct irq_desc *desc, irq_flow_handler_t handle,
820 desc->name = name; 820 desc->name = name;
821 821
822 if (handle != handle_bad_irq && is_chained) { 822 if (handle != handle_bad_irq && is_chained) {
823 unsigned int type = irqd_get_trigger_type(&desc->irq_data);
824
823 /* 825 /*
824 * We're about to start this interrupt immediately, 826 * We're about to start this interrupt immediately,
825 * hence the need to set the trigger configuration. 827 * hence the need to set the trigger configuration.
@@ -828,8 +830,10 @@ __irq_do_set_handler(struct irq_desc *desc, irq_flow_handler_t handle,
828 * chained interrupt. Reset it immediately because we 830 * chained interrupt. Reset it immediately because we
829 * do know better. 831 * do know better.
830 */ 832 */
831 __irq_set_trigger(desc, irqd_get_trigger_type(&desc->irq_data)); 833 if (type != IRQ_TYPE_NONE) {
832 desc->handle_irq = handle; 834 __irq_set_trigger(desc, type);
835 desc->handle_irq = handle;
836 }
833 837
834 irq_settings_set_noprobe(desc); 838 irq_settings_set_noprobe(desc);
835 irq_settings_set_norequest(desc); 839 irq_settings_set_norequest(desc);
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index dade4c9559cc..7bc56762ca35 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -5124,19 +5124,20 @@ tracing_read_pipe(struct file *filp, char __user *ubuf,
5124 struct trace_iterator *iter = filp->private_data; 5124 struct trace_iterator *iter = filp->private_data;
5125 ssize_t sret; 5125 ssize_t sret;
5126 5126
5127 /* return any leftover data */
5128 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
5129 if (sret != -EBUSY)
5130 return sret;
5131
5132 trace_seq_init(&iter->seq);
5133
5134 /* 5127 /*
5135 * Avoid more than one consumer on a single file descriptor 5128 * Avoid more than one consumer on a single file descriptor
5136 * This is just a matter of traces coherency, the ring buffer itself 5129 * This is just a matter of traces coherency, the ring buffer itself
5137 * is protected. 5130 * is protected.
5138 */ 5131 */
5139 mutex_lock(&iter->mutex); 5132 mutex_lock(&iter->mutex);
5133
5134 /* return any leftover data */
5135 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
5136 if (sret != -EBUSY)
5137 goto out;
5138
5139 trace_seq_init(&iter->seq);
5140
5140 if (iter->trace->read) { 5141 if (iter->trace->read) {
5141 sret = iter->trace->read(iter, filp, ubuf, cnt, ppos); 5142 sret = iter->trace->read(iter, filp, ubuf, cnt, ppos);
5142 if (sret) 5143 if (sret)
@@ -6163,9 +6164,6 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
6163 return -EBUSY; 6164 return -EBUSY;
6164#endif 6165#endif
6165 6166
6166 if (splice_grow_spd(pipe, &spd))
6167 return -ENOMEM;
6168
6169 if (*ppos & (PAGE_SIZE - 1)) 6167 if (*ppos & (PAGE_SIZE - 1))
6170 return -EINVAL; 6168 return -EINVAL;
6171 6169
@@ -6175,6 +6173,9 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
6175 len &= PAGE_MASK; 6173 len &= PAGE_MASK;
6176 } 6174 }
6177 6175
6176 if (splice_grow_spd(pipe, &spd))
6177 return -ENOMEM;
6178
6178 again: 6179 again:
6179 trace_access_lock(iter->cpu_file); 6180 trace_access_lock(iter->cpu_file);
6180 entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file); 6181 entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);
@@ -6232,19 +6233,21 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
6232 /* did we read anything? */ 6233 /* did we read anything? */
6233 if (!spd.nr_pages) { 6234 if (!spd.nr_pages) {
6234 if (ret) 6235 if (ret)
6235 return ret; 6236 goto out;
6236 6237
6238 ret = -EAGAIN;
6237 if ((file->f_flags & O_NONBLOCK) || (flags & SPLICE_F_NONBLOCK)) 6239 if ((file->f_flags & O_NONBLOCK) || (flags & SPLICE_F_NONBLOCK))
6238 return -EAGAIN; 6240 goto out;
6239 6241
6240 ret = wait_on_pipe(iter, true); 6242 ret = wait_on_pipe(iter, true);
6241 if (ret) 6243 if (ret)
6242 return ret; 6244 goto out;
6243 6245
6244 goto again; 6246 goto again;
6245 } 6247 }
6246 6248
6247 ret = splice_to_pipe(pipe, &spd); 6249 ret = splice_to_pipe(pipe, &spd);
6250out:
6248 splice_shrink_spd(&spd); 6251 splice_shrink_spd(&spd);
6249 6252
6250 return ret; 6253 return ret;
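
Both trace.c changes are ordering fixes: the leftover-seq read moves under iter->mutex so concurrent readers cannot race on iter->seq, and every failure after splice_grow_spd() now exits through splice_shrink_spd(). A generic sketch of that check-then-grow, release-on-all-paths discipline (illustrative):

	#include <linux/slab.h>
	#include <linux/errno.h>

	/* Sketch: validate before acquiring; once a resource is held,
	 * no bare returns -- every path exits through the label.
	 */
	static int my_read(int want, int (*fill)(char *, int))
	{
		char *buf;
		int ret;

		if (want <= 0)
			return -EINVAL;		/* nothing held yet */

		buf = kmalloc(want, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;

		ret = fill(buf, want);
		if (ret < 0)
			goto out;		/* no bare return */

		ret = fill(buf + ret, want - ret);
	out:
		kfree(buf);			/* released on all paths */
		return ret;
	}
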
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 2e2cca509231..cab7405f48d2 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -821,7 +821,7 @@ config DETECT_HUNG_TASK
821 help 821 help
822 Say Y here to enable the kernel to detect "hung tasks", 822 Say Y here to enable the kernel to detect "hung tasks",
823 which are bugs that cause the task to be stuck in 823 which are bugs that cause the task to be stuck in
824 uninterruptible "D" state indefinitiley. 824 uninterruptible "D" state indefinitely.
825 825
826 When a hung task is detected, the kernel will print the 826 When a hung task is detected, the kernel will print the
827 current stack trace (which you should report), but the 827 current stack trace (which you should report), but the
diff --git a/lib/radix-tree.c b/lib/radix-tree.c
index 1b7bf7314141..91f0727e3cad 100644
--- a/lib/radix-tree.c
+++ b/lib/radix-tree.c
@@ -105,10 +105,10 @@ static unsigned int radix_tree_descend(struct radix_tree_node *parent,
105 105
106#ifdef CONFIG_RADIX_TREE_MULTIORDER 106#ifdef CONFIG_RADIX_TREE_MULTIORDER
107 if (radix_tree_is_internal_node(entry)) { 107 if (radix_tree_is_internal_node(entry)) {
108 unsigned long siboff = get_slot_offset(parent, entry); 108 if (is_sibling_entry(parent, entry)) {
109 if (siboff < RADIX_TREE_MAP_SIZE) { 109 void **sibentry = (void **) entry_to_node(entry);
110 offset = siboff; 110 offset = get_slot_offset(parent, sibentry);
111 entry = rcu_dereference_raw(parent->slots[offset]); 111 entry = rcu_dereference_raw(*sibentry);
112 } 112 }
113 } 113 }
114#endif 114#endif
diff --git a/mm/filemap.c b/mm/filemap.c
index 8a287dfc5372..2d0986a64f1f 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -110,6 +110,62 @@
110 * ->tasklist_lock (memory_failure, collect_procs_ao) 110 * ->tasklist_lock (memory_failure, collect_procs_ao)
111 */ 111 */
112 112
113static int page_cache_tree_insert(struct address_space *mapping,
114 struct page *page, void **shadowp)
115{
116 struct radix_tree_node *node;
117 void **slot;
118 int error;
119
120 error = __radix_tree_create(&mapping->page_tree, page->index, 0,
121 &node, &slot);
122 if (error)
123 return error;
124 if (*slot) {
125 void *p;
126
127 p = radix_tree_deref_slot_protected(slot, &mapping->tree_lock);
128 if (!radix_tree_exceptional_entry(p))
129 return -EEXIST;
130
131 mapping->nrexceptional--;
132 if (!dax_mapping(mapping)) {
133 if (shadowp)
134 *shadowp = p;
135 if (node)
136 workingset_node_shadows_dec(node);
137 } else {
138 /* DAX can replace empty locked entry with a hole */
139 WARN_ON_ONCE(p !=
140 (void *)(RADIX_TREE_EXCEPTIONAL_ENTRY |
141 RADIX_DAX_ENTRY_LOCK));
142 /* DAX accounts exceptional entries as normal pages */
143 if (node)
144 workingset_node_pages_dec(node);
145 /* Wakeup waiters for exceptional entry lock */
146 dax_wake_mapping_entry_waiter(mapping, page->index,
147 false);
148 }
149 }
150 radix_tree_replace_slot(slot, page);
151 mapping->nrpages++;
152 if (node) {
153 workingset_node_pages_inc(node);
154 /*
155 * Don't track node that contains actual pages.
156 *
157 * Avoid acquiring the list_lru lock if already
158 * untracked. The list_empty() test is safe as
159 * node->private_list is protected by
160 * mapping->tree_lock.
161 */
162 if (!list_empty(&node->private_list))
163 list_lru_del(&workingset_shadow_nodes,
164 &node->private_list);
165 }
166 return 0;
167}
168
113static void page_cache_tree_delete(struct address_space *mapping, 169static void page_cache_tree_delete(struct address_space *mapping,
114 struct page *page, void *shadow) 170 struct page *page, void *shadow)
115{ 171{
@@ -561,7 +617,7 @@ int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask)
561 617
562 spin_lock_irqsave(&mapping->tree_lock, flags); 618 spin_lock_irqsave(&mapping->tree_lock, flags);
563 __delete_from_page_cache(old, NULL); 619 __delete_from_page_cache(old, NULL);
564 error = radix_tree_insert(&mapping->page_tree, offset, new); 620 error = page_cache_tree_insert(mapping, new, NULL);
565 BUG_ON(error); 621 BUG_ON(error);
566 mapping->nrpages++; 622 mapping->nrpages++;
567 623
@@ -584,62 +640,6 @@ int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask)
584} 640}
585EXPORT_SYMBOL_GPL(replace_page_cache_page); 641EXPORT_SYMBOL_GPL(replace_page_cache_page);
586 642
587static int page_cache_tree_insert(struct address_space *mapping,
588 struct page *page, void **shadowp)
589{
590 struct radix_tree_node *node;
591 void **slot;
592 int error;
593
594 error = __radix_tree_create(&mapping->page_tree, page->index, 0,
595 &node, &slot);
596 if (error)
597 return error;
598 if (*slot) {
599 void *p;
600
601 p = radix_tree_deref_slot_protected(slot, &mapping->tree_lock);
602 if (!radix_tree_exceptional_entry(p))
603 return -EEXIST;
604
605 mapping->nrexceptional--;
606 if (!dax_mapping(mapping)) {
607 if (shadowp)
608 *shadowp = p;
609 if (node)
610 workingset_node_shadows_dec(node);
611 } else {
612 /* DAX can replace empty locked entry with a hole */
613 WARN_ON_ONCE(p !=
614 (void *)(RADIX_TREE_EXCEPTIONAL_ENTRY |
615 RADIX_DAX_ENTRY_LOCK));
616 /* DAX accounts exceptional entries as normal pages */
617 if (node)
618 workingset_node_pages_dec(node);
619 /* Wakeup waiters for exceptional entry lock */
620 dax_wake_mapping_entry_waiter(mapping, page->index,
621 false);
622 }
623 }
624 radix_tree_replace_slot(slot, page);
625 mapping->nrpages++;
626 if (node) {
627 workingset_node_pages_inc(node);
628 /*
629 * Don't track node that contains actual pages.
630 *
631 * Avoid acquiring the list_lru lock if already
632 * untracked. The list_empty() test is safe as
633 * node->private_list is protected by
634 * mapping->tree_lock.
635 */
636 if (!list_empty(&node->private_list))
637 list_lru_del(&workingset_shadow_nodes,
638 &node->private_list);
639 }
640 return 0;
641}
642
643static int __add_to_page_cache_locked(struct page *page, 643static int __add_to_page_cache_locked(struct page *page,
644 struct address_space *mapping, 644 struct address_space *mapping,
645 pgoff_t offset, gfp_t gfp_mask, 645 pgoff_t offset, gfp_t gfp_mask,
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index a6abd76baa72..53ae6d00656a 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1138,9 +1138,6 @@ int do_huge_pmd_numa_page(struct fault_env *fe, pmd_t pmd)
1138 bool was_writable; 1138 bool was_writable;
1139 int flags = 0; 1139 int flags = 0;
1140 1140
1141 /* A PROT_NONE fault should not end up here */
1142 BUG_ON(!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)));
1143
1144 fe->ptl = pmd_lock(vma->vm_mm, fe->pmd); 1141 fe->ptl = pmd_lock(vma->vm_mm, fe->pmd);
1145 if (unlikely(!pmd_same(pmd, *fe->pmd))) 1142 if (unlikely(!pmd_same(pmd, *fe->pmd)))
1146 goto out_unlock; 1143 goto out_unlock;
diff --git a/mm/ksm.c b/mm/ksm.c
index 73d43bafd9fb..5048083b60f2 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -283,7 +283,8 @@ static inline struct rmap_item *alloc_rmap_item(void)
283{ 283{
284 struct rmap_item *rmap_item; 284 struct rmap_item *rmap_item;
285 285
286 rmap_item = kmem_cache_zalloc(rmap_item_cache, GFP_KERNEL); 286 rmap_item = kmem_cache_zalloc(rmap_item_cache, GFP_KERNEL |
287 __GFP_NORETRY | __GFP_NOWARN);
287 if (rmap_item) 288 if (rmap_item)
288 ksm_rmap_items++; 289 ksm_rmap_items++;
289 return rmap_item; 290 return rmap_item;
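
The ksm hunk makes rmap_item allocation opportunistic: __GFP_NORETRY stops the allocator short of the OOM killer and __GFP_NOWARN suppresses the failure splat, since KSM can simply retry on a later scan. A sketch (illustrative):

	#include <linux/slab.h>

	/* Sketch: best-effort metadata allocation; failure is cheap
	 * because the caller can retry on a later pass.
	 */
	static void *alloc_meta(struct kmem_cache *cache)
	{
		return kmem_cache_zalloc(cache,
				GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN);
	}
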
diff --git a/mm/memory.c b/mm/memory.c
index 83be99d9d8a1..793fe0f9841c 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -3351,9 +3351,6 @@ static int do_numa_page(struct fault_env *fe, pte_t pte)
3351 bool was_writable = pte_write(pte); 3351 bool was_writable = pte_write(pte);
3352 int flags = 0; 3352 int flags = 0;
3353 3353
3354 /* A PROT_NONE fault should not end up here */
3355 BUG_ON(!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)));
3356
3357 /* 3354 /*
3358 * The "pte" at this point cannot be used safely without 3355 * The "pte" at this point cannot be used safely without
3359 * validation through pte_unmap_same(). It's of NUMA type but 3356 * validation through pte_unmap_same(). It's of NUMA type but
@@ -3458,6 +3455,11 @@ static int wp_huge_pmd(struct fault_env *fe, pmd_t orig_pmd)
3458 return VM_FAULT_FALLBACK; 3455 return VM_FAULT_FALLBACK;
3459} 3456}
3460 3457
3458static inline bool vma_is_accessible(struct vm_area_struct *vma)
3459{
3460 return vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE);
3461}
3462
3461/* 3463/*
3462 * These routines also need to handle stuff like marking pages dirty 3464 * These routines also need to handle stuff like marking pages dirty
3463 * and/or accessed for architectures that don't do it in hardware (most 3465 * and/or accessed for architectures that don't do it in hardware (most
@@ -3524,7 +3526,7 @@ static int handle_pte_fault(struct fault_env *fe)
3524 if (!pte_present(entry)) 3526 if (!pte_present(entry))
3525 return do_swap_page(fe, entry); 3527 return do_swap_page(fe, entry);
3526 3528
3527 if (pte_protnone(entry)) 3529 if (pte_protnone(entry) && vma_is_accessible(fe->vma))
3528 return do_numa_page(fe, entry); 3530 return do_numa_page(fe, entry);
3529 3531
3530 fe->ptl = pte_lockptr(fe->vma->vm_mm, fe->pmd); 3532 fe->ptl = pte_lockptr(fe->vma->vm_mm, fe->pmd);
@@ -3590,7 +3592,7 @@ static int __handle_mm_fault(struct vm_area_struct *vma, unsigned long address,
3590 3592
3591 barrier(); 3593 barrier();
3592 if (pmd_trans_huge(orig_pmd) || pmd_devmap(orig_pmd)) { 3594 if (pmd_trans_huge(orig_pmd) || pmd_devmap(orig_pmd)) {
3593 if (pmd_protnone(orig_pmd)) 3595 if (pmd_protnone(orig_pmd) && vma_is_accessible(vma))
3594 return do_huge_pmd_numa_page(&fe, orig_pmd); 3596 return do_huge_pmd_numa_page(&fe, orig_pmd);
3595 3597
3596 if ((fe.flags & FAULT_FLAG_WRITE) && 3598 if ((fe.flags & FAULT_FLAG_WRITE) &&
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index b58906b6215c..9d29ba0f7192 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -1555,8 +1555,8 @@ static struct page *new_node_page(struct page *page, unsigned long private,
1555{ 1555{
1556 gfp_t gfp_mask = GFP_USER | __GFP_MOVABLE; 1556 gfp_t gfp_mask = GFP_USER | __GFP_MOVABLE;
1557 int nid = page_to_nid(page); 1557 int nid = page_to_nid(page);
1558 nodemask_t nmask = node_online_map; 1558 nodemask_t nmask = node_states[N_MEMORY];
1559 struct page *new_page; 1559 struct page *new_page = NULL;
1560 1560
1561 /* 1561 /*
1562 * TODO: allocate a destination hugepage from a nearest neighbor node, 1562 * TODO: allocate a destination hugepage from a nearest neighbor node,
@@ -1567,14 +1567,14 @@ static struct page *new_node_page(struct page *page, unsigned long private,
1567 return alloc_huge_page_node(page_hstate(compound_head(page)), 1567 return alloc_huge_page_node(page_hstate(compound_head(page)),
1568 next_node_in(nid, nmask)); 1568 next_node_in(nid, nmask));
1569 1569
1570 if (nid != next_node_in(nid, nmask)) 1570 node_clear(nid, nmask);
1571 node_clear(nid, nmask);
1572 1571
1573 if (PageHighMem(page) 1572 if (PageHighMem(page)
1574 || (zone_idx(page_zone(page)) == ZONE_MOVABLE)) 1573 || (zone_idx(page_zone(page)) == ZONE_MOVABLE))
1575 gfp_mask |= __GFP_HIGHMEM; 1574 gfp_mask |= __GFP_HIGHMEM;
1576 1575
1577 new_page = __alloc_pages_nodemask(gfp_mask, 0, 1576 if (!nodes_empty(nmask))
1577 new_page = __alloc_pages_nodemask(gfp_mask, 0,
1578 node_zonelist(nid, gfp_mask), &nmask); 1578 node_zonelist(nid, gfp_mask), &nmask);
1579 if (!new_page) 1579 if (!new_page)
1580 new_page = __alloc_pages(gfp_mask, 0, 1580 new_page = __alloc_pages(gfp_mask, 0,
diff --git a/mm/shmem.c b/mm/shmem.c
index fd8b2b5741b1..971fc83e6402 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -270,7 +270,7 @@ bool shmem_charge(struct inode *inode, long pages)
270 info->alloced -= pages; 270 info->alloced -= pages;
271 shmem_recalc_inode(inode); 271 shmem_recalc_inode(inode);
272 spin_unlock_irqrestore(&info->lock, flags); 272 spin_unlock_irqrestore(&info->lock, flags);
273 273 shmem_unacct_blocks(info->flags, pages);
274 return false; 274 return false;
275 } 275 }
276 percpu_counter_add(&sbinfo->used_blocks, pages); 276 percpu_counter_add(&sbinfo->used_blocks, pages);
@@ -291,6 +291,7 @@ void shmem_uncharge(struct inode *inode, long pages)
291 291
292 if (sbinfo->max_blocks) 292 if (sbinfo->max_blocks)
293 percpu_counter_sub(&sbinfo->used_blocks, pages); 293 percpu_counter_sub(&sbinfo->used_blocks, pages);
294 shmem_unacct_blocks(info->flags, pages);
294} 295}
295 296
296/* 297/*
@@ -1980,7 +1981,7 @@ unsigned long shmem_get_unmapped_area(struct file *file,
1980 return addr; 1981 return addr;
1981 sb = shm_mnt->mnt_sb; 1982 sb = shm_mnt->mnt_sb;
1982 } 1983 }
1983 if (SHMEM_SB(sb)->huge != SHMEM_HUGE_NEVER) 1984 if (SHMEM_SB(sb)->huge == SHMEM_HUGE_NEVER)
1984 return addr; 1985 return addr;
1985 } 1986 }
1986 1987
diff --git a/mm/vmscan.c b/mm/vmscan.c
index b1e12a1ea9cf..0fe8b7113868 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -2303,23 +2303,6 @@ out:
2303 } 2303 }
2304} 2304}
2305 2305
2306#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
2307static void init_tlb_ubc(void)
2308{
2309 /*
2310 * This deliberately does not clear the cpumask as it's expensive
2311 * and unnecessary. If there happens to be data in there then the
2312 * first SWAP_CLUSTER_MAX pages will send an unnecessary IPI and
2313 * then will be cleared.
2314 */
2315 current->tlb_ubc.flush_required = false;
2316}
2317#else
2318static inline void init_tlb_ubc(void)
2319{
2320}
2321#endif /* CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH */
2322
2323/* 2306/*
2324 * This is a basic per-node page freer. Used by both kswapd and direct reclaim. 2307 * This is a basic per-node page freer. Used by both kswapd and direct reclaim.
2325 */ 2308 */
@@ -2355,8 +2338,6 @@ static void shrink_node_memcg(struct pglist_data *pgdat, struct mem_cgroup *memc
2355 scan_adjusted = (global_reclaim(sc) && !current_is_kswapd() && 2338 scan_adjusted = (global_reclaim(sc) && !current_is_kswapd() &&
2356 sc->priority == DEF_PRIORITY); 2339 sc->priority == DEF_PRIORITY);
2357 2340
2358 init_tlb_ubc();
2359
2360 blk_start_plug(&plug); 2341 blk_start_plug(&plug);
2361 while (nr[LRU_INACTIVE_ANON] || nr[LRU_ACTIVE_FILE] || 2342 while (nr[LRU_INACTIVE_ANON] || nr[LRU_ACTIVE_FILE] ||
2362 nr[LRU_INACTIVE_FILE]) { 2343 nr[LRU_INACTIVE_FILE]) {
diff --git a/mm/workingset.c b/mm/workingset.c
index 69551cfae97b..617475f529f4 100644
--- a/mm/workingset.c
+++ b/mm/workingset.c
@@ -418,21 +418,19 @@ static enum lru_status shadow_lru_isolate(struct list_head *item,
418 * no pages, so we expect to be able to remove them all and 418 * no pages, so we expect to be able to remove them all and
419 * delete and free the empty node afterwards. 419 * delete and free the empty node afterwards.
420 */ 420 */
421 421 BUG_ON(!workingset_node_shadows(node));
422 BUG_ON(!node->count); 422 BUG_ON(workingset_node_pages(node));
423 BUG_ON(node->count & RADIX_TREE_COUNT_MASK);
424 423
425 for (i = 0; i < RADIX_TREE_MAP_SIZE; i++) { 424 for (i = 0; i < RADIX_TREE_MAP_SIZE; i++) {
426 if (node->slots[i]) { 425 if (node->slots[i]) {
427 BUG_ON(!radix_tree_exceptional_entry(node->slots[i])); 426 BUG_ON(!radix_tree_exceptional_entry(node->slots[i]));
428 node->slots[i] = NULL; 427 node->slots[i] = NULL;
429 BUG_ON(node->count < (1U << RADIX_TREE_COUNT_SHIFT)); 428 workingset_node_shadows_dec(node);
430 node->count -= 1U << RADIX_TREE_COUNT_SHIFT;
431 BUG_ON(!mapping->nrexceptional); 429 BUG_ON(!mapping->nrexceptional);
432 mapping->nrexceptional--; 430 mapping->nrexceptional--;
433 } 431 }
434 } 432 }
435 BUG_ON(node->count); 433 BUG_ON(workingset_node_shadows(node));
436 inc_node_state(page_pgdat(virt_to_page(node)), WORKINGSET_NODERECLAIM); 434 inc_node_state(page_pgdat(virt_to_page(node)), WORKINGSET_NODERECLAIM);
437 if (!__radix_tree_delete_node(&mapping->page_tree, node)) 435 if (!__radix_tree_delete_node(&mapping->page_tree, node))
438 BUG(); 436 BUG();
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
index a87bcd2d4a94..5f006e13de56 100644
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -2123,7 +2123,7 @@ static int __ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
2123 2123
2124int ipmr_get_route(struct net *net, struct sk_buff *skb, 2124int ipmr_get_route(struct net *net, struct sk_buff *skb,
2125 __be32 saddr, __be32 daddr, 2125 __be32 saddr, __be32 daddr,
2126 struct rtmsg *rtm, int nowait) 2126 struct rtmsg *rtm, int nowait, u32 portid)
2127{ 2127{
2128 struct mfc_cache *cache; 2128 struct mfc_cache *cache;
2129 struct mr_table *mrt; 2129 struct mr_table *mrt;
@@ -2168,6 +2168,7 @@ int ipmr_get_route(struct net *net, struct sk_buff *skb,
2168 return -ENOMEM; 2168 return -ENOMEM;
2169 } 2169 }
2170 2170
2171 NETLINK_CB(skb2).portid = portid;
2171 skb_push(skb2, sizeof(struct iphdr)); 2172 skb_push(skb2, sizeof(struct iphdr));
2172 skb_reset_network_header(skb2); 2173 skb_reset_network_header(skb2);
2173 iph = ip_hdr(skb2); 2174 iph = ip_hdr(skb2);
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 654a9af20136..f2be689a6c85 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -2500,7 +2500,8 @@ static int rt_fill_info(struct net *net, __be32 dst, __be32 src, u32 table_id,
2500 IPV4_DEVCONF_ALL(net, MC_FORWARDING)) { 2500 IPV4_DEVCONF_ALL(net, MC_FORWARDING)) {
2501 int err = ipmr_get_route(net, skb, 2501 int err = ipmr_get_route(net, skb,
2502 fl4->saddr, fl4->daddr, 2502 fl4->saddr, fl4->daddr,
2503 r, nowait); 2503 r, nowait, portid);
2504
2504 if (err <= 0) { 2505 if (err <= 0) {
2505 if (!nowait) { 2506 if (!nowait) {
2506 if (err == 0) 2507 if (err == 0)
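
The ipmr change (mirrored for IPv6 below) threads the requester's netlink portid down to the point where a reply skb is built, because a freshly allocated skb2 carries no addressing and the eventual unicast would otherwise go to portid 0. A sketch (names illustrative):

	#include <linux/netlink.h>
	#include <linux/skbuff.h>

	/* Sketch: a deferred reply skb must carry the requester's
	 * portid or the unicast never reaches the right socket.
	 */
	static struct sk_buff *make_reply(unsigned int len, u32 portid)
	{
		struct sk_buff *skb2 = alloc_skb(len, GFP_ATOMIC);

		if (skb2)
			NETLINK_CB(skb2).portid = portid;
		return skb2;
	}
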
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 8c6ad2d319d6..a27b9c0e27c0 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -2362,10 +2362,9 @@ static void DBGUNDO(struct sock *sk, const char *msg)
2362 } 2362 }
2363#if IS_ENABLED(CONFIG_IPV6) 2363#if IS_ENABLED(CONFIG_IPV6)
2364 else if (sk->sk_family == AF_INET6) { 2364 else if (sk->sk_family == AF_INET6) {
2365 struct ipv6_pinfo *np = inet6_sk(sk);
2366 pr_debug("Undo %s %pI6/%u c%u l%u ss%u/%u p%u\n", 2365 pr_debug("Undo %s %pI6/%u c%u l%u ss%u/%u p%u\n",
2367 msg, 2366 msg,
2368 &np->daddr, ntohs(inet->inet_dport), 2367 &sk->sk_v6_daddr, ntohs(inet->inet_dport),
2369 tp->snd_cwnd, tcp_left_out(tp), 2368 tp->snd_cwnd, tcp_left_out(tp),
2370 tp->snd_ssthresh, tp->prior_ssthresh, 2369 tp->snd_ssthresh, tp->prior_ssthresh,
2371 tp->packets_out); 2370 tp->packets_out);
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 7c777089a4d6..896e9dfbdb5c 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -1992,12 +1992,14 @@ static int tcp_mtu_probe(struct sock *sk)
1992 len = 0; 1992 len = 0;
1993 tcp_for_write_queue_from_safe(skb, next, sk) { 1993 tcp_for_write_queue_from_safe(skb, next, sk) {
1994 copy = min_t(int, skb->len, probe_size - len); 1994 copy = min_t(int, skb->len, probe_size - len);
1995 if (nskb->ip_summed) 1995 if (nskb->ip_summed) {
1996 skb_copy_bits(skb, 0, skb_put(nskb, copy), copy); 1996 skb_copy_bits(skb, 0, skb_put(nskb, copy), copy);
1997 else 1997 } else {
1998 nskb->csum = skb_copy_and_csum_bits(skb, 0, 1998 __wsum csum = skb_copy_and_csum_bits(skb, 0,
1999 skb_put(nskb, copy), 1999 skb_put(nskb, copy),
2000 copy, nskb->csum); 2000 copy, 0);
2001 nskb->csum = csum_block_add(nskb->csum, csum, len);
2002 }
2001 2003
2002 if (skb->len <= copy) { 2004 if (skb->len <= copy) {
2003 /* We've eaten all the data from this skb. 2005 /* We've eaten all the data from this skb.
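
The tcp_mtu_probe() fix matters because one's-complement checksums are position-sensitive: seeding skb_copy_and_csum_bits() with the running nskb->csum ignores where the copied block lands, so each fragment's sum is now folded in with csum_block_add() at its running offset. A sketch (illustrative):

	#include <net/checksum.h>
	#include <linux/skbuff.h>

	/* Sketch: fold a fragment's checksum into a running total at
	 * byte offset 'off'; csum_block_add() rotates the sum when the
	 * block starts on an odd byte.
	 */
	static __wsum fold_fragment(__wsum total, const struct sk_buff *skb,
				    u8 *dst, int copy, int off)
	{
		__wsum part = skb_copy_and_csum_bits(skb, 0, dst, copy, 0);

		return csum_block_add(total, part, off);
	}
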
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
index 4ce74f86291b..d7d6d3ae0b3b 100644
--- a/net/ipv6/ip6_gre.c
+++ b/net/ipv6/ip6_gre.c
@@ -648,7 +648,6 @@ static int ip6gre_xmit_other(struct sk_buff *skb, struct net_device *dev)
648 encap_limit = t->parms.encap_limit; 648 encap_limit = t->parms.encap_limit;
649 649
650 memcpy(&fl6, &t->fl.u.ip6, sizeof(fl6)); 650 memcpy(&fl6, &t->fl.u.ip6, sizeof(fl6));
651 fl6.flowi6_proto = skb->protocol;
652 651
653 err = gre_handle_offloads(skb, !!(t->parms.o_flags & TUNNEL_CSUM)); 652 err = gre_handle_offloads(skb, !!(t->parms.o_flags & TUNNEL_CSUM));
654 if (err) 653 if (err)
diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
index fccb5dd91902..7f4265b1649b 100644
--- a/net/ipv6/ip6mr.c
+++ b/net/ipv6/ip6mr.c
@@ -2285,8 +2285,8 @@ static int __ip6mr_fill_mroute(struct mr6_table *mrt, struct sk_buff *skb,
2285 return 1; 2285 return 1;
2286} 2286}
2287 2287
2288int ip6mr_get_route(struct net *net, 2288int ip6mr_get_route(struct net *net, struct sk_buff *skb, struct rtmsg *rtm,
2289 struct sk_buff *skb, struct rtmsg *rtm, int nowait) 2289 int nowait, u32 portid)
2290{ 2290{
2291 int err; 2291 int err;
2292 struct mr6_table *mrt; 2292 struct mr6_table *mrt;
@@ -2331,6 +2331,7 @@ int ip6mr_get_route(struct net *net,
2331 return -ENOMEM; 2331 return -ENOMEM;
2332 } 2332 }
2333 2333
2334 NETLINK_CB(skb2).portid = portid;
2334 skb_reset_transport_header(skb2); 2335 skb_reset_transport_header(skb2);
2335 2336
2336 skb_put(skb2, sizeof(struct ipv6hdr)); 2337 skb_put(skb2, sizeof(struct ipv6hdr));
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 5a5aeb92b4ec..bdbc38e8bf29 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -3216,7 +3216,9 @@ static int rt6_fill_node(struct net *net,
 	if (iif) {
 #ifdef CONFIG_IPV6_MROUTE
 		if (ipv6_addr_is_multicast(&rt->rt6i_dst.addr)) {
-			int err = ip6mr_get_route(net, skb, rtm, nowait);
+			int err = ip6mr_get_route(net, skb, rtm, nowait,
+						  portid);
+
 			if (err <= 0) {
 				if (!nowait) {
 					if (err == 0)
diff --git a/net/sched/act_ife.c b/net/sched/act_ife.c
index ccf7b4b655fe..95c463cbb9a6 100644
--- a/net/sched/act_ife.c
+++ b/net/sched/act_ife.c
@@ -53,7 +53,7 @@ int ife_tlv_meta_encode(void *skbdata, u16 attrtype, u16 dlen, const void *dval)
 	u32 *tlv = (u32 *)(skbdata);
 	u16 totlen = nla_total_size(dlen);	/*alignment + hdr */
 	char *dptr = (char *)tlv + NLA_HDRLEN;
-	u32 htlv = attrtype << 16 | dlen;
+	u32 htlv = attrtype << 16 | (dlen + NLA_HDRLEN);
 
 	*tlv = htonl(htlv);
 	memset(dptr, 0, totlen - NLA_HDRLEN);
@@ -653,7 +653,7 @@ static int tcf_ife_decode(struct sk_buff *skb, const struct tc_action *a,
 	struct tcf_ife_info *ife = to_ife(a);
 	int action = ife->tcf_action;
 	struct ifeheadr *ifehdr = (struct ifeheadr *)skb->data;
-	u16 ifehdrln = ifehdr->metalen;
+	int ifehdrln = (int)ifehdr->metalen;
 	struct meta_tlvhdr *tlv = (struct meta_tlvhdr *)(ifehdr->tlv_data);
 
 	spin_lock(&ife->tcf_lock);
@@ -766,8 +766,6 @@ static int tcf_ife_encode(struct sk_buff *skb, const struct tc_action *a,
 		return TC_ACT_SHOT;
 	}
 
-	iethh = eth_hdr(skb);
-
 	err = skb_cow_head(skb, hdrm);
 	if (unlikely(err)) {
 		ife->tcf_qstats.drops++;
@@ -778,6 +776,7 @@ static int tcf_ife_encode(struct sk_buff *skb, const struct tc_action *a,
 	if (!(at & AT_EGRESS))
 		skb_push(skb, skb->dev->hard_header_len);
 
+	iethh = (struct ethhdr *)skb->data;
 	__skb_push(skb, hdrm);
 	memcpy(skb->data, iethh, skb->mac_len);
 	skb_reset_mac_header(skb);
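In the first act_ife.c hunk above, the 16-bit length half of the IFE TLV header word now counts the 4-byte header itself (NLA_HDRLEN), matching the netlink attribute convention that nla_total_size() already assumed. A standalone sketch of the corrected header layout (TLV_HDRLEN and ife_tlv_header() are illustrative stand-ins, not kernel definitions):

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

#define TLV_HDRLEN 4  /* mirrors NLA_HDRLEN */

static uint32_t ife_tlv_header(uint16_t attrtype, uint16_t dlen)
{
    /* high 16 bits: attribute type; low 16 bits: data len + header len */
    return htonl((uint32_t)attrtype << 16 | (dlen + TLV_HDRLEN));
}

int main(void)
{
    uint32_t hdr = ife_tlv_header(2 /* hypothetical attr type */, 6);
    printf("wire header: 0x%08x\n", (unsigned)ntohl(hdr)); /* 0x0002000a */
    return 0;
}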
diff --git a/net/sched/sch_qfq.c b/net/sched/sch_qfq.c
index f27ffee106f6..ca0516e6f743 100644
--- a/net/sched/sch_qfq.c
+++ b/net/sched/sch_qfq.c
@@ -1153,6 +1153,7 @@ static struct sk_buff *qfq_dequeue(struct Qdisc *sch)
 	if (!skb)
 		return NULL;
 
+	qdisc_qstats_backlog_dec(sch, skb);
 	sch->q.qlen--;
 	qdisc_bstats_update(sch, skb);
 
@@ -1256,6 +1257,7 @@ static int qfq_enqueue(struct sk_buff *skb, struct Qdisc *sch,
 	}
 
 	bstats_update(&cl->bstats, skb);
+	qdisc_qstats_backlog_inc(sch, skb);
 	++sch->q.qlen;
 
 	agg = cl->agg;
@@ -1476,6 +1478,7 @@ static void qfq_reset_qdisc(struct Qdisc *sch)
 			qdisc_reset(cl->qdisc);
 		}
 	}
+	sch->qstats.backlog = 0;
 	sch->q.qlen = 0;
 }
 
diff --git a/net/sched/sch_sfb.c b/net/sched/sch_sfb.c
index add3cc7d37ec..20a350bd1b1d 100644
--- a/net/sched/sch_sfb.c
+++ b/net/sched/sch_sfb.c
@@ -400,6 +400,7 @@ static int sfb_enqueue(struct sk_buff *skb, struct Qdisc *sch,
 enqueue:
 	ret = qdisc_enqueue(skb, child, to_free);
 	if (likely(ret == NET_XMIT_SUCCESS)) {
+		qdisc_qstats_backlog_inc(sch, skb);
 		sch->q.qlen++;
 		increment_qlen(skb, q);
 	} else if (net_xmit_drop_count(ret)) {
@@ -428,6 +429,7 @@ static struct sk_buff *sfb_dequeue(struct Qdisc *sch)
 
 	if (skb) {
 		qdisc_bstats_update(sch, skb);
+		qdisc_qstats_backlog_dec(sch, skb);
 		sch->q.qlen--;
 		decrement_qlen(skb, q);
 	}
@@ -450,6 +452,7 @@ static void sfb_reset(struct Qdisc *sch)
 	struct sfb_sched_data *q = qdisc_priv(sch);
 
 	qdisc_reset(q->qdisc);
+	sch->qstats.backlog = 0;
 	sch->q.qlen = 0;
 	q->slot = 0;
 	q->double_buffering = false;
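The sch_qfq.c and sch_sfb.c hunks above restore the same invariant: a qdisc's byte backlog must move in lockstep with its packet count, incremented on successful enqueue, decremented on dequeue, and zeroed on reset. A minimal user-space sketch of that bookkeeping (struct qstats here is a stand-in, not the kernel's gnet_stats_queue):

#include <assert.h>
#include <stddef.h>

struct qstats { size_t qlen; size_t backlog; };

static void enqueue(struct qstats *q, size_t pkt_len)
{
    q->backlog += pkt_len;   /* qdisc_qstats_backlog_inc() */
    q->qlen++;
}

static void dequeue(struct qstats *q, size_t pkt_len)
{
    q->backlog -= pkt_len;   /* qdisc_qstats_backlog_dec() */
    q->qlen--;
}

static void reset(struct qstats *q)
{
    q->backlog = 0;          /* the line each reset path was missing */
    q->qlen = 0;
}

int main(void)
{
    struct qstats q = { 0, 0 };
    enqueue(&q, 1500);
    enqueue(&q, 64);
    dequeue(&q, 1500);
    reset(&q);
    assert(q.qlen == 0 && q.backlog == 0);
    return 0;
}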
diff --git a/net/sctp/chunk.c b/net/sctp/chunk.c
index 8afe2e90d003..7a1cdf43e49d 100644
--- a/net/sctp/chunk.c
+++ b/net/sctp/chunk.c
@@ -192,6 +192,11 @@ struct sctp_datamsg *sctp_datamsg_from_user(struct sctp_association *asoc,
 			 msg, msg->expires_at, jiffies);
 	}
 
+	if (asoc->peer.prsctp_capable &&
+	    SCTP_PR_TTL_ENABLED(sinfo->sinfo_flags))
+		msg->expires_at =
+			jiffies + msecs_to_jiffies(sinfo->sinfo_timetolive);
+
 	/* This is the biggest possible DATA chunk that can fit into
 	 * the packet
 	 */
@@ -349,7 +354,7 @@ errout:
 /* Check whether this message has expired. */
 int sctp_chunk_abandoned(struct sctp_chunk *chunk)
 {
-	if (!chunk->asoc->prsctp_enable ||
+	if (!chunk->asoc->peer.prsctp_capable ||
 	    !SCTP_PR_POLICY(chunk->sinfo.sinfo_flags)) {
 		struct sctp_datamsg *msg = chunk->msg;
 
@@ -363,14 +368,14 @@ int sctp_chunk_abandoned(struct sctp_chunk *chunk)
 	}
 
 	if (SCTP_PR_TTL_ENABLED(chunk->sinfo.sinfo_flags) &&
-	    time_after(jiffies, chunk->prsctp_param)) {
+	    time_after(jiffies, chunk->msg->expires_at)) {
 		if (chunk->sent_count)
 			chunk->asoc->abandoned_sent[SCTP_PR_INDEX(TTL)]++;
 		else
 			chunk->asoc->abandoned_unsent[SCTP_PR_INDEX(TTL)]++;
 		return 1;
 	} else if (SCTP_PR_RTX_ENABLED(chunk->sinfo.sinfo_flags) &&
-		   chunk->sent_count > chunk->prsctp_param) {
+		   chunk->sent_count > chunk->sinfo.sinfo_timetolive) {
 		chunk->asoc->abandoned_sent[SCTP_PR_INDEX(RTX)]++;
 		return 1;
 	}
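The TTL check above now compares jiffies against msg->expires_at with time_after(), which stays correct across counter wrap because it reduces to a signed difference rather than a plain comparison. A 32-bit user-space rendition of that idiom (time_after32() here is illustrative):

#include <assert.h>
#include <stdint.h>

static int time_after32(uint32_t a, uint32_t b)
{
    return (int32_t)(b - a) < 0;        /* true if a is later than b */
}

int main(void)
{
    uint32_t now = 0xfffffff0u;         /* near the wrap point */
    uint32_t expires_at = now + 0x20;   /* wraps past zero */

    assert(!time_after32(now, expires_at));           /* not yet expired */
    assert(time_after32(expires_at + 1, expires_at)); /* just expired */
    return 0;
}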
diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c
index 3ec6da8bbb53..582585393d35 100644
--- a/net/sctp/outqueue.c
+++ b/net/sctp/outqueue.c
@@ -304,7 +304,7 @@ void sctp_outq_tail(struct sctp_outq *q, struct sctp_chunk *chunk, gfp_t gfp)
304 "illegal chunk"); 304 "illegal chunk");
305 305
306 sctp_outq_tail_data(q, chunk); 306 sctp_outq_tail_data(q, chunk);
307 if (chunk->asoc->prsctp_enable && 307 if (chunk->asoc->peer.prsctp_capable &&
308 SCTP_PR_PRIO_ENABLED(chunk->sinfo.sinfo_flags)) 308 SCTP_PR_PRIO_ENABLED(chunk->sinfo.sinfo_flags))
309 chunk->asoc->sent_cnt_removable++; 309 chunk->asoc->sent_cnt_removable++;
310 if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED) 310 if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED)
@@ -354,7 +354,7 @@ static int sctp_prsctp_prune_sent(struct sctp_association *asoc,
 
 	list_for_each_entry_safe(chk, temp, queue, transmitted_list) {
 		if (!SCTP_PR_PRIO_ENABLED(chk->sinfo.sinfo_flags) ||
-		    chk->prsctp_param <= sinfo->sinfo_timetolive)
+		    chk->sinfo.sinfo_timetolive <= sinfo->sinfo_timetolive)
 			continue;
 
 		list_del_init(&chk->transmitted_list);
@@ -389,7 +389,7 @@ static int sctp_prsctp_prune_unsent(struct sctp_association *asoc,
 
 	list_for_each_entry_safe(chk, temp, queue, list) {
 		if (!SCTP_PR_PRIO_ENABLED(chk->sinfo.sinfo_flags) ||
-		    chk->prsctp_param <= sinfo->sinfo_timetolive)
+		    chk->sinfo.sinfo_timetolive <= sinfo->sinfo_timetolive)
 			continue;
 
 		list_del_init(&chk->list);
@@ -413,7 +413,7 @@ void sctp_prsctp_prune(struct sctp_association *asoc,
 {
 	struct sctp_transport *transport;
 
-	if (!asoc->prsctp_enable || !asoc->sent_cnt_removable)
+	if (!asoc->peer.prsctp_capable || !asoc->sent_cnt_removable)
 		return;
 
 	msg_len = sctp_prsctp_prune_sent(asoc, sinfo,
@@ -1026,7 +1026,7 @@ static void sctp_outq_flush(struct sctp_outq *q, int rtx_timeout, gfp_t gfp)
 
 			/* Mark as failed send. */
 			sctp_chunk_fail(chunk, SCTP_ERROR_INV_STRM);
-			if (asoc->prsctp_enable &&
+			if (asoc->peer.prsctp_capable &&
 			    SCTP_PR_PRIO_ENABLED(chunk->sinfo.sinfo_flags))
 				asoc->sent_cnt_removable--;
 			sctp_chunk_free(chunk);
@@ -1319,7 +1319,7 @@ int sctp_outq_sack(struct sctp_outq *q, struct sctp_chunk *chunk)
 			tsn = ntohl(tchunk->subh.data_hdr->tsn);
 			if (TSN_lte(tsn, ctsn)) {
 				list_del_init(&tchunk->transmitted_list);
-				if (asoc->prsctp_enable &&
+				if (asoc->peer.prsctp_capable &&
 				    SCTP_PR_PRIO_ENABLED(chunk->sinfo.sinfo_flags))
 					asoc->sent_cnt_removable--;
 				sctp_chunk_free(tchunk);
diff --git a/net/sctp/sctp_diag.c b/net/sctp/sctp_diag.c
index 807158e32f5f..048954eee984 100644
--- a/net/sctp/sctp_diag.c
+++ b/net/sctp/sctp_diag.c
@@ -276,28 +276,17 @@ out:
 	return err;
 }
 
-static int sctp_tsp_dump(struct sctp_transport *tsp, void *p)
+static int sctp_sock_dump(struct sock *sk, void *p)
 {
-	struct sctp_endpoint *ep = tsp->asoc->ep;
+	struct sctp_endpoint *ep = sctp_sk(sk)->ep;
 	struct sctp_comm_param *commp = p;
-	struct sock *sk = ep->base.sk;
 	struct sk_buff *skb = commp->skb;
 	struct netlink_callback *cb = commp->cb;
 	const struct inet_diag_req_v2 *r = commp->r;
-	struct sctp_association *assoc =
-		list_entry(ep->asocs.next, struct sctp_association, asocs);
+	struct sctp_association *assoc;
 	int err = 0;
 
-	/* find the ep only once through the transports by this condition */
-	if (tsp->asoc != assoc)
-		goto out;
-
-	if (r->sdiag_family != AF_UNSPEC && sk->sk_family != r->sdiag_family)
-		goto out;
-
 	lock_sock(sk);
-	if (sk != assoc->base.sk)
-		goto release;
 	list_for_each_entry(assoc, &ep->asocs, asocs) {
 		if (cb->args[4] < cb->args[1])
 			goto next;
@@ -317,7 +306,7 @@ static int sctp_tsp_dump(struct sctp_transport *tsp, void *p)
 					   NLM_F_MULTI, cb->nlh,
 					   commp->net_admin) < 0) {
 				cb->args[3] = 1;
-				err = 2;
+				err = 1;
 				goto release;
 			}
 			cb->args[3] = 1;
@@ -327,7 +316,7 @@ static int sctp_tsp_dump(struct sctp_transport *tsp, void *p)
 				   NETLINK_CB(cb->skb).portid,
 				   cb->nlh->nlmsg_seq, 0, cb->nlh,
 				   commp->net_admin) < 0) {
-			err = 2;
+			err = 1;
 			goto release;
 		}
 next:
@@ -339,10 +328,35 @@ next:
 	cb->args[4] = 0;
 release:
 	release_sock(sk);
+	sock_put(sk);
 	return err;
+}
+
+static int sctp_get_sock(struct sctp_transport *tsp, void *p)
+{
+	struct sctp_endpoint *ep = tsp->asoc->ep;
+	struct sctp_comm_param *commp = p;
+	struct sock *sk = ep->base.sk;
+	struct netlink_callback *cb = commp->cb;
+	const struct inet_diag_req_v2 *r = commp->r;
+	struct sctp_association *assoc =
+		list_entry(ep->asocs.next, struct sctp_association, asocs);
+
+	/* find the ep only once through the transports by this condition */
+	if (tsp->asoc != assoc)
+		goto out;
+
+	if (r->sdiag_family != AF_UNSPEC && sk->sk_family != r->sdiag_family)
+		goto out;
+
+	sock_hold(sk);
+	cb->args[5] = (long)sk;
+
+	return 1;
+
 out:
 	cb->args[2]++;
-	return err;
+	return 0;
 }
 
 static int sctp_ep_dump(struct sctp_endpoint *ep, void *p)
@@ -480,10 +494,18 @@ skip:
 	 * 2 : to record the transport pos of this time's traversal
 	 * 3 : to mark if we have dumped the ep info of the current asoc
 	 * 4 : to work as a temporary variable to traversal list
+	 * 5 : to save the sk we get from travelsing the tsp list.
 	 */
 	if (!(idiag_states & ~(TCPF_LISTEN | TCPF_CLOSE)))
 		goto done;
-	sctp_for_each_transport(sctp_tsp_dump, net, cb->args[2], &commp);
+
+next:
+	cb->args[5] = 0;
+	sctp_for_each_transport(sctp_get_sock, net, cb->args[2], &commp);
+
+	if (cb->args[5] && !sctp_sock_dump((struct sock *)cb->args[5], &commp))
+		goto next;
+
 done:
 	cb->args[1] = cb->args[4];
 	cb->args[4] = 0;
diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
index 79dd66079dd7..9e9690b7afe1 100644
--- a/net/sctp/sm_make_chunk.c
+++ b/net/sctp/sm_make_chunk.c
@@ -706,20 +706,6 @@ nodata:
 	return retval;
 }
 
-static void sctp_set_prsctp_policy(struct sctp_chunk *chunk,
-				   const struct sctp_sndrcvinfo *sinfo)
-{
-	if (!chunk->asoc->prsctp_enable)
-		return;
-
-	if (SCTP_PR_TTL_ENABLED(sinfo->sinfo_flags))
-		chunk->prsctp_param =
-			jiffies + msecs_to_jiffies(sinfo->sinfo_timetolive);
-	else if (SCTP_PR_RTX_ENABLED(sinfo->sinfo_flags) ||
-		 SCTP_PR_PRIO_ENABLED(sinfo->sinfo_flags))
-		chunk->prsctp_param = sinfo->sinfo_timetolive;
-}
-
 /* Make a DATA chunk for the given association from the provided
  * parameters. However, do not populate the data payload.
  */
@@ -753,7 +739,6 @@ struct sctp_chunk *sctp_make_datafrag_empty(struct sctp_association *asoc,
 
 	retval->subh.data_hdr = sctp_addto_chunk(retval, sizeof(dp), &dp);
 	memcpy(&retval->sinfo, sinfo, sizeof(struct sctp_sndrcvinfo));
-	sctp_set_prsctp_policy(retval, sinfo);
 
 nodata:
 	return retval;
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 6cdc61c21438..fb02c7033307 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -4473,17 +4473,21 @@ int sctp_transport_lookup_process(int (*cb)(struct sctp_transport *, void *),
 				  const union sctp_addr *paddr, void *p)
 {
 	struct sctp_transport *transport;
-	int err = 0;
+	int err = -ENOENT;
 
 	rcu_read_lock();
 	transport = sctp_addrs_lookup_transport(net, laddr, paddr);
 	if (!transport || !sctp_transport_hold(transport))
 		goto out;
-	err = cb(transport, p);
+
+	sctp_association_hold(transport->asoc);
 	sctp_transport_put(transport);
 
-out:
 	rcu_read_unlock();
+	err = cb(transport, p);
+	sctp_association_put(transport->asoc);
+
+out:
 	return err;
 }
 EXPORT_SYMBOL_GPL(sctp_transport_lookup_process);
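The reshuffle above moves the cb() invocation out of the RCU read-side section: the association is pinned with sctp_association_hold() while still under rcu_read_lock(), the lock is dropped, and only then does the callback run, so it may sleep (for instance to take lock_sock()). A single-threaded user-space sketch of that hold/unlock/process/put ordering, with illustrative stand-ins for the kernel primitives:

#include <stdatomic.h>
#include <stdio.h>

struct assoc {
    atomic_int refcnt;
};

static void assoc_hold(struct assoc *a) { atomic_fetch_add(&a->refcnt, 1); }
static void assoc_put(struct assoc *a)  { atomic_fetch_sub(&a->refcnt, 1); }

/* stand-ins for rcu_read_lock()/rcu_read_unlock() */
static void read_lock(void)   { }
static void read_unlock(void) { }

static int lookup_process(struct assoc *a,
                          int (*cb)(struct assoc *, void *), void *p)
{
    int err = -2;               /* mirrors the new -ENOENT default */

    read_lock();
    if (!a)
        goto out;
    assoc_hold(a);              /* pin the object before unlocking */
    read_unlock();

    err = cb(a, p);             /* may sleep: no read lock held here */
    assoc_put(a);
    return err;
out:
    read_unlock();
    return err;
}

static int print_cb(struct assoc *a, void *p)
{
    (void)p;
    printf("refcnt while processing: %d\n", atomic_load(&a->refcnt));
    return 0;
}

int main(void)
{
    struct assoc a = { .refcnt = 1 };
    return lookup_process(&a, print_cb, NULL);
}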
diff --git a/net/vmw_vsock/af_vsock.c b/net/vmw_vsock/af_vsock.c
index 17dbbe64cd73..8a398b3fb532 100644
--- a/net/vmw_vsock/af_vsock.c
+++ b/net/vmw_vsock/af_vsock.c
@@ -465,6 +465,8 @@ void vsock_pending_work(struct work_struct *work)
 
 	if (vsock_is_pending(sk)) {
 		vsock_remove_pending(listener, sk);
+
+		listener->sk_ack_backlog--;
 	} else if (!vsk->rejected) {
 		/* We are not on the pending list and accept() did not reject
 		 * us, so we must have been accepted by our user process. We
@@ -475,8 +477,6 @@ void vsock_pending_work(struct work_struct *work)
 		goto out;
 	}
 
-	listener->sk_ack_backlog--;
-
 	/* We need to remove ourself from the global connected sockets list so
 	 * incoming packets can't find this socket, and to reduce the reference
 	 * count.
@@ -2010,5 +2010,5 @@ EXPORT_SYMBOL_GPL(vsock_core_get_transport);
 
 MODULE_AUTHOR("VMware, Inc.");
 MODULE_DESCRIPTION("VMware Virtual Socket Family");
-MODULE_VERSION("1.0.1.0-k");
+MODULE_VERSION("1.0.2.0-k");
 MODULE_LICENSE("GPL v2");
diff --git a/scripts/recordmcount.c b/scripts/recordmcount.c
index 42396a74405d..a68f03133df9 100644
--- a/scripts/recordmcount.c
+++ b/scripts/recordmcount.c
@@ -363,6 +363,7 @@ is_mcounted_section_name(char const *const txtname)
 		strcmp(".sched.text", txtname) == 0 ||
 		strcmp(".spinlock.text", txtname) == 0 ||
 		strcmp(".irqentry.text", txtname) == 0 ||
+		strcmp(".softirqentry.text", txtname) == 0 ||
 		strcmp(".kprobes.text", txtname) == 0 ||
 		strcmp(".text.unlikely", txtname) == 0;
 }
diff --git a/scripts/recordmcount.pl b/scripts/recordmcount.pl
index 96e2486a6fc4..2d48011bc362 100755
--- a/scripts/recordmcount.pl
+++ b/scripts/recordmcount.pl
@@ -134,6 +134,7 @@ my %text_sections = (
134 ".sched.text" => 1, 134 ".sched.text" => 1,
135 ".spinlock.text" => 1, 135 ".spinlock.text" => 1,
136 ".irqentry.text" => 1, 136 ".irqentry.text" => 1,
137 ".softirqentry.text" => 1,
137 ".kprobes.text" => 1, 138 ".kprobes.text" => 1,
138 ".text.unlikely" => 1, 139 ".text.unlikely" => 1,
139); 140);
diff --git a/security/keys/encrypted-keys/encrypted.c b/security/keys/encrypted-keys/encrypted.c
index 5adbfc32242f..17a06105ccb6 100644
--- a/security/keys/encrypted-keys/encrypted.c
+++ b/security/keys/encrypted-keys/encrypted.c
@@ -29,6 +29,7 @@
 #include <linux/rcupdate.h>
 #include <linux/scatterlist.h>
 #include <linux/ctype.h>
+#include <crypto/aes.h>
 #include <crypto/hash.h>
 #include <crypto/sha.h>
 #include <crypto/skcipher.h>
@@ -478,6 +479,7 @@ static int derived_key_encrypt(struct encrypted_key_payload *epayload,
 	struct crypto_skcipher *tfm;
 	struct skcipher_request *req;
 	unsigned int encrypted_datalen;
+	u8 iv[AES_BLOCK_SIZE];
 	unsigned int padlen;
 	char pad[16];
 	int ret;
@@ -500,8 +502,8 @@ static int derived_key_encrypt(struct encrypted_key_payload *epayload,
 	sg_init_table(sg_out, 1);
 	sg_set_buf(sg_out, epayload->encrypted_data, encrypted_datalen);
 
-	skcipher_request_set_crypt(req, sg_in, sg_out, encrypted_datalen,
-				   epayload->iv);
+	memcpy(iv, epayload->iv, sizeof(iv));
+	skcipher_request_set_crypt(req, sg_in, sg_out, encrypted_datalen, iv);
 	ret = crypto_skcipher_encrypt(req);
 	tfm = crypto_skcipher_reqtfm(req);
 	skcipher_request_free(req);
@@ -581,6 +583,7 @@ static int derived_key_decrypt(struct encrypted_key_payload *epayload,
 	struct crypto_skcipher *tfm;
 	struct skcipher_request *req;
 	unsigned int encrypted_datalen;
+	u8 iv[AES_BLOCK_SIZE];
 	char pad[16];
 	int ret;
 
@@ -599,8 +602,8 @@ static int derived_key_decrypt(struct encrypted_key_payload *epayload,
 		   epayload->decrypted_datalen);
 	sg_set_buf(&sg_out[1], pad, sizeof pad);
 
-	skcipher_request_set_crypt(req, sg_in, sg_out, encrypted_datalen,
-				   epayload->iv);
+	memcpy(iv, epayload->iv, sizeof(iv));
+	skcipher_request_set_crypt(req, sg_in, sg_out, encrypted_datalen, iv);
 	ret = crypto_skcipher_decrypt(req);
 	tfm = crypto_skcipher_reqtfm(req);
 	skcipher_request_free(req);
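Both hunks above copy epayload->iv into a stack buffer before handing it to skcipher_request_set_crypt(), since a CBC-style cipher typically advances its IV buffer in place and would otherwise clobber the stored IV. A standalone sketch of the pattern (encrypt_with_iv() is a hypothetical stand-in for the cipher call):

#include <assert.h>
#include <stdint.h>
#include <string.h>

#define AES_BLOCK_SIZE 16

/* hypothetical cipher call that scribbles on its IV argument */
static void encrypt_with_iv(uint8_t iv[AES_BLOCK_SIZE])
{
    memset(iv, 0xAA, AES_BLOCK_SIZE); /* pretend CBC advanced the IV */
}

int main(void)
{
    uint8_t stored_iv[AES_BLOCK_SIZE] = { 1, 2, 3, 4 };
    uint8_t iv[AES_BLOCK_SIZE];

    memcpy(iv, stored_iv, sizeof(iv)); /* the added memcpy() above */
    encrypt_with_iv(iv);               /* only the copy is clobbered */

    assert(stored_iv[0] == 1);         /* stored IV survives intact */
    return 0;
}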
diff --git a/tools/testing/nvdimm/test/nfit.c b/tools/testing/nvdimm/test/nfit.c
index dd48f421844c..f64c57bf1d4b 100644
--- a/tools/testing/nvdimm/test/nfit.c
+++ b/tools/testing/nvdimm/test/nfit.c
@@ -603,7 +603,8 @@ static int nfit_test0_alloc(struct nfit_test *t)
 			return -ENOMEM;
 		sprintf(t->label[i], "label%d", i);
 
-		t->flush[i] = test_alloc(t, sizeof(u64) * NUM_HINTS,
+		t->flush[i] = test_alloc(t, max(PAGE_SIZE,
+					sizeof(u64) * NUM_HINTS),
 				&t->flush_dma[i]);
 		if (!t->flush[i])
 			return -ENOMEM;
diff --git a/tools/testing/radix-tree/Makefile b/tools/testing/radix-tree/Makefile
index 3b530467148e..9d0919ed52a4 100644
--- a/tools/testing/radix-tree/Makefile
+++ b/tools/testing/radix-tree/Makefile
@@ -1,5 +1,5 @@
 
-CFLAGS += -I. -g -Wall -D_LGPL_SOURCE
+CFLAGS += -I. -g -O2 -Wall -D_LGPL_SOURCE
 LDFLAGS += -lpthread -lurcu
 TARGETS = main
 OFILES = main.o radix-tree.o linux.o test.o tag_check.o find_next_bit.o \
diff --git a/tools/testing/radix-tree/multiorder.c b/tools/testing/radix-tree/multiorder.c
index 39d9b9568fe2..05d7bc488971 100644
--- a/tools/testing/radix-tree/multiorder.c
+++ b/tools/testing/radix-tree/multiorder.c
@@ -124,6 +124,8 @@ static void multiorder_check(unsigned long index, int order)
 	unsigned long i;
 	unsigned long min = index & ~((1UL << order) - 1);
 	unsigned long max = min + (1UL << order);
+	void **slot;
+	struct item *item2 = item_create(min);
 	RADIX_TREE(tree, GFP_KERNEL);
 
 	printf("Multiorder index %ld, order %d\n", index, order);
@@ -139,13 +141,19 @@ static void multiorder_check(unsigned long index, int order)
 		item_check_absent(&tree, i);
 	for (i = max; i < 2*max; i++)
 		item_check_absent(&tree, i);
+	for (i = min; i < max; i++)
+		assert(radix_tree_insert(&tree, i, item2) == -EEXIST);
+
+	slot = radix_tree_lookup_slot(&tree, index);
+	free(*slot);
+	radix_tree_replace_slot(slot, item2);
 	for (i = min; i < max; i++) {
-		static void *entry = (void *)
-			(0xA0 | RADIX_TREE_EXCEPTIONAL_ENTRY);
-		assert(radix_tree_insert(&tree, i, entry) == -EEXIST);
+		struct item *item = item_lookup(&tree, i);
+		assert(item != 0);
+		assert(item->index == min);
 	}
 
-	assert(item_delete(&tree, index) != 0);
+	assert(item_delete(&tree, min) != 0);
 
 	for (i = 0; i < 2*max; i++)
 		item_check_absent(&tree, i);