-rw-r--r--  Documentation/input/event-codes.txt  13
-rw-r--r--  MAINTAINERS  6
-rw-r--r--  Makefile  4
-rw-r--r--  arch/arm/boot/dts/hi3620.dtsi  2
-rw-r--r--  arch/arm/boot/dts/omap3-n900.dts  2
-rw-r--r--  arch/arm/boot/dts/r8a7791.dtsi  4
-rw-r--r--  arch/arm/crypto/aesbs-glue.c  10
-rw-r--r--  arch/arm/include/asm/mach/arch.h  1
-rw-r--r--  arch/arm/kernel/devtree.c  8
-rw-r--r--  arch/arm/mach-exynos/exynos.c  10
-rw-r--r--  arch/arm/mach-omap2/gpmc-nand.c  18
-rw-r--r--  arch/arm/mach-omap2/omap4-common.c  4
-rw-r--r--  arch/arm/xen/grant-table.c  5
-rw-r--r--  arch/arm64/crypto/aes-glue.c  12
-rw-r--r--  arch/arm64/mm/init.c  17
-rw-r--r--  arch/blackfin/configs/BF609-EZKIT_defconfig  2
-rw-r--r--  arch/blackfin/kernel/vmlinux.lds.S  2
-rw-r--r--  arch/blackfin/mach-bf533/boards/blackstamp.c  1
-rw-r--r--  arch/blackfin/mach-bf537/boards/cm_bf537e.c  1
-rw-r--r--  arch/blackfin/mach-bf537/boards/cm_bf537u.c  1
-rw-r--r--  arch/blackfin/mach-bf537/boards/tcm_bf537.c  1
-rw-r--r--  arch/blackfin/mach-bf548/boards/ezkit.c  6
-rw-r--r--  arch/blackfin/mach-bf561/boards/acvilon.c  1
-rw-r--r--  arch/blackfin/mach-bf561/boards/cm_bf561.c  1
-rw-r--r--  arch/blackfin/mach-bf561/boards/ezkit.c  1
-rw-r--r--  arch/blackfin/mach-bf609/boards/ezkit.c  20
-rw-r--r--  arch/blackfin/mach-bf609/include/mach/pm.h  5
-rw-r--r--  arch/blackfin/mach-bf609/pm.c  4
-rw-r--r--  arch/blackfin/mach-common/ints-priority.c  2
-rw-r--r--  arch/parisc/include/uapi/asm/signal.h  2
-rw-r--r--  arch/parisc/mm/init.c  1
-rw-r--r--  arch/powerpc/include/asm/cputable.h  1
-rw-r--r--  arch/powerpc/include/asm/mmu-hash64.h  3
-rw-r--r--  arch/powerpc/kernel/cputable.c  20
-rw-r--r--  arch/powerpc/kernel/rtas_flash.c  6
-rw-r--r--  arch/powerpc/lib/mem_64.S  2
-rw-r--r--  arch/powerpc/lib/sstep.c  10
-rw-r--r--  arch/powerpc/perf/core-book3s.c  6
-rw-r--r--  arch/powerpc/platforms/powernv/opal-elog.c  4
-rw-r--r--  arch/powerpc/platforms/pseries/dlpar.c  1
-rw-r--r--  arch/powerpc/platforms/pseries/reconfig.c  1
-rw-r--r--  arch/sh/Makefile  3
-rw-r--r--  arch/x86/kernel/cpu/intel.c  22
-rw-r--r--  arch/x86/kernel/cpu/intel_cacheinfo.c  12
-rw-r--r--  arch/x86/kernel/cpu/mcheck/mce.c  10
-rw-r--r--  arch/x86/kernel/cpu/perf_event.c  3
-rw-r--r--  arch/x86/kernel/cpu/perf_event.h  12
-rw-r--r--  arch/x86/kernel/cpu/perf_event_intel.c  69
-rw-r--r--  arch/x86/kernel/cpu/perf_event_intel_ds.c  6
-rw-r--r--  arch/x86/kernel/cpu/perf_event_intel_uncore.c  11
-rw-r--r--  arch/x86/kernel/entry_32.S  9
-rw-r--r--  arch/x86/kernel/kprobes/core.c  3
-rw-r--r--  arch/x86/xen/grant-table.c  148
-rw-r--r--  arch/xtensa/kernel/vectors.S  158
-rw-r--r--  arch/xtensa/kernel/vmlinux.lds.S  4
-rw-r--r--  arch/xtensa/mm/init.c  2
-rw-r--r--  drivers/ata/libata-core.c  16
-rw-r--r--  drivers/block/zram/zram_drv.c  22
-rw-r--r--  drivers/firewire/ohci.c  4
-rw-r--r--  drivers/gpio/gpio-rcar.c  1
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c  25
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_render_state.c  4
-rw-r--r--  drivers/gpu/drm/i915/i915_irq.c  11
-rw-r--r--  drivers/gpu/drm/radeon/cik.c  2
-rw-r--r--  drivers/gpu/drm/radeon/evergreen.c  1
-rw-r--r--  drivers/gpu/drm/radeon/r600.c  1
-rw-r--r--  drivers/gpu/drm/radeon/radeon.h  15
-rw-r--r--  drivers/gpu/drm/radeon/radeon_cs.c  20
-rw-r--r--  drivers/gpu/drm/radeon/radeon_device.c  22
-rw-r--r--  drivers/gpu/drm/radeon/radeon_drv.c  4
-rw-r--r--  drivers/gpu/drm/radeon/radeon_kms.c  26
-rw-r--r--  drivers/gpu/drm/radeon/radeon_vm.c  87
-rw-r--r--  drivers/gpu/drm/radeon/si.c  1
-rw-r--r--  drivers/gpu/drm/radeon/trinity_dpm.c  15
-rw-r--r--  drivers/hwmon/smsc47m192.c  4
-rw-r--r--  drivers/input/input.c  6
-rw-r--r--  drivers/input/keyboard/st-keyscan.c  2
-rw-r--r--  drivers/input/misc/sirfsoc-onkey.c  2
-rw-r--r--  drivers/input/mouse/synaptics.c  5
-rw-r--r--  drivers/input/serio/i8042-x86ia64io.h  7
-rw-r--r--  drivers/input/tablet/wacom_wac.c  28
-rw-r--r--  drivers/input/touchscreen/ti_am335x_tsc.c  5
-rw-r--r--  drivers/isdn/gigaset/bas-gigaset.c  1
-rw-r--r--  drivers/net/can/c_can/c_can_platform.c  3
-rw-r--r--  drivers/net/ethernet/amd/xgbe/xgbe-main.c  3
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x.h  1
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c  9
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c  1
-rw-r--r--  drivers/net/ethernet/broadcom/genet/bcmgenet.c  5
-rw-r--r--  drivers/net/ethernet/sun/sunvnet.c  22
-rw-r--r--  drivers/net/hyperv/netvsc.c  4
-rw-r--r--  drivers/net/phy/mdio_bus.c  1
-rw-r--r--  drivers/net/phy/phy_device.c  15
-rw-r--r--  drivers/net/usb/cdc_ether.c  16
-rw-r--r--  drivers/net/usb/r8152.c  7
-rw-r--r--  drivers/net/vxlan.c  2
-rw-r--r--  drivers/of/fdt.c  66
-rw-r--r--  drivers/parport/Kconfig  12
-rw-r--r--  drivers/pinctrl/pinctrl-st.c  2
-rw-r--r--  drivers/xen/grant-table.c  9
-rw-r--r--  fs/afs/main.c  4
-rw-r--r--  fs/coredump.c  2
-rw-r--r--  fs/direct-io.c  14
-rw-r--r--  fs/fuse/inode.c  5
-rw-r--r--  fs/namei.c  3
-rw-r--r--  fs/nfsd/nfs4xdr.c  4
-rw-r--r--  fs/xattr.c  2
-rw-r--r--  include/dt-bindings/pinctrl/dra.h  7
-rw-r--r--  include/linux/libata.h  1
-rw-r--r--  include/linux/of_fdt.h  3
-rw-r--r--  include/linux/pagemap.h  12
-rw-r--r--  include/net/ip.h  11
-rw-r--r--  include/uapi/linux/fuse.h  3
-rw-r--r--  include/xen/grant_table.h  1
-rw-r--r--  kernel/events/core.c  32
-rw-r--r--  kernel/kprobes.c  14
-rw-r--r--  mm/hugetlb.c  1
-rw-r--r--  mm/memory-failure.c  4
-rw-r--r--  mm/memory.c  3
-rw-r--r--  mm/migrate.c  5
-rw-r--r--  mm/page_alloc.c  15
-rw-r--r--  mm/rmap.c  10
-rw-r--r--  mm/shmem.c  102
-rw-r--r--  mm/slab_common.c  2
-rw-r--r--  mm/truncate.c  11
-rw-r--r--  net/compat.c  9
-rw-r--r--  net/core/iovec.c  6
-rw-r--r--  net/core/neighbour.c  2
-rw-r--r--  net/ipv4/route.c  32
-rw-r--r--  net/ipv6/ip6_output.c  2
-rw-r--r--  net/netfilter/ipvs/ip_vs_conn.c  1
-rw-r--r--  net/sctp/associola.c  1
-rw-r--r--  net/xfrm/xfrm_policy.c  2
-rw-r--r--  net/xfrm/xfrm_user.c  7
-rw-r--r--  sound/firewire/bebob/bebob_maudio.c  53
-rw-r--r--  virt/kvm/arm/vgic.c  24
136 files changed, 1148 insertions, 470 deletions
diff --git a/Documentation/input/event-codes.txt b/Documentation/input/event-codes.txt
index f1ea2c69648d..c587a966413e 100644
--- a/Documentation/input/event-codes.txt
+++ b/Documentation/input/event-codes.txt
@@ -281,6 +281,19 @@ gestures can normally be extracted from it.
 If INPUT_PROP_SEMI_MT is not set, the device is assumed to be a true MT
 device.
 
+INPUT_PROP_TOPBUTTONPAD:
+-----------------------
+Some laptops, most notably the Lenovo *40 series provide a trackstick
+device but do not have physical buttons associated with the trackstick
+device. Instead, the top area of the touchpad is marked to show
+visual/haptic areas for left, middle, right buttons intended to be used
+with the trackstick.
+
+If INPUT_PROP_TOPBUTTONPAD is set, userspace should emulate buttons
+accordingly. This property does not affect kernel behavior.
+The kernel does not provide button emulation for such devices but treats
+them as any other INPUT_PROP_BUTTONPAD device.
+
 Guidelines:
 ==========
 The guidelines below ensure proper single-touch and multi-finger functionality.
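
A brief usage sketch for the property documented above (not part of the patch; it assumes headers from this kernel release and an already-opened /dev/input/eventN file descriptor): userspace can test for INPUT_PROP_TOPBUTTONPAD with the standard EVIOCGPROP evdev ioctl.

#include <linux/input.h>
#include <string.h>
#include <sys/ioctl.h>

/* Return non-zero if the evdev device behind fd advertises INPUT_PROP_TOPBUTTONPAD. */
static int has_topbuttonpad(int fd)
{
	unsigned char props[INPUT_PROP_MAX / 8 + 1];

	memset(props, 0, sizeof(props));
	if (ioctl(fd, EVIOCGPROP(sizeof(props)), props) < 0)
		return 0;
	return props[INPUT_PROP_TOPBUTTONPAD / 8] &
	       (1 << (INPUT_PROP_TOPBUTTONPAD % 8));
}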
diff --git a/MAINTAINERS b/MAINTAINERS
index 9faf35c9f899..c77a0effe5dd 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -6961,6 +6961,12 @@ L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:	Maintained
 F:	drivers/pinctrl/pinctrl-at91.c
 
+PIN CONTROLLER - RENESAS
+M:	Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+L:	linux-sh@vger.kernel.org
+S:	Maintained
+F:	drivers/pinctrl/sh-pfc/
+
 PIN CONTROLLER - SAMSUNG
 M:	Tomasz Figa <t.figa@samsung.com>
 M:	Thomas Abraham <thomas.abraham@linaro.org>
diff --git a/Makefile b/Makefile
index 6b2774145d66..f6a7794e4db4 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 3
 PATCHLEVEL = 16
 SUBLEVEL = 0
-EXTRAVERSION = -rc6
+EXTRAVERSION = -rc7
 NAME = Shuffling Zombie Juror
 
 # *DOCUMENTATION*
@@ -688,6 +688,8 @@ KBUILD_CFLAGS += -fomit-frame-pointer
 endif
 endif
 
+KBUILD_CFLAGS	+= $(call cc-option, -fno-var-tracking-assignments)
+
 ifdef CONFIG_DEBUG_INFO
 KBUILD_CFLAGS	+= -g
 KBUILD_AFLAGS	+= -Wa,-gdwarf-2
diff --git a/arch/arm/boot/dts/hi3620.dtsi b/arch/arm/boot/dts/hi3620.dtsi
index ab1116d086be..83a5b8685bd9 100644
--- a/arch/arm/boot/dts/hi3620.dtsi
+++ b/arch/arm/boot/dts/hi3620.dtsi
@@ -73,7 +73,7 @@
 
 	L2: l2-cache {
 		compatible = "arm,pl310-cache";
-		reg = <0xfc10000 0x100000>;
+		reg = <0x100000 0x100000>;
 		interrupts = <0 15 4>;
 		cache-unified;
 		cache-level = <2>;
diff --git a/arch/arm/boot/dts/omap3-n900.dts b/arch/arm/boot/dts/omap3-n900.dts
index 1fe45d1f75ec..b15f1a77d684 100644
--- a/arch/arm/boot/dts/omap3-n900.dts
+++ b/arch/arm/boot/dts/omap3-n900.dts
@@ -353,7 +353,7 @@
 	};
 
 	twl_power: power {
-		compatible = "ti,twl4030-power-n900", "ti,twl4030-power-idle-osc-off";
+		compatible = "ti,twl4030-power-n900";
 		ti,use_poweroff;
 	};
 };
diff --git a/arch/arm/boot/dts/r8a7791.dtsi b/arch/arm/boot/dts/r8a7791.dtsi
index 8d7ffaeff6e0..79f68acfd5d4 100644
--- a/arch/arm/boot/dts/r8a7791.dtsi
+++ b/arch/arm/boot/dts/r8a7791.dtsi
@@ -540,9 +540,9 @@
 		#clock-cells = <0>;
 		clock-output-names = "sd1";
 	};
-	sd2_clk: sd3_clk@e615007c {
+	sd2_clk: sd3_clk@e615026c {
 		compatible = "renesas,r8a7791-div6-clock", "renesas,cpg-div6-clock";
-		reg = <0 0xe615007c 0 4>;
+		reg = <0 0xe615026c 0 4>;
 		clocks = <&pll1_div2_clk>;
 		#clock-cells = <0>;
 		clock-output-names = "sd2";
diff --git a/arch/arm/crypto/aesbs-glue.c b/arch/arm/crypto/aesbs-glue.c
index 4522366da759..15468fbbdea3 100644
--- a/arch/arm/crypto/aesbs-glue.c
+++ b/arch/arm/crypto/aesbs-glue.c
@@ -137,7 +137,7 @@ static int aesbs_cbc_encrypt(struct blkcipher_desc *desc,
 				dst += AES_BLOCK_SIZE;
 			} while (--blocks);
 		}
-		err = blkcipher_walk_done(desc, &walk, 0);
+		err = blkcipher_walk_done(desc, &walk, walk.nbytes % AES_BLOCK_SIZE);
 	}
 	return err;
 }
@@ -158,7 +158,7 @@ static int aesbs_cbc_decrypt(struct blkcipher_desc *desc,
 			bsaes_cbc_encrypt(walk.src.virt.addr, walk.dst.virt.addr,
 					  walk.nbytes, &ctx->dec, walk.iv);
 			kernel_neon_end();
-			err = blkcipher_walk_done(desc, &walk, 0);
+			err = blkcipher_walk_done(desc, &walk, walk.nbytes % AES_BLOCK_SIZE);
 	}
 	while (walk.nbytes) {
 		u32 blocks = walk.nbytes / AES_BLOCK_SIZE;
@@ -182,7 +182,7 @@ static int aesbs_cbc_decrypt(struct blkcipher_desc *desc,
 			dst += AES_BLOCK_SIZE;
 			src += AES_BLOCK_SIZE;
 		} while (--blocks);
-		err = blkcipher_walk_done(desc, &walk, 0);
+		err = blkcipher_walk_done(desc, &walk, walk.nbytes % AES_BLOCK_SIZE);
 	}
 	return err;
 }
@@ -268,7 +268,7 @@ static int aesbs_xts_encrypt(struct blkcipher_desc *desc,
 		bsaes_xts_encrypt(walk.src.virt.addr, walk.dst.virt.addr,
 				  walk.nbytes, &ctx->enc, walk.iv);
 		kernel_neon_end();
-		err = blkcipher_walk_done(desc, &walk, 0);
+		err = blkcipher_walk_done(desc, &walk, walk.nbytes % AES_BLOCK_SIZE);
 	}
 	return err;
 }
@@ -292,7 +292,7 @@ static int aesbs_xts_decrypt(struct blkcipher_desc *desc,
 		bsaes_xts_decrypt(walk.src.virt.addr, walk.dst.virt.addr,
 				  walk.nbytes, &ctx->dec, walk.iv);
 		kernel_neon_end();
-		err = blkcipher_walk_done(desc, &walk, 0);
+		err = blkcipher_walk_done(desc, &walk, walk.nbytes % AES_BLOCK_SIZE);
 	}
 	return err;
 }
diff --git a/arch/arm/include/asm/mach/arch.h b/arch/arm/include/asm/mach/arch.h
index 060a75e99263..0406cb3f1af7 100644
--- a/arch/arm/include/asm/mach/arch.h
+++ b/arch/arm/include/asm/mach/arch.h
@@ -50,6 +50,7 @@ struct machine_desc {
 	struct smp_operations	*smp;		/* SMP operations	*/
 	bool			(*smp_init)(void);
 	void			(*fixup)(struct tag *, char **);
+	void			(*dt_fixup)(void);
 	void			(*init_meminfo)(void);
 	void			(*reserve)(void);/* reserve mem blocks	*/
 	void			(*map_io)(void);/* IO mapping function	*/
diff --git a/arch/arm/kernel/devtree.c b/arch/arm/kernel/devtree.c
index e94a157ddff1..11c54de9f8cf 100644
--- a/arch/arm/kernel/devtree.c
+++ b/arch/arm/kernel/devtree.c
@@ -212,7 +212,7 @@ const struct machine_desc * __init setup_machine_fdt(unsigned int dt_phys)
 	mdesc_best = &__mach_desc_GENERIC_DT;
 #endif
 
-	if (!dt_phys || !early_init_dt_scan(phys_to_virt(dt_phys)))
+	if (!dt_phys || !early_init_dt_verify(phys_to_virt(dt_phys)))
 		return NULL;
 
 	mdesc = of_flat_dt_match_machine(mdesc_best, arch_get_next_mach);
@@ -237,6 +237,12 @@ const struct machine_desc * __init setup_machine_fdt(unsigned int dt_phys)
 		dump_machine_table(); /* does not return */
 	}
 
+	/* We really don't want to do this, but sometimes firmware provides buggy data */
+	if (mdesc->dt_fixup)
+		mdesc->dt_fixup();
+
+	early_init_dt_scan_nodes();
+
 	/* Change machine number to match the mdesc we're using */
 	__machine_arch_type = mdesc->nr;
 
diff --git a/arch/arm/mach-exynos/exynos.c b/arch/arm/mach-exynos/exynos.c
index 46d893fcbe85..66c9b9614f3c 100644
--- a/arch/arm/mach-exynos/exynos.c
+++ b/arch/arm/mach-exynos/exynos.c
@@ -335,6 +335,15 @@ static void __init exynos_reserve(void)
 #endif
 }
 
+static void __init exynos_dt_fixup(void)
+{
+	/*
+	 * Some versions of uboot pass garbage entries in the memory node,
+	 * use the old CONFIG_ARM_NR_BANKS
+	 */
+	of_fdt_limit_memory(8);
+}
+
 DT_MACHINE_START(EXYNOS_DT, "SAMSUNG EXYNOS (Flattened Device Tree)")
 	/* Maintainer: Thomas Abraham <thomas.abraham@linaro.org> */
 	/* Maintainer: Kukjin Kim <kgene.kim@samsung.com> */
@@ -348,4 +357,5 @@ DT_MACHINE_START(EXYNOS_DT, "SAMSUNG EXYNOS (Flattened Device Tree)")
 	.dt_compat	= exynos_dt_compat,
 	.restart	= exynos_restart,
 	.reserve	= exynos_reserve,
+	.dt_fixup	= exynos_dt_fixup,
 MACHINE_END
diff --git a/arch/arm/mach-omap2/gpmc-nand.c b/arch/arm/mach-omap2/gpmc-nand.c
index 17cd39360afe..93914d220069 100644
--- a/arch/arm/mach-omap2/gpmc-nand.c
+++ b/arch/arm/mach-omap2/gpmc-nand.c
@@ -50,6 +50,16 @@ static bool gpmc_hwecc_bch_capable(enum omap_ecc ecc_opt)
 	    soc_is_omap54xx() || soc_is_dra7xx())
 		return 1;
 
+	if (ecc_opt == OMAP_ECC_BCH4_CODE_HW_DETECTION_SW ||
+	    ecc_opt == OMAP_ECC_BCH8_CODE_HW_DETECTION_SW) {
+		if (cpu_is_omap24xx())
+			return 0;
+		else if (cpu_is_omap3630() && (GET_OMAP_REVISION() == 0))
+			return 0;
+		else
+			return 1;
+	}
+
 	/* OMAP3xxx do not have ELM engine, so cannot support ECC schemes
 	 * which require H/W based ECC error detection */
 	if ((cpu_is_omap34xx() || cpu_is_omap3630()) &&
@@ -57,14 +67,6 @@ static bool gpmc_hwecc_bch_capable(enum omap_ecc ecc_opt)
 	    (ecc_opt == OMAP_ECC_BCH8_CODE_HW)))
 		return 0;
 
-	/*
-	 * For now, assume 4-bit mode is only supported on OMAP3630 ES1.x, x>=1
-	 * and AM33xx derivates. Other chips may be added if confirmed to work.
-	 */
-	if ((ecc_opt == OMAP_ECC_BCH4_CODE_HW_DETECTION_SW) &&
-	    (!cpu_is_omap3630() || (GET_OMAP_REVISION() == 0)))
-		return 0;
-
 	/* legacy platforms support only HAM1 (1-bit Hamming) ECC scheme */
 	if (ecc_opt == OMAP_ECC_HAM1_CODE_HW)
 		return 1;
diff --git a/arch/arm/mach-omap2/omap4-common.c b/arch/arm/mach-omap2/omap4-common.c
index 539e8106eb96..a0fe747634c1 100644
--- a/arch/arm/mach-omap2/omap4-common.c
+++ b/arch/arm/mach-omap2/omap4-common.c
@@ -168,6 +168,10 @@ static void omap4_l2c310_write_sec(unsigned long val, unsigned reg)
 		smc_op = OMAP4_MON_L2X0_PREFETCH_INDEX;
 		break;
 
+	case L310_POWER_CTRL:
+		pr_info_once("OMAP L2C310: ROM does not support power control setting\n");
+		return;
+
 	default:
 		WARN_ONCE(1, "OMAP L2C310: ignoring write to reg 0x%x\n", reg);
 		return;
diff --git a/arch/arm/xen/grant-table.c b/arch/arm/xen/grant-table.c
index 859a9bb002d5..91cf08ba1e95 100644
--- a/arch/arm/xen/grant-table.c
+++ b/arch/arm/xen/grant-table.c
@@ -51,3 +51,8 @@ int arch_gnttab_map_status(uint64_t *frames, unsigned long nr_gframes,
 {
 	return -ENOSYS;
 }
+
+int arch_gnttab_init(unsigned long nr_shared, unsigned long nr_status)
+{
+	return 0;
+}
diff --git a/arch/arm64/crypto/aes-glue.c b/arch/arm64/crypto/aes-glue.c
index 60f2f4c12256..79cd911ef88c 100644
--- a/arch/arm64/crypto/aes-glue.c
+++ b/arch/arm64/crypto/aes-glue.c
@@ -106,7 +106,7 @@ static int ecb_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
 	for (first = 1; (blocks = (walk.nbytes / AES_BLOCK_SIZE)); first = 0) {
 		aes_ecb_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
 				(u8 *)ctx->key_enc, rounds, blocks, first);
-		err = blkcipher_walk_done(desc, &walk, 0);
+		err = blkcipher_walk_done(desc, &walk, walk.nbytes % AES_BLOCK_SIZE);
 	}
 	kernel_neon_end();
 	return err;
@@ -128,7 +128,7 @@ static int ecb_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
 	for (first = 1; (blocks = (walk.nbytes / AES_BLOCK_SIZE)); first = 0) {
 		aes_ecb_decrypt(walk.dst.virt.addr, walk.src.virt.addr,
 				(u8 *)ctx->key_dec, rounds, blocks, first);
-		err = blkcipher_walk_done(desc, &walk, 0);
+		err = blkcipher_walk_done(desc, &walk, walk.nbytes % AES_BLOCK_SIZE);
 	}
 	kernel_neon_end();
 	return err;
@@ -151,7 +151,7 @@ static int cbc_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
 		aes_cbc_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
 				(u8 *)ctx->key_enc, rounds, blocks, walk.iv,
 				first);
-		err = blkcipher_walk_done(desc, &walk, 0);
+		err = blkcipher_walk_done(desc, &walk, walk.nbytes % AES_BLOCK_SIZE);
 	}
 	kernel_neon_end();
 	return err;
@@ -174,7 +174,7 @@ static int cbc_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
 		aes_cbc_decrypt(walk.dst.virt.addr, walk.src.virt.addr,
 				(u8 *)ctx->key_dec, rounds, blocks, walk.iv,
 				first);
-		err = blkcipher_walk_done(desc, &walk, 0);
+		err = blkcipher_walk_done(desc, &walk, walk.nbytes % AES_BLOCK_SIZE);
 	}
 	kernel_neon_end();
 	return err;
@@ -243,7 +243,7 @@ static int xts_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
 		aes_xts_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
 				(u8 *)ctx->key1.key_enc, rounds, blocks,
 				(u8 *)ctx->key2.key_enc, walk.iv, first);
-		err = blkcipher_walk_done(desc, &walk, 0);
+		err = blkcipher_walk_done(desc, &walk, walk.nbytes % AES_BLOCK_SIZE);
 	}
 	kernel_neon_end();
 
@@ -267,7 +267,7 @@ static int xts_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
 		aes_xts_decrypt(walk.dst.virt.addr, walk.src.virt.addr,
 				(u8 *)ctx->key1.key_dec, rounds, blocks,
 				(u8 *)ctx->key2.key_enc, walk.iv, first);
-		err = blkcipher_walk_done(desc, &walk, 0);
+		err = blkcipher_walk_done(desc, &walk, walk.nbytes % AES_BLOCK_SIZE);
 	}
 	kernel_neon_end();
 
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index f43db8a69262..e90c5426fe14 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -60,6 +60,17 @@ static int __init early_initrd(char *p)
 early_param("initrd", early_initrd);
 #endif
 
+/*
+ * Return the maximum physical address for ZONE_DMA (DMA_BIT_MASK(32)). It
+ * currently assumes that for memory starting above 4G, 32-bit devices will
+ * use a DMA offset.
+ */
+static phys_addr_t max_zone_dma_phys(void)
+{
+	phys_addr_t offset = memblock_start_of_DRAM() & GENMASK_ULL(63, 32);
+	return min(offset + (1ULL << 32), memblock_end_of_DRAM());
+}
+
 static void __init zone_sizes_init(unsigned long min, unsigned long max)
 {
 	struct memblock_region *reg;
@@ -70,9 +81,7 @@ static void __init zone_sizes_init(unsigned long min, unsigned long max)
 
 	/* 4GB maximum for 32-bit only capable devices */
 	if (IS_ENABLED(CONFIG_ZONE_DMA)) {
-		unsigned long max_dma_phys =
-			(unsigned long)(dma_to_phys(NULL, DMA_BIT_MASK(32)) + 1);
-		max_dma = max(min, min(max, max_dma_phys >> PAGE_SHIFT));
+		max_dma = PFN_DOWN(max_zone_dma_phys());
 		zone_size[ZONE_DMA] = max_dma - min;
 	}
 	zone_size[ZONE_NORMAL] = max - max_dma;
@@ -146,7 +155,7 @@ void __init arm64_memblock_init(void)
 
 	/* 4GB maximum for 32-bit only capable devices */
 	if (IS_ENABLED(CONFIG_ZONE_DMA))
-		dma_phys_limit = dma_to_phys(NULL, DMA_BIT_MASK(32)) + 1;
+		dma_phys_limit = max_zone_dma_phys();
 	dma_contiguous_reserve(dma_phys_limit);
 
 	memblock_allow_resize();
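
As a worked illustration of the arm64 helper added above (a stand-alone sketch, not kernel code; the DRAM boundaries below are made-up values standing in for memblock_start_of_DRAM()/memblock_end_of_DRAM()): max_zone_dma_phys() masks the DRAM base down to a 4 GiB boundary and caps the ZONE_DMA limit at that offset plus 4 GiB, or at the end of DRAM if that comes first.

#include <stdint.h>
#include <stdio.h>

static const uint64_t dram_start = 0x8080000000ULL;	/* example: RAM begins above 4 GiB */
static const uint64_t dram_end   = 0x8180000000ULL;

static uint64_t max_zone_dma_phys_example(void)
{
	/* GENMASK_ULL(63, 32): keep bits 63..32, i.e. round down to a 4 GiB boundary. */
	uint64_t offset = dram_start & 0xffffffff00000000ULL;
	uint64_t limit  = offset + (1ULL << 32);	/* offset + 4 GiB */

	return limit < dram_end ? limit : dram_end;	/* min(offset + 4G, end of DRAM) */
}

int main(void)
{
	/* Prints 0x8100000000 for the example base above. */
	printf("ZONE_DMA limit: %#llx\n",
	       (unsigned long long)max_zone_dma_phys_example());
	return 0;
}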
diff --git a/arch/blackfin/configs/BF609-EZKIT_defconfig b/arch/blackfin/configs/BF609-EZKIT_defconfig
index a7e9bfd84183..fcec5ce71392 100644
--- a/arch/blackfin/configs/BF609-EZKIT_defconfig
+++ b/arch/blackfin/configs/BF609-EZKIT_defconfig
@@ -102,7 +102,7 @@ CONFIG_I2C_CHARDEV=y
 CONFIG_I2C_BLACKFIN_TWI=y
 CONFIG_I2C_BLACKFIN_TWI_CLK_KHZ=100
 CONFIG_SPI=y
-CONFIG_SPI_BFIN_V3=y
+CONFIG_SPI_ADI_V3=y
 CONFIG_GPIOLIB=y
 CONFIG_GPIO_SYSFS=y
 # CONFIG_HWMON is not set
diff --git a/arch/blackfin/kernel/vmlinux.lds.S b/arch/blackfin/kernel/vmlinux.lds.S
index ba35864b2b74..c9eec84aa258 100644
--- a/arch/blackfin/kernel/vmlinux.lds.S
+++ b/arch/blackfin/kernel/vmlinux.lds.S
@@ -145,7 +145,7 @@ SECTIONS
 
 	.text_l1 L1_CODE_START : AT(LOADADDR(.exit.data) + SIZEOF(.exit.data))
 #else
-	.init.data : AT(__data_lma + __data_len)
+	.init.data : AT(__data_lma + __data_len + 32)
 	{
 		__sinitdata = .;
 		INIT_DATA
diff --git a/arch/blackfin/mach-bf533/boards/blackstamp.c b/arch/blackfin/mach-bf533/boards/blackstamp.c
index 63b0e4fe760c..0ccf0cf4daaf 100644
--- a/arch/blackfin/mach-bf533/boards/blackstamp.c
+++ b/arch/blackfin/mach-bf533/boards/blackstamp.c
@@ -20,6 +20,7 @@
 #include <linux/spi/spi.h>
 #include <linux/spi/flash.h>
 #include <linux/irq.h>
+#include <linux/gpio.h>
 #include <linux/i2c.h>
 #include <asm/dma.h>
 #include <asm/bfin5xx_spi.h>
diff --git a/arch/blackfin/mach-bf537/boards/cm_bf537e.c b/arch/blackfin/mach-bf537/boards/cm_bf537e.c
index c65c6dbda3da..1e7290ef3525 100644
--- a/arch/blackfin/mach-bf537/boards/cm_bf537e.c
+++ b/arch/blackfin/mach-bf537/boards/cm_bf537e.c
@@ -21,6 +21,7 @@
 #endif
 #include <linux/ata_platform.h>
 #include <linux/irq.h>
+#include <linux/gpio.h>
 #include <asm/dma.h>
 #include <asm/bfin5xx_spi.h>
 #include <asm/portmux.h>
diff --git a/arch/blackfin/mach-bf537/boards/cm_bf537u.c b/arch/blackfin/mach-bf537/boards/cm_bf537u.c
index af58454b4bff..c7495dc74690 100644
--- a/arch/blackfin/mach-bf537/boards/cm_bf537u.c
+++ b/arch/blackfin/mach-bf537/boards/cm_bf537u.c
@@ -21,6 +21,7 @@
 #endif
 #include <linux/ata_platform.h>
 #include <linux/irq.h>
+#include <linux/gpio.h>
 #include <asm/dma.h>
 #include <asm/bfin5xx_spi.h>
 #include <asm/portmux.h>
diff --git a/arch/blackfin/mach-bf537/boards/tcm_bf537.c b/arch/blackfin/mach-bf537/boards/tcm_bf537.c
index a0211225748d..6b988ad653d8 100644
--- a/arch/blackfin/mach-bf537/boards/tcm_bf537.c
+++ b/arch/blackfin/mach-bf537/boards/tcm_bf537.c
@@ -21,6 +21,7 @@
 #endif
 #include <linux/ata_platform.h>
 #include <linux/irq.h>
+#include <linux/gpio.h>
 #include <asm/dma.h>
 #include <asm/bfin5xx_spi.h>
 #include <asm/portmux.h>
diff --git a/arch/blackfin/mach-bf548/boards/ezkit.c b/arch/blackfin/mach-bf548/boards/ezkit.c
index 90138e6112c1..1fe7ff286619 100644
--- a/arch/blackfin/mach-bf548/boards/ezkit.c
+++ b/arch/blackfin/mach-bf548/boards/ezkit.c
@@ -2118,7 +2118,7 @@ static struct pinctrl_map __initdata bfin_pinmux_map[] = {
 	PIN_MAP_MUX_GROUP_DEFAULT("bfin-rotary", "pinctrl-adi2.0", NULL, "rotary"),
 	PIN_MAP_MUX_GROUP_DEFAULT("bfin_can.0", "pinctrl-adi2.0", NULL, "can0"),
 	PIN_MAP_MUX_GROUP_DEFAULT("bfin_can.1", "pinctrl-adi2.0", NULL, "can1"),
-	PIN_MAP_MUX_GROUP_DEFAULT("bf54x-lq043", "pinctrl-adi2.0", NULL, "ppi0_24b"),
+	PIN_MAP_MUX_GROUP_DEFAULT("bf54x-lq043", "pinctrl-adi2.0", "ppi0_24bgrp", "ppi0"),
 	PIN_MAP_MUX_GROUP_DEFAULT("bfin-i2s.0", "pinctrl-adi2.0", NULL, "sport0"),
 	PIN_MAP_MUX_GROUP_DEFAULT("bfin-tdm.0", "pinctrl-adi2.0", NULL, "sport0"),
 	PIN_MAP_MUX_GROUP_DEFAULT("bfin-ac97.0", "pinctrl-adi2.0", NULL, "sport0"),
@@ -2140,7 +2140,9 @@ static struct pinctrl_map __initdata bfin_pinmux_map[] = {
 	PIN_MAP_MUX_GROUP_DEFAULT("pata-bf54x", "pinctrl-adi2.0", NULL, "atapi_alter"),
 #endif
 	PIN_MAP_MUX_GROUP_DEFAULT("bf5xx-nand.0", "pinctrl-adi2.0", NULL, "nfc0"),
-	PIN_MAP_MUX_GROUP_DEFAULT("bf54x-keys", "pinctrl-adi2.0", NULL, "keys_4x4"),
+	PIN_MAP_MUX_GROUP_DEFAULT("bf54x-keys", "pinctrl-adi2.0", "keys_4x4grp", "keys"),
+	PIN_MAP_MUX_GROUP("bf54x-keys", "4bit", "pinctrl-adi2.0", "keys_4x4grp", "keys"),
+	PIN_MAP_MUX_GROUP("bf54x-keys", "8bit", "pinctrl-adi2.0", "keys_8x8grp", "keys"),
 };
 
 static int __init ezkit_init(void)
diff --git a/arch/blackfin/mach-bf561/boards/acvilon.c b/arch/blackfin/mach-bf561/boards/acvilon.c
index 430b16d5ccb1..6ab951534d79 100644
--- a/arch/blackfin/mach-bf561/boards/acvilon.c
+++ b/arch/blackfin/mach-bf561/boards/acvilon.c
@@ -44,6 +44,7 @@
 #include <linux/spi/flash.h>
 #include <linux/irq.h>
 #include <linux/interrupt.h>
+#include <linux/gpio.h>
 #include <linux/jiffies.h>
 #include <linux/i2c-pca-platform.h>
 #include <linux/delay.h>
diff --git a/arch/blackfin/mach-bf561/boards/cm_bf561.c b/arch/blackfin/mach-bf561/boards/cm_bf561.c
index 9f777df4cacc..e862f7823e68 100644
--- a/arch/blackfin/mach-bf561/boards/cm_bf561.c
+++ b/arch/blackfin/mach-bf561/boards/cm_bf561.c
@@ -18,6 +18,7 @@
 #endif
 #include <linux/ata_platform.h>
 #include <linux/irq.h>
+#include <linux/gpio.h>
 #include <asm/dma.h>
 #include <asm/bfin5xx_spi.h>
 #include <asm/portmux.h>
diff --git a/arch/blackfin/mach-bf561/boards/ezkit.c b/arch/blackfin/mach-bf561/boards/ezkit.c
index 88dee43e7abe..2de71e8c104b 100644
--- a/arch/blackfin/mach-bf561/boards/ezkit.c
+++ b/arch/blackfin/mach-bf561/boards/ezkit.c
@@ -14,6 +14,7 @@
 #include <linux/spi/spi.h>
 #include <linux/irq.h>
 #include <linux/interrupt.h>
+#include <linux/gpio.h>
 #include <linux/delay.h>
 #include <asm/dma.h>
 #include <asm/bfin5xx_spi.h>
diff --git a/arch/blackfin/mach-bf609/boards/ezkit.c b/arch/blackfin/mach-bf609/boards/ezkit.c
index 1ba4600de69f..e2c0b024ce88 100644
--- a/arch/blackfin/mach-bf609/boards/ezkit.c
+++ b/arch/blackfin/mach-bf609/boards/ezkit.c
@@ -698,8 +698,6 @@ int bf609_nor_flash_init(struct platform_device *pdev)
 {
 #define CONFIG_SMC_GCTL_VAL	0x00000010
 
-	if (!devm_pinctrl_get_select_default(&pdev->dev))
-		return -EBUSY;
 	bfin_write32(SMC_GCTL, CONFIG_SMC_GCTL_VAL);
 	bfin_write32(SMC_B0CTL, 0x01002011);
 	bfin_write32(SMC_B0TIM, 0x08170977);
@@ -709,7 +707,6 @@ int bf609_nor_flash_init(struct platform_device *pdev)
 
 void bf609_nor_flash_exit(struct platform_device *pdev)
 {
-	devm_pinctrl_put(pdev->dev.pins->p);
 	bfin_write32(SMC_GCTL, 0);
 }
 
@@ -2058,15 +2055,14 @@ static struct pinctrl_map __initdata bfin_pinmux_map[] = {
 	PIN_MAP_MUX_GROUP_DEFAULT("bfin-rotary", "pinctrl-adi2.0", NULL, "rotary"),
 	PIN_MAP_MUX_GROUP_DEFAULT("bfin_can.0", "pinctrl-adi2.0", NULL, "can0"),
 	PIN_MAP_MUX_GROUP_DEFAULT("physmap-flash.0", "pinctrl-adi2.0", NULL, "smc0"),
-	PIN_MAP_MUX_GROUP_DEFAULT("bf609_nl8048.2", "pinctrl-adi2.0", NULL, "ppi2_16b"),
-	PIN_MAP_MUX_GROUP_DEFAULT("bfin_display.0", "pinctrl-adi2.0", NULL, "ppi0_16b"),
-#if IS_ENABLED(CONFIG_VIDEO_MT9M114)
-	PIN_MAP_MUX_GROUP_DEFAULT("bfin_capture.0", "pinctrl-adi2.0", NULL, "ppi0_8b"),
-#elif IS_ENABLED(CONFIG_VIDEO_VS6624)
-	PIN_MAP_MUX_GROUP_DEFAULT("bfin_capture.0", "pinctrl-adi2.0", NULL, "ppi0_16b"),
-#else
-	PIN_MAP_MUX_GROUP_DEFAULT("bfin_capture.0", "pinctrl-adi2.0", NULL, "ppi0_24b"),
-#endif
+	PIN_MAP_MUX_GROUP_DEFAULT("bf609_nl8048.2", "pinctrl-adi2.0", "ppi2_16bgrp", "ppi2"),
+	PIN_MAP_MUX_GROUP("bfin_display.0", "8bit", "pinctrl-adi2.0", "ppi2_8bgrp", "ppi2"),
+	PIN_MAP_MUX_GROUP_DEFAULT("bfin_display.0", "pinctrl-adi2.0", "ppi2_16bgrp", "ppi2"),
+	PIN_MAP_MUX_GROUP("bfin_display.0", "16bit", "pinctrl-adi2.0", "ppi2_16bgrp", "ppi2"),
+	PIN_MAP_MUX_GROUP("bfin_capture.0", "8bit", "pinctrl-adi2.0", "ppi0_8bgrp", "ppi0"),
+	PIN_MAP_MUX_GROUP_DEFAULT("bfin_capture.0", "pinctrl-adi2.0", "ppi0_16bgrp", "ppi0"),
+	PIN_MAP_MUX_GROUP("bfin_capture.0", "16bit", "pinctrl-adi2.0", "ppi0_16bgrp", "ppi0"),
+	PIN_MAP_MUX_GROUP("bfin_capture.0", "24bit", "pinctrl-adi2.0", "ppi0_24bgrp", "ppi0"),
 	PIN_MAP_MUX_GROUP_DEFAULT("bfin-i2s.0", "pinctrl-adi2.0", NULL, "sport0"),
 	PIN_MAP_MUX_GROUP_DEFAULT("bfin-tdm.0", "pinctrl-adi2.0", NULL, "sport0"),
 	PIN_MAP_MUX_GROUP_DEFAULT("bfin-i2s.1", "pinctrl-adi2.0", NULL, "sport1"),
diff --git a/arch/blackfin/mach-bf609/include/mach/pm.h b/arch/blackfin/mach-bf609/include/mach/pm.h
index 3ca0fb965636..a1efd936dd30 100644
--- a/arch/blackfin/mach-bf609/include/mach/pm.h
+++ b/arch/blackfin/mach-bf609/include/mach/pm.h
@@ -10,6 +10,7 @@
 #define __MACH_BF609_PM_H__
 
 #include <linux/suspend.h>
+#include <linux/platform_device.h>
 
 extern int bfin609_pm_enter(suspend_state_t state);
 extern int bf609_pm_prepare(void);
@@ -19,6 +20,6 @@ void bf609_hibernate(void);
 void bfin_sec_raise_irq(unsigned int sid);
 void coreb_enable(void);
 
-int bf609_nor_flash_init(void);
-void bf609_nor_flash_exit(void);
+int bf609_nor_flash_init(struct platform_device *pdev);
+void bf609_nor_flash_exit(struct platform_device *pdev);
 #endif
diff --git a/arch/blackfin/mach-bf609/pm.c b/arch/blackfin/mach-bf609/pm.c
index 0cdd6955c7be..b1bfcf434d16 100644
--- a/arch/blackfin/mach-bf609/pm.c
+++ b/arch/blackfin/mach-bf609/pm.c
@@ -291,13 +291,13 @@ static struct bfin_cpu_pm_fns bf609_cpu_pm = {
 #if defined(CONFIG_MTD_PHYSMAP) || defined(CONFIG_MTD_PHYSMAP_MODULE)
 static int smc_pm_syscore_suspend(void)
 {
-	bf609_nor_flash_exit();
+	bf609_nor_flash_exit(NULL);
 	return 0;
 }
 
 static void smc_pm_syscore_resume(void)
 {
-	bf609_nor_flash_init();
+	bf609_nor_flash_init(NULL);
 }
 
 static struct syscore_ops smc_pm_syscore_ops = {
diff --git a/arch/blackfin/mach-common/ints-priority.c b/arch/blackfin/mach-common/ints-priority.c
index 867b7cef204c..1f94784eab6d 100644
--- a/arch/blackfin/mach-common/ints-priority.c
+++ b/arch/blackfin/mach-common/ints-priority.c
@@ -1208,8 +1208,6 @@ int __init init_arch_irq(void)
 
 	bfin_sec_set_priority(CONFIG_SEC_IRQ_PRIORITY_LEVELS, sec_int_priority);
 
-	bfin_sec_set_priority(CONFIG_SEC_IRQ_PRIORITY_LEVELS, sec_int_priority);
-
 	/* Enable interrupts IVG7-15 */
 	bfin_irq_flags |= IMASK_IVG15 |
 	    IMASK_IVG14 | IMASK_IVG13 | IMASK_IVG12 | IMASK_IVG11 |
diff --git a/arch/parisc/include/uapi/asm/signal.h b/arch/parisc/include/uapi/asm/signal.h
index a2fa297196bc..f5645d6a89f2 100644
--- a/arch/parisc/include/uapi/asm/signal.h
+++ b/arch/parisc/include/uapi/asm/signal.h
@@ -69,8 +69,6 @@
 #define SA_NOMASK	SA_NODEFER
 #define SA_ONESHOT	SA_RESETHAND
 
-#define SA_RESTORER	0x04000000 /* obsolete -- ignored */
-
 #define MINSIGSTKSZ	2048
 #define SIGSTKSZ	8192
 
diff --git a/arch/parisc/mm/init.c b/arch/parisc/mm/init.c
index ae085ad0fba0..0bef864264c0 100644
--- a/arch/parisc/mm/init.c
+++ b/arch/parisc/mm/init.c
@@ -728,7 +728,6 @@ static void __init pagetable_init(void)
 #endif
 
 	empty_zero_page = alloc_bootmem_pages(PAGE_SIZE);
-	memset(empty_zero_page, 0, PAGE_SIZE);
 }
 
 static void __init gateway_init(void)
diff --git a/arch/powerpc/include/asm/cputable.h b/arch/powerpc/include/asm/cputable.h
index bc2347774f0a..0fdd7eece6d9 100644
--- a/arch/powerpc/include/asm/cputable.h
+++ b/arch/powerpc/include/asm/cputable.h
@@ -447,6 +447,7 @@ extern const char *powerpc_base_platform;
 	    CPU_FTR_DBELL | CPU_FTR_HAS_PPR | CPU_FTR_DAWR | \
 	    CPU_FTR_ARCH_207S | CPU_FTR_TM_COMP)
 #define CPU_FTRS_POWER8E (CPU_FTRS_POWER8 | CPU_FTR_PMAO_BUG)
+#define CPU_FTRS_POWER8_DD1 (CPU_FTRS_POWER8 & ~CPU_FTR_DBELL)
 #define CPU_FTRS_CELL	(CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \
 	    CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \
 	    CPU_FTR_ALTIVEC_COMP | CPU_FTR_MMCRA | CPU_FTR_SMT | \
diff --git a/arch/powerpc/include/asm/mmu-hash64.h b/arch/powerpc/include/asm/mmu-hash64.h
index 807014dde821..c2b4dcf23d03 100644
--- a/arch/powerpc/include/asm/mmu-hash64.h
+++ b/arch/powerpc/include/asm/mmu-hash64.h
@@ -22,6 +22,7 @@
  */
 #include <asm/pgtable-ppc64.h>
 #include <asm/bug.h>
+#include <asm/processor.h>
 
 /*
  * Segment table
@@ -496,7 +497,7 @@ extern void slb_set_size(u16 size);
  */
 struct subpage_prot_table {
 	unsigned long maxaddr;	/* only addresses < this are protected */
-	unsigned int **protptrs[2];
+	unsigned int **protptrs[(TASK_SIZE_USER64 >> 43)];
 	unsigned int *low_prot[4];
 };
 
diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c
index 965291b4c2fa..0c157642c2a1 100644
--- a/arch/powerpc/kernel/cputable.c
+++ b/arch/powerpc/kernel/cputable.c
@@ -527,6 +527,26 @@ static struct cpu_spec __initdata cpu_specs[] = {
 		.machine_check_early	= __machine_check_early_realmode_p8,
 		.platform		= "power8",
 	},
+	{	/* Power8 DD1: Does not support doorbell IPIs */
+		.pvr_mask		= 0xffffff00,
+		.pvr_value		= 0x004d0100,
+		.cpu_name		= "POWER8 (raw)",
+		.cpu_features		= CPU_FTRS_POWER8_DD1,
+		.cpu_user_features	= COMMON_USER_POWER8,
+		.cpu_user_features2	= COMMON_USER2_POWER8,
+		.mmu_features		= MMU_FTRS_POWER8,
+		.icache_bsize		= 128,
+		.dcache_bsize		= 128,
+		.num_pmcs		= 6,
+		.pmc_type		= PPC_PMC_IBM,
+		.oprofile_cpu_type	= "ppc64/power8",
+		.oprofile_type		= PPC_OPROFILE_INVALID,
+		.cpu_setup		= __setup_cpu_power8,
+		.cpu_restore		= __restore_cpu_power8,
+		.flush_tlb		= __flush_tlb_power8,
+		.machine_check_early	= __machine_check_early_realmode_p8,
+		.platform		= "power8",
+	},
 	{	/* Power8 */
 		.pvr_mask		= 0xffff0000,
 		.pvr_value		= 0x004d0000,
diff --git a/arch/powerpc/kernel/rtas_flash.c b/arch/powerpc/kernel/rtas_flash.c
index 658e89d2025b..db2b482af658 100644
--- a/arch/powerpc/kernel/rtas_flash.c
+++ b/arch/powerpc/kernel/rtas_flash.c
@@ -611,17 +611,19 @@ static void rtas_flash_firmware(int reboot_type)
 	for (f = flist; f; f = next) {
 		/* Translate data addrs to absolute */
 		for (i = 0; i < f->num_blocks; i++) {
-			f->blocks[i].data = (char *)__pa(f->blocks[i].data);
+			f->blocks[i].data = (char *)cpu_to_be64(__pa(f->blocks[i].data));
 			image_size += f->blocks[i].length;
+			f->blocks[i].length = cpu_to_be64(f->blocks[i].length);
 		}
 		next = f->next;
 		/* Don't translate NULL pointer for last entry */
 		if (f->next)
-			f->next = (struct flash_block_list *)__pa(f->next);
+			f->next = (struct flash_block_list *)cpu_to_be64(__pa(f->next));
 		else
 			f->next = NULL;
 		/* make num_blocks into the version/length field */
 		f->num_blocks = (FLASH_BLOCK_LIST_VERSION << 56) | ((f->num_blocks+1)*16);
+		f->num_blocks = cpu_to_be64(f->num_blocks);
 	}
 
 	printk(KERN_ALERT "FLASH: flash image is %ld bytes\n", image_size);
diff --git a/arch/powerpc/lib/mem_64.S b/arch/powerpc/lib/mem_64.S
index 0738f96befbf..43435c6892fb 100644
--- a/arch/powerpc/lib/mem_64.S
+++ b/arch/powerpc/lib/mem_64.S
@@ -77,7 +77,7 @@ _GLOBAL(memset)
 	stb	r4,0(r6)
 	blr
 
-_GLOBAL(memmove)
+_GLOBAL_TOC(memmove)
 	cmplw	0,r3,r4
 	bgt	backwards_memcpy
 	b	memcpy
diff --git a/arch/powerpc/lib/sstep.c b/arch/powerpc/lib/sstep.c
index 412dd46dd0b7..5c09f365c842 100644
--- a/arch/powerpc/lib/sstep.c
+++ b/arch/powerpc/lib/sstep.c
@@ -1198,7 +1198,7 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr)
 			sh = regs->gpr[rb] & 0x3f;
 			ival = (signed int) regs->gpr[rd];
 			regs->gpr[ra] = ival >> (sh < 32 ? sh : 31);
-			if (ival < 0 && (sh >= 32 || (ival & ((1 << sh) - 1)) != 0))
+			if (ival < 0 && (sh >= 32 || (ival & ((1ul << sh) - 1)) != 0))
 				regs->xer |= XER_CA;
 			else
 				regs->xer &= ~XER_CA;
@@ -1208,7 +1208,7 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr)
 			sh = rb;
 			ival = (signed int) regs->gpr[rd];
 			regs->gpr[ra] = ival >> sh;
-			if (ival < 0 && (ival & ((1 << sh) - 1)) != 0)
+			if (ival < 0 && (ival & ((1ul << sh) - 1)) != 0)
 				regs->xer |= XER_CA;
 			else
 				regs->xer &= ~XER_CA;
@@ -1216,7 +1216,7 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr)
 
 #ifdef __powerpc64__
 		case 27:	/* sld */
-			sh = regs->gpr[rd] & 0x7f;
+			sh = regs->gpr[rb] & 0x7f;
 			if (sh < 64)
 				regs->gpr[ra] = regs->gpr[rd] << sh;
 			else
@@ -1235,7 +1235,7 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr)
 			sh = regs->gpr[rb] & 0x7f;
 			ival = (signed long int) regs->gpr[rd];
 			regs->gpr[ra] = ival >> (sh < 64 ? sh : 63);
-			if (ival < 0 && (sh >= 64 || (ival & ((1 << sh) - 1)) != 0))
+			if (ival < 0 && (sh >= 64 || (ival & ((1ul << sh) - 1)) != 0))
 				regs->xer |= XER_CA;
 			else
 				regs->xer &= ~XER_CA;
@@ -1246,7 +1246,7 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr)
 			sh = rb | ((instr & 2) << 4);
 			ival = (signed long int) regs->gpr[rd];
 			regs->gpr[ra] = ival >> sh;
-			if (ival < 0 && (ival & ((1 << sh) - 1)) != 0)
+			if (ival < 0 && (ival & ((1ul << sh) - 1)) != 0)
 				regs->xer |= XER_CA;
 			else
 				regs->xer &= ~XER_CA;
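
A note on the sstep.c change above, with a stand-alone C sketch (user-space, not kernel code; assumes an LP64 target such as ppc64 where unsigned long is 64 bits): the carry test masks out the bits shifted past the sign, and with a plain int constant the expression 1 << sh is undefined once sh reaches 32, so it can never form the 64-bit masks these shift-right-algebraic cases need; promoting the constant to unsigned long, as the 1ul change does, keeps the shift well-defined for the full 0..63 range.

#include <stdio.h>

int main(void)
{
	unsigned int sh = 40;	/* a shift amount that only makes sense for 64-bit operands */

	/* Broken variant (do not use): 1 is a 32-bit int, so (1 << 40) is
	 * undefined behaviour and cannot produce a 64-bit mask.
	 *   unsigned long bad = (1 << sh) - 1;
	 */

	/* Fixed variant, mirroring the kernel's 1ul change: the shift is done
	 * in unsigned long, yielding the mask of all bits shifted out. */
	unsigned long mask = (1ul << sh) - 1;

	printf("mask = %#lx\n", mask);	/* 0xffffffffff on LP64 */
	return 0;
}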
diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c
index 6b0641c3f03f..fe52db2eea6a 100644
--- a/arch/powerpc/perf/core-book3s.c
+++ b/arch/powerpc/perf/core-book3s.c
@@ -1307,6 +1307,9 @@ static void power_pmu_enable(struct pmu *pmu)
  out_enable:
 	pmao_restore_workaround(ebb);
 
+	if (ppmu->flags & PPMU_ARCH_207S)
+		mtspr(SPRN_MMCR2, 0);
+
 	mmcr0 = ebb_switch_in(ebb, cpuhw->mmcr[0]);
 
 	mb();
@@ -1315,9 +1318,6 @@ static void power_pmu_enable(struct pmu *pmu)
 
 	write_mmcr0(cpuhw, mmcr0);
 
-	if (ppmu->flags & PPMU_ARCH_207S)
-		mtspr(SPRN_MMCR2, 0);
-
 	/*
 	 * Enable instruction sampling if necessary
 	 */
diff --git a/arch/powerpc/platforms/powernv/opal-elog.c b/arch/powerpc/platforms/powernv/opal-elog.c
index 10268c41d830..0ad533b617f7 100644
--- a/arch/powerpc/platforms/powernv/opal-elog.c
+++ b/arch/powerpc/platforms/powernv/opal-elog.c
@@ -249,7 +249,7 @@ static void elog_work_fn(struct work_struct *work)
 
 	rc = opal_get_elog_size(&id, &size, &type);
 	if (rc != OPAL_SUCCESS) {
-		pr_err("ELOG: Opal log read failed\n");
+		pr_err("ELOG: OPAL log info read failed\n");
 		return;
 	}
 
@@ -257,7 +257,7 @@ static void elog_work_fn(struct work_struct *work)
 	log_id = be64_to_cpu(id);
 	elog_type = be64_to_cpu(type);
 
-	BUG_ON(elog_size > OPAL_MAX_ERRLOG_SIZE);
+	WARN_ON(elog_size > OPAL_MAX_ERRLOG_SIZE);
 
 	if (elog_size >= OPAL_MAX_ERRLOG_SIZE)
 		elog_size = OPAL_MAX_ERRLOG_SIZE;
diff --git a/arch/powerpc/platforms/pseries/dlpar.c b/arch/powerpc/platforms/pseries/dlpar.c
index 022b38e6a80b..2d0b4d68a40a 100644
--- a/arch/powerpc/platforms/pseries/dlpar.c
+++ b/arch/powerpc/platforms/pseries/dlpar.c
@@ -86,6 +86,7 @@ static struct device_node *dlpar_parse_cc_node(struct cc_workarea *ccwa,
 	}
 
 	of_node_set_flag(dn, OF_DYNAMIC);
+	of_node_init(dn);
 
 	return dn;
 }
diff --git a/arch/powerpc/platforms/pseries/reconfig.c b/arch/powerpc/platforms/pseries/reconfig.c
index 0435bb65d0aa..1c0a60d98867 100644
--- a/arch/powerpc/platforms/pseries/reconfig.c
+++ b/arch/powerpc/platforms/pseries/reconfig.c
@@ -69,6 +69,7 @@ static int pSeries_reconfig_add_node(const char *path, struct property *proplist
 
 	np->properties = proplist;
 	of_node_set_flag(np, OF_DYNAMIC);
+	of_node_init(np);
 
 	np->parent = derive_parent(path);
 	if (IS_ERR(np->parent)) {
diff --git a/arch/sh/Makefile b/arch/sh/Makefile
index d4d16e4be07c..bf5b3f5f4962 100644
--- a/arch/sh/Makefile
+++ b/arch/sh/Makefile
@@ -32,7 +32,8 @@ endif
 
 cflags-$(CONFIG_CPU_SH2)	:= $(call cc-option,-m2,)
 cflags-$(CONFIG_CPU_SH2A)	+= $(call cc-option,-m2a,) \
-				   $(call cc-option,-m2a-nofpu,)
+				   $(call cc-option,-m2a-nofpu,) \
+				   $(call cc-option,-m4-nofpu,)
 cflags-$(CONFIG_CPU_SH3)	:= $(call cc-option,-m3,)
 cflags-$(CONFIG_CPU_SH4)	:= $(call cc-option,-m4,) \
 				   $(call cc-option,-mno-implicit-fp,-m4-nofpu)
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
index a80029035bf2..f9e4fdd3b877 100644
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -370,6 +370,17 @@ static void init_intel(struct cpuinfo_x86 *c)
 	 */
 	detect_extended_topology(c);
 
+	if (!cpu_has(c, X86_FEATURE_XTOPOLOGY)) {
+		/*
+		 * let's use the legacy cpuid vector 0x1 and 0x4 for topology
+		 * detection.
+		 */
+		c->x86_max_cores = intel_num_cpu_cores(c);
+#ifdef CONFIG_X86_32
+		detect_ht(c);
+#endif
+	}
+
 	l2 = init_intel_cacheinfo(c);
 	if (c->cpuid_level > 9) {
 		unsigned eax = cpuid_eax(10);
@@ -438,17 +449,6 @@ static void init_intel(struct cpuinfo_x86 *c)
 	set_cpu_cap(c, X86_FEATURE_P3);
 #endif
 
-	if (!cpu_has(c, X86_FEATURE_XTOPOLOGY)) {
-		/*
-		 * let's use the legacy cpuid vector 0x1 and 0x4 for topology
-		 * detection.
-		 */
-		c->x86_max_cores = intel_num_cpu_cores(c);
-#ifdef CONFIG_X86_32
-		detect_ht(c);
-#endif
-	}
-
 	/* Work around errata */
 	srat_detect_node(c);
 
diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
index a952e9c85b6f..9c8f7394c612 100644
--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
+++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
@@ -730,6 +730,18 @@ unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c)
 #endif
 	}
 
+#ifdef CONFIG_X86_HT
+	/*
+	 * If cpu_llc_id is not yet set, this means cpuid_level < 4 which in
+	 * turns means that the only possibility is SMT (as indicated in
+	 * cpuid1). Since cpuid2 doesn't specify shared caches, and we know
+	 * that SMT shares all caches, we can unconditionally set cpu_llc_id to
+	 * c->phys_proc_id.
+	 */
+	if (per_cpu(cpu_llc_id, cpu) == BAD_APICID)
+		per_cpu(cpu_llc_id, cpu) = c->phys_proc_id;
+#endif
+
 	c->x86_cache_size = l3 ? l3 : (l2 ? l2 : (l1i+l1d));
 
 	return l2;
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
index bb92f38153b2..9a79c8dbd8e8 100644
--- a/arch/x86/kernel/cpu/mcheck/mce.c
+++ b/arch/x86/kernel/cpu/mcheck/mce.c
@@ -2451,6 +2451,12 @@ static __init int mcheck_init_device(void)
 	for_each_online_cpu(i) {
 		err = mce_device_create(i);
 		if (err) {
+			/*
+			 * Register notifier anyway (and do not unreg it) so
+			 * that we don't leave undeleted timers, see notifier
+			 * callback above.
+			 */
+			__register_hotcpu_notifier(&mce_cpu_notifier);
 			cpu_notifier_register_done();
 			goto err_device_create;
 		}
@@ -2471,10 +2477,6 @@ static __init int mcheck_init_device(void)
 err_register:
 	unregister_syscore_ops(&mce_syscore_ops);
 
-	cpu_notifier_register_begin();
-	__unregister_hotcpu_notifier(&mce_cpu_notifier);
-	cpu_notifier_register_done();
-
 err_device_create:
 	/*
 	 * We didn't keep track of which devices were created above, but
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index 2bdfbff8a4f6..2879ecdaac43 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -118,6 +118,9 @@ static int x86_pmu_extra_regs(u64 config, struct perf_event *event)
 			continue;
 		if (event->attr.config1 & ~er->valid_mask)
 			return -EINVAL;
+		/* Check if the extra msrs can be safely accessed*/
+		if (!er->extra_msr_access)
+			return -ENXIO;
 
 		reg->idx = er->idx;
 		reg->config = event->attr.config1;
diff --git a/arch/x86/kernel/cpu/perf_event.h b/arch/x86/kernel/cpu/perf_event.h
index 3b2f9bdd974b..8ade93111e03 100644
--- a/arch/x86/kernel/cpu/perf_event.h
+++ b/arch/x86/kernel/cpu/perf_event.h
@@ -295,14 +295,16 @@ struct extra_reg {
 	u64			config_mask;
 	u64			valid_mask;
 	int			idx;  /* per_xxx->regs[] reg index */
+	bool			extra_msr_access;
 };
 
 #define EVENT_EXTRA_REG(e, ms, m, vm, i) {	\
 	.event = (e),		\
 	.msr = (ms),		\
 	.config_mask = (m),	\
 	.valid_mask = (vm),	\
 	.idx = EXTRA_REG_##i,	\
+	.extra_msr_access = true,	\
 	}
 
 #define INTEL_EVENT_EXTRA_REG(event, msr, vm, idx)	\
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
index 07846d738bdb..2502d0d9d246 100644
--- a/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/arch/x86/kernel/cpu/perf_event_intel.c
@@ -2182,6 +2182,41 @@ static void intel_snb_check_microcode(void)
2182 } 2182 }
2183} 2183}
2184 2184
2185/*
 2186 * Under certain circumstances, accessing certain MSRs may cause a #GP.
2187 * The function tests if the input MSR can be safely accessed.
2188 */
2189static bool check_msr(unsigned long msr, u64 mask)
2190{
2191 u64 val_old, val_new, val_tmp;
2192
2193 /*
2194 * Read the current value, change it and read it back to see if it
 2195 * matches; this is needed to detect certain hardware emulators
2196 * (qemu/kvm) that don't trap on the MSR access and always return 0s.
2197 */
2198 if (rdmsrl_safe(msr, &val_old))
2199 return false;
2200
2201 /*
2202 * Only change the bits which can be updated by wrmsrl.
2203 */
2204 val_tmp = val_old ^ mask;
2205 if (wrmsrl_safe(msr, val_tmp) ||
2206 rdmsrl_safe(msr, &val_new))
2207 return false;
2208
2209 if (val_new != val_tmp)
2210 return false;
2211
 2212 /* At this point it is certain that the MSR can be safely accessed.
2213 * Restore the old value and return.
2214 */
2215 wrmsrl(msr, val_old);
2216
2217 return true;
2218}
2219
2185static __init void intel_sandybridge_quirk(void) 2220static __init void intel_sandybridge_quirk(void)
2186{ 2221{
2187 x86_pmu.check_microcode = intel_snb_check_microcode; 2222 x86_pmu.check_microcode = intel_snb_check_microcode;
@@ -2271,7 +2306,8 @@ __init int intel_pmu_init(void)
2271 union cpuid10_ebx ebx; 2306 union cpuid10_ebx ebx;
2272 struct event_constraint *c; 2307 struct event_constraint *c;
2273 unsigned int unused; 2308 unsigned int unused;
2274 int version; 2309 struct extra_reg *er;
2310 int version, i;
2275 2311
2276 if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) { 2312 if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) {
2277 switch (boot_cpu_data.x86) { 2313 switch (boot_cpu_data.x86) {
@@ -2474,6 +2510,9 @@ __init int intel_pmu_init(void)
2474 case 62: /* IvyBridge EP */ 2510 case 62: /* IvyBridge EP */
2475 memcpy(hw_cache_event_ids, snb_hw_cache_event_ids, 2511 memcpy(hw_cache_event_ids, snb_hw_cache_event_ids,
2476 sizeof(hw_cache_event_ids)); 2512 sizeof(hw_cache_event_ids));
2513 /* dTLB-load-misses on IVB is different than SNB */
2514 hw_cache_event_ids[C(DTLB)][C(OP_READ)][C(RESULT_MISS)] = 0x8108; /* DTLB_LOAD_MISSES.DEMAND_LD_MISS_CAUSES_A_WALK */
2515
2477 memcpy(hw_cache_extra_regs, snb_hw_cache_extra_regs, 2516 memcpy(hw_cache_extra_regs, snb_hw_cache_extra_regs,
2478 sizeof(hw_cache_extra_regs)); 2517 sizeof(hw_cache_extra_regs));
2479 2518
@@ -2574,6 +2613,34 @@ __init int intel_pmu_init(void)
2574 } 2613 }
2575 } 2614 }
2576 2615
2616 /*
 2617 * Accessing LBR MSRs may cause a #GP under certain circumstances,
 2618 * e.g. KVM doesn't support the LBR MSRs.
 2619 * Check all LBR MSRs here.
 2620 * Disable LBR access if any LBR MSR cannot be accessed.
2621 */
2622 if (x86_pmu.lbr_nr && !check_msr(x86_pmu.lbr_tos, 0x3UL))
2623 x86_pmu.lbr_nr = 0;
2624 for (i = 0; i < x86_pmu.lbr_nr; i++) {
2625 if (!(check_msr(x86_pmu.lbr_from + i, 0xffffUL) &&
2626 check_msr(x86_pmu.lbr_to + i, 0xffffUL)))
2627 x86_pmu.lbr_nr = 0;
2628 }
2629
2630 /*
 2631 * Accessing an extra MSR may cause a #GP under certain circumstances,
 2632 * e.g. KVM doesn't support offcore events.
 2633 * Check all extra_regs here.
2634 */
2635 if (x86_pmu.extra_regs) {
2636 for (er = x86_pmu.extra_regs; er->msr; er++) {
2637 er->extra_msr_access = check_msr(er->msr, 0x1ffUL);
2638 /* Disable LBR select mapping */
2639 if ((er->idx == EXTRA_REG_LBR) && !er->extra_msr_access)
2640 x86_pmu.lbr_sel_map = NULL;
2641 }
2642 }
2643
2577 /* Support full width counters using alternative MSR range */ 2644 /* Support full width counters using alternative MSR range */
2578 if (x86_pmu.intel_cap.full_width_write) { 2645 if (x86_pmu.intel_cap.full_width_write) {
2579 x86_pmu.max_period = x86_pmu.cntval_mask; 2646 x86_pmu.max_period = x86_pmu.cntval_mask;
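
Editor's note: the check_msr() probe added above reads an MSR, flips only the bits in a caller-supplied mask, writes it back, re-reads it, and restores the original value; if the round trip does not stick (as with emulated MSRs that always read as 0), the PMU feature backed by that MSR is disabled. The userspace sketch below exercises the same read/flip/read-back/restore idea through the /dev/cpu/N/msr interface. The MSR number 0x1c9 (LBR_TOS on many Intel parts) and the 0x3 mask come from the patch; everything else is illustrative. It needs the msr module, root privileges, and should only ever be pointed at harmless registers.

    /* Probe whether an MSR is really backed by hardware: read it, flip the
     * bits in `mask`, write, read back, compare, then restore the old value.
     * Mirrors check_msr() above, but runs from userspace for illustration. */
    #include <fcntl.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <unistd.h>

    static int msr_round_trips(int fd, off_t msr, uint64_t mask)
    {
            uint64_t old, tmp, back;

            if (pread(fd, &old, sizeof(old), msr) != sizeof(old))
                    return 0;
            tmp = old ^ mask;                       /* only touch the masked bits */
            if (pwrite(fd, &tmp, sizeof(tmp), msr) != sizeof(tmp))
                    return 0;
            if (pread(fd, &back, sizeof(back), msr) != sizeof(back) || back != tmp)
                    return 0;                       /* emulator returned stale/zero data */
            pwrite(fd, &old, sizeof(old), msr);     /* restore the original value */
            return 1;
    }

    int main(void)
    {
            int fd = open("/dev/cpu/0/msr", O_RDWR); /* needs the msr module and root */

            if (fd < 0)
                    return 1;
            /* 0x1c9 is MSR_LBR_TOS on many Intel CPUs; mask 0x3 as in the patch */
            printf("LBR_TOS usable: %d\n", msr_round_trips(fd, 0x1c9, 0x3));
            close(fd);
            return 0;
    }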
diff --git a/arch/x86/kernel/cpu/perf_event_intel_ds.c b/arch/x86/kernel/cpu/perf_event_intel_ds.c
index 980970cb744d..696ade311ded 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_ds.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_ds.c
@@ -311,9 +311,11 @@ static int alloc_bts_buffer(int cpu)
311 if (!x86_pmu.bts) 311 if (!x86_pmu.bts)
312 return 0; 312 return 0;
313 313
314 buffer = kzalloc_node(BTS_BUFFER_SIZE, GFP_KERNEL, node); 314 buffer = kzalloc_node(BTS_BUFFER_SIZE, GFP_KERNEL | __GFP_NOWARN, node);
315 if (unlikely(!buffer)) 315 if (unlikely(!buffer)) {
316 WARN_ONCE(1, "%s: BTS buffer allocation failure\n", __func__);
316 return -ENOMEM; 317 return -ENOMEM;
318 }
317 319
318 max = BTS_BUFFER_SIZE / BTS_RECORD_SIZE; 320 max = BTS_BUFFER_SIZE / BTS_RECORD_SIZE;
319 thresh = max / 16; 321 thresh = max / 16;
diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.c b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
index 65bbbea38b9c..ae6552a0701f 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_uncore.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
@@ -550,16 +550,16 @@ static struct extra_reg snbep_uncore_cbox_extra_regs[] = {
550 SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0x6), 550 SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0x6),
551 SNBEP_CBO_EVENT_EXTRA_REG(0x0135, 0xffff, 0x8), 551 SNBEP_CBO_EVENT_EXTRA_REG(0x0135, 0xffff, 0x8),
552 SNBEP_CBO_EVENT_EXTRA_REG(0x0335, 0xffff, 0x8), 552 SNBEP_CBO_EVENT_EXTRA_REG(0x0335, 0xffff, 0x8),
553 SNBEP_CBO_EVENT_EXTRA_REG(0x4135, 0xffff, 0xc), 553 SNBEP_CBO_EVENT_EXTRA_REG(0x4135, 0xffff, 0xa),
554 SNBEP_CBO_EVENT_EXTRA_REG(0x4335, 0xffff, 0xc), 554 SNBEP_CBO_EVENT_EXTRA_REG(0x4335, 0xffff, 0xa),
555 SNBEP_CBO_EVENT_EXTRA_REG(0x4435, 0xffff, 0x2), 555 SNBEP_CBO_EVENT_EXTRA_REG(0x4435, 0xffff, 0x2),
556 SNBEP_CBO_EVENT_EXTRA_REG(0x4835, 0xffff, 0x2), 556 SNBEP_CBO_EVENT_EXTRA_REG(0x4835, 0xffff, 0x2),
557 SNBEP_CBO_EVENT_EXTRA_REG(0x4a35, 0xffff, 0x2), 557 SNBEP_CBO_EVENT_EXTRA_REG(0x4a35, 0xffff, 0x2),
558 SNBEP_CBO_EVENT_EXTRA_REG(0x5035, 0xffff, 0x2), 558 SNBEP_CBO_EVENT_EXTRA_REG(0x5035, 0xffff, 0x2),
559 SNBEP_CBO_EVENT_EXTRA_REG(0x0136, 0xffff, 0x8), 559 SNBEP_CBO_EVENT_EXTRA_REG(0x0136, 0xffff, 0x8),
560 SNBEP_CBO_EVENT_EXTRA_REG(0x0336, 0xffff, 0x8), 560 SNBEP_CBO_EVENT_EXTRA_REG(0x0336, 0xffff, 0x8),
561 SNBEP_CBO_EVENT_EXTRA_REG(0x4136, 0xffff, 0xc), 561 SNBEP_CBO_EVENT_EXTRA_REG(0x4136, 0xffff, 0xa),
562 SNBEP_CBO_EVENT_EXTRA_REG(0x4336, 0xffff, 0xc), 562 SNBEP_CBO_EVENT_EXTRA_REG(0x4336, 0xffff, 0xa),
563 SNBEP_CBO_EVENT_EXTRA_REG(0x4436, 0xffff, 0x2), 563 SNBEP_CBO_EVENT_EXTRA_REG(0x4436, 0xffff, 0x2),
564 SNBEP_CBO_EVENT_EXTRA_REG(0x4836, 0xffff, 0x2), 564 SNBEP_CBO_EVENT_EXTRA_REG(0x4836, 0xffff, 0x2),
565 SNBEP_CBO_EVENT_EXTRA_REG(0x4a36, 0xffff, 0x2), 565 SNBEP_CBO_EVENT_EXTRA_REG(0x4a36, 0xffff, 0x2),
@@ -1222,6 +1222,7 @@ static struct extra_reg ivt_uncore_cbox_extra_regs[] = {
1222 SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN, 1222 SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN,
1223 SNBEP_CBO_PMON_CTL_TID_EN, 0x1), 1223 SNBEP_CBO_PMON_CTL_TID_EN, 0x1),
1224 SNBEP_CBO_EVENT_EXTRA_REG(0x1031, 0x10ff, 0x2), 1224 SNBEP_CBO_EVENT_EXTRA_REG(0x1031, 0x10ff, 0x2),
1225
1225 SNBEP_CBO_EVENT_EXTRA_REG(0x1134, 0xffff, 0x4), 1226 SNBEP_CBO_EVENT_EXTRA_REG(0x1134, 0xffff, 0x4),
1226 SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0xc), 1227 SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0xc),
1227 SNBEP_CBO_EVENT_EXTRA_REG(0x5134, 0xffff, 0xc), 1228 SNBEP_CBO_EVENT_EXTRA_REG(0x5134, 0xffff, 0xc),
@@ -1245,7 +1246,7 @@ static struct extra_reg ivt_uncore_cbox_extra_regs[] = {
1245 SNBEP_CBO_EVENT_EXTRA_REG(0x8335, 0xffff, 0x10), 1246 SNBEP_CBO_EVENT_EXTRA_REG(0x8335, 0xffff, 0x10),
1246 SNBEP_CBO_EVENT_EXTRA_REG(0x0136, 0xffff, 0x10), 1247 SNBEP_CBO_EVENT_EXTRA_REG(0x0136, 0xffff, 0x10),
1247 SNBEP_CBO_EVENT_EXTRA_REG(0x0336, 0xffff, 0x10), 1248 SNBEP_CBO_EVENT_EXTRA_REG(0x0336, 0xffff, 0x10),
1248 SNBEP_CBO_EVENT_EXTRA_REG(0x2336, 0xffff, 0x10), 1249 SNBEP_CBO_EVENT_EXTRA_REG(0x2136, 0xffff, 0x10),
1249 SNBEP_CBO_EVENT_EXTRA_REG(0x2336, 0xffff, 0x10), 1250 SNBEP_CBO_EVENT_EXTRA_REG(0x2336, 0xffff, 0x10),
1250 SNBEP_CBO_EVENT_EXTRA_REG(0x4136, 0xffff, 0x18), 1251 SNBEP_CBO_EVENT_EXTRA_REG(0x4136, 0xffff, 0x18),
1251 SNBEP_CBO_EVENT_EXTRA_REG(0x4336, 0xffff, 0x18), 1252 SNBEP_CBO_EVENT_EXTRA_REG(0x4336, 0xffff, 0x18),
diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
index dbaa23e78b36..0d0c9d4ab6d5 100644
--- a/arch/x86/kernel/entry_32.S
+++ b/arch/x86/kernel/entry_32.S
@@ -425,8 +425,8 @@ sysenter_do_call:
425 cmpl $(NR_syscalls), %eax 425 cmpl $(NR_syscalls), %eax
426 jae sysenter_badsys 426 jae sysenter_badsys
427 call *sys_call_table(,%eax,4) 427 call *sys_call_table(,%eax,4)
428 movl %eax,PT_EAX(%esp)
429sysenter_after_call: 428sysenter_after_call:
429 movl %eax,PT_EAX(%esp)
430 LOCKDEP_SYS_EXIT 430 LOCKDEP_SYS_EXIT
431 DISABLE_INTERRUPTS(CLBR_ANY) 431 DISABLE_INTERRUPTS(CLBR_ANY)
432 TRACE_IRQS_OFF 432 TRACE_IRQS_OFF
@@ -502,6 +502,7 @@ ENTRY(system_call)
502 jae syscall_badsys 502 jae syscall_badsys
503syscall_call: 503syscall_call:
504 call *sys_call_table(,%eax,4) 504 call *sys_call_table(,%eax,4)
505syscall_after_call:
505 movl %eax,PT_EAX(%esp) # store the return value 506 movl %eax,PT_EAX(%esp) # store the return value
506syscall_exit: 507syscall_exit:
507 LOCKDEP_SYS_EXIT 508 LOCKDEP_SYS_EXIT
@@ -675,12 +676,12 @@ syscall_fault:
675END(syscall_fault) 676END(syscall_fault)
676 677
677syscall_badsys: 678syscall_badsys:
678 movl $-ENOSYS,PT_EAX(%esp) 679 movl $-ENOSYS,%eax
679 jmp syscall_exit 680 jmp syscall_after_call
680END(syscall_badsys) 681END(syscall_badsys)
681 682
682sysenter_badsys: 683sysenter_badsys:
683 movl $-ENOSYS,PT_EAX(%esp) 684 movl $-ENOSYS,%eax
684 jmp sysenter_after_call 685 jmp sysenter_after_call
685END(syscall_badsys) 686END(syscall_badsys)
686 CFI_ENDPROC 687 CFI_ENDPROC
diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
index 7596df664901..67e6d19ef1be 100644
--- a/arch/x86/kernel/kprobes/core.c
+++ b/arch/x86/kernel/kprobes/core.c
@@ -574,6 +574,9 @@ int kprobe_int3_handler(struct pt_regs *regs)
574 struct kprobe *p; 574 struct kprobe *p;
575 struct kprobe_ctlblk *kcb; 575 struct kprobe_ctlblk *kcb;
576 576
577 if (user_mode_vm(regs))
578 return 0;
579
577 addr = (kprobe_opcode_t *)(regs->ip - sizeof(kprobe_opcode_t)); 580 addr = (kprobe_opcode_t *)(regs->ip - sizeof(kprobe_opcode_t));
578 /* 581 /*
579 * We don't want to be preempted for the entire 582 * We don't want to be preempted for the entire
diff --git a/arch/x86/xen/grant-table.c b/arch/x86/xen/grant-table.c
index c98583588580..ebfa9b2c871d 100644
--- a/arch/x86/xen/grant-table.c
+++ b/arch/x86/xen/grant-table.c
@@ -36,99 +36,133 @@
36 36
37#include <linux/sched.h> 37#include <linux/sched.h>
38#include <linux/mm.h> 38#include <linux/mm.h>
39#include <linux/slab.h>
39#include <linux/vmalloc.h> 40#include <linux/vmalloc.h>
40 41
41#include <xen/interface/xen.h> 42#include <xen/interface/xen.h>
42#include <xen/page.h> 43#include <xen/page.h>
43#include <xen/grant_table.h> 44#include <xen/grant_table.h>
45#include <xen/xen.h>
44 46
45#include <asm/pgtable.h> 47#include <asm/pgtable.h>
46 48
47static int map_pte_fn(pte_t *pte, struct page *pmd_page, 49static struct gnttab_vm_area {
48 unsigned long addr, void *data) 50 struct vm_struct *area;
51 pte_t **ptes;
52} gnttab_shared_vm_area, gnttab_status_vm_area;
53
54int arch_gnttab_map_shared(unsigned long *frames, unsigned long nr_gframes,
55 unsigned long max_nr_gframes,
56 void **__shared)
49{ 57{
50 unsigned long **frames = (unsigned long **)data; 58 void *shared = *__shared;
59 unsigned long addr;
60 unsigned long i;
51 61
52 set_pte_at(&init_mm, addr, pte, mfn_pte((*frames)[0], PAGE_KERNEL)); 62 if (shared == NULL)
53 (*frames)++; 63 *__shared = shared = gnttab_shared_vm_area.area->addr;
54 return 0;
55}
56 64
57/* 65 addr = (unsigned long)shared;
58 * This function is used to map shared frames to store grant status. It is 66
59 * different from map_pte_fn above, the frames type here is uint64_t. 67 for (i = 0; i < nr_gframes; i++) {
60 */ 68 set_pte_at(&init_mm, addr, gnttab_shared_vm_area.ptes[i],
61static int map_pte_fn_status(pte_t *pte, struct page *pmd_page, 69 mfn_pte(frames[i], PAGE_KERNEL));
62 unsigned long addr, void *data) 70 addr += PAGE_SIZE;
63{ 71 }
64 uint64_t **frames = (uint64_t **)data;
65 72
66 set_pte_at(&init_mm, addr, pte, mfn_pte((*frames)[0], PAGE_KERNEL));
67 (*frames)++;
68 return 0; 73 return 0;
69} 74}
70 75
71static int unmap_pte_fn(pte_t *pte, struct page *pmd_page, 76int arch_gnttab_map_status(uint64_t *frames, unsigned long nr_gframes,
72 unsigned long addr, void *data) 77 unsigned long max_nr_gframes,
78 grant_status_t **__shared)
73{ 79{
80 grant_status_t *shared = *__shared;
81 unsigned long addr;
82 unsigned long i;
83
84 if (shared == NULL)
85 *__shared = shared = gnttab_status_vm_area.area->addr;
86
87 addr = (unsigned long)shared;
88
89 for (i = 0; i < nr_gframes; i++) {
90 set_pte_at(&init_mm, addr, gnttab_status_vm_area.ptes[i],
91 mfn_pte(frames[i], PAGE_KERNEL));
92 addr += PAGE_SIZE;
93 }
74 94
75 set_pte_at(&init_mm, addr, pte, __pte(0));
76 return 0; 95 return 0;
77} 96}
78 97
79int arch_gnttab_map_shared(unsigned long *frames, unsigned long nr_gframes, 98void arch_gnttab_unmap(void *shared, unsigned long nr_gframes)
80 unsigned long max_nr_gframes,
81 void **__shared)
82{ 99{
83 int rc; 100 pte_t **ptes;
84 void *shared = *__shared; 101 unsigned long addr;
102 unsigned long i;
85 103
86 if (shared == NULL) { 104 if (shared == gnttab_status_vm_area.area->addr)
87 struct vm_struct *area = 105 ptes = gnttab_status_vm_area.ptes;
88 alloc_vm_area(PAGE_SIZE * max_nr_gframes, NULL); 106 else
89 BUG_ON(area == NULL); 107 ptes = gnttab_shared_vm_area.ptes;
90 shared = area->addr;
91 *__shared = shared;
92 }
93 108
94 rc = apply_to_page_range(&init_mm, (unsigned long)shared, 109 addr = (unsigned long)shared;
95 PAGE_SIZE * nr_gframes, 110
96 map_pte_fn, &frames); 111 for (i = 0; i < nr_gframes; i++) {
97 return rc; 112 set_pte_at(&init_mm, addr, ptes[i], __pte(0));
113 addr += PAGE_SIZE;
114 }
98} 115}
99 116
100int arch_gnttab_map_status(uint64_t *frames, unsigned long nr_gframes, 117static int arch_gnttab_valloc(struct gnttab_vm_area *area, unsigned nr_frames)
101 unsigned long max_nr_gframes,
102 grant_status_t **__shared)
103{ 118{
104 int rc; 119 area->ptes = kmalloc(sizeof(pte_t *) * nr_frames, GFP_KERNEL);
105 grant_status_t *shared = *__shared; 120 if (area->ptes == NULL)
121 return -ENOMEM;
106 122
107 if (shared == NULL) { 123 area->area = alloc_vm_area(PAGE_SIZE * nr_frames, area->ptes);
108 /* No need to pass in PTE as we are going to do it 124 if (area->area == NULL) {
109 * in apply_to_page_range anyhow. */ 125 kfree(area->ptes);
110 struct vm_struct *area = 126 return -ENOMEM;
111 alloc_vm_area(PAGE_SIZE * max_nr_gframes, NULL);
112 BUG_ON(area == NULL);
113 shared = area->addr;
114 *__shared = shared;
115 } 127 }
116 128
117 rc = apply_to_page_range(&init_mm, (unsigned long)shared, 129 return 0;
118 PAGE_SIZE * nr_gframes,
119 map_pte_fn_status, &frames);
120 return rc;
121} 130}
122 131
123void arch_gnttab_unmap(void *shared, unsigned long nr_gframes) 132static void arch_gnttab_vfree(struct gnttab_vm_area *area)
133{
134 free_vm_area(area->area);
135 kfree(area->ptes);
136}
137
138int arch_gnttab_init(unsigned long nr_shared, unsigned long nr_status)
124{ 139{
125 apply_to_page_range(&init_mm, (unsigned long)shared, 140 int ret;
126 PAGE_SIZE * nr_gframes, unmap_pte_fn, NULL); 141
142 if (!xen_pv_domain())
143 return 0;
144
145 ret = arch_gnttab_valloc(&gnttab_shared_vm_area, nr_shared);
146 if (ret < 0)
147 return ret;
148
149 /*
150 * Always allocate the space for the status frames in case
151 * we're migrated to a host with V2 support.
152 */
153 ret = arch_gnttab_valloc(&gnttab_status_vm_area, nr_status);
154 if (ret < 0)
155 goto err;
156
157 return 0;
158 err:
159 arch_gnttab_vfree(&gnttab_shared_vm_area);
160 return -ENOMEM;
127} 161}
162
128#ifdef CONFIG_XEN_PVH 163#ifdef CONFIG_XEN_PVH
129#include <xen/balloon.h> 164#include <xen/balloon.h>
130#include <xen/events.h> 165#include <xen/events.h>
131#include <xen/xen.h>
132#include <linux/slab.h> 166#include <linux/slab.h>
133static int __init xlated_setup_gnttab_pages(void) 167static int __init xlated_setup_gnttab_pages(void)
134{ 168{
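
Editor's note: the rewritten grant-table code above drops the apply_to_page_range()/map_pte_fn() callbacks in favor of a vm area whose pte pointers are captured once at allocation time, so later (re)mapping is a plain loop over set_pte_at(). A compressed sketch of that allocation pattern follows; it is not a standalone module, just the shape of the calls (alloc_vm_area(), set_pte_at(), mfn_pte()) already used in the hunk, with illustrative names.

    /* Assumed headers: <linux/slab.h>, <linux/vmalloc.h>, <asm/pgtable.h>,
     * <xen/page.h>.  alloc_vm_area() fills in one pte pointer per page. */
    struct mapped_area {
            struct vm_struct *area;
            pte_t **ptes;
    };

    static int mapped_area_alloc(struct mapped_area *ma, unsigned int nr_frames)
    {
            ma->ptes = kmalloc(sizeof(pte_t *) * nr_frames, GFP_KERNEL);
            if (!ma->ptes)
                    return -ENOMEM;

            ma->area = alloc_vm_area(PAGE_SIZE * nr_frames, ma->ptes);
            if (!ma->area) {
                    kfree(ma->ptes);
                    return -ENOMEM;
            }
            return 0;
    }

    /* Later remaps need no page-table walk: just point each saved pte at the
     * machine frame that should back it. */
    static void mapped_area_map(struct mapped_area *ma, const unsigned long *mfns,
                                unsigned int nr_frames)
    {
            unsigned long addr = (unsigned long)ma->area->addr;
            unsigned int i;

            for (i = 0; i < nr_frames; i++) {
                    set_pte_at(&init_mm, addr, ma->ptes[i],
                               mfn_pte(mfns[i], PAGE_KERNEL));
                    addr += PAGE_SIZE;
            }
    }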
diff --git a/arch/xtensa/kernel/vectors.S b/arch/xtensa/kernel/vectors.S
index f9e1ec346e35..8453e6e39895 100644
--- a/arch/xtensa/kernel/vectors.S
+++ b/arch/xtensa/kernel/vectors.S
@@ -376,38 +376,42 @@ _DoubleExceptionVector_WindowOverflow:
376 beqz a2, 1f # if at start of vector, don't restore 376 beqz a2, 1f # if at start of vector, don't restore
377 377
378 addi a0, a0, -128 378 addi a0, a0, -128
379 bbsi a0, 8, 1f # don't restore except for overflow 8 and 12 379 bbsi.l a0, 8, 1f # don't restore except for overflow 8 and 12
380 bbsi a0, 7, 2f 380
381 /*
382 * This fixup handler is for the extremely unlikely case where the
383 * overflow handler's reference thru a0 gets a hardware TLB refill
384 * that bumps out the (distinct, aliasing) TLB entry that mapped its
385 * prior references thru a9/a13, and where our reference now thru
386 * a9/a13 gets a 2nd-level miss exception (not hardware TLB refill).
387 */
388 movi a2, window_overflow_restore_a0_fixup
389 s32i a2, a3, EXC_TABLE_FIXUP
390 l32i a2, a3, EXC_TABLE_DOUBLE_SAVE
391 xsr a3, excsave1
392
393 bbsi.l a0, 7, 2f
381 394
382 /* 395 /*
383 * Restore a0 as saved by _WindowOverflow8(). 396 * Restore a0 as saved by _WindowOverflow8().
384 *
385 * FIXME: we really need a fixup handler for this L32E,
386 * for the extremely unlikely case where the overflow handler's
387 * reference thru a0 gets a hardware TLB refill that bumps out
388 * the (distinct, aliasing) TLB entry that mapped its prior
389 * references thru a9, and where our reference now thru a9
390 * gets a 2nd-level miss exception (not hardware TLB refill).
391 */ 397 */
392 398
393 l32e a2, a9, -16 399 l32e a0, a9, -16
394 wsr a2, depc # replace the saved a0 400 wsr a0, depc # replace the saved a0
395 j 1f 401 j 3f
396 402
3972: 4032:
398 /* 404 /*
399 * Restore a0 as saved by _WindowOverflow12(). 405 * Restore a0 as saved by _WindowOverflow12().
400 *
401 * FIXME: we really need a fixup handler for this L32E,
402 * for the extremely unlikely case where the overflow handler's
403 * reference thru a0 gets a hardware TLB refill that bumps out
404 * the (distinct, aliasing) TLB entry that mapped its prior
405 * references thru a13, and where our reference now thru a13
406 * gets a 2nd-level miss exception (not hardware TLB refill).
407 */ 406 */
408 407
409 l32e a2, a13, -16 408 l32e a0, a13, -16
410 wsr a2, depc # replace the saved a0 409 wsr a0, depc # replace the saved a0
4103:
411 xsr a3, excsave1
412 movi a0, 0
413 s32i a0, a3, EXC_TABLE_FIXUP
414 s32i a2, a3, EXC_TABLE_DOUBLE_SAVE
4111: 4151:
412 /* 416 /*
413 * Restore WindowBase while leaving all address registers restored. 417 * Restore WindowBase while leaving all address registers restored.
@@ -449,6 +453,7 @@ _DoubleExceptionVector_WindowOverflow:
449 453
450 s32i a0, a2, PT_DEPC 454 s32i a0, a2, PT_DEPC
451 455
456_DoubleExceptionVector_handle_exception:
452 addx4 a0, a0, a3 457 addx4 a0, a0, a3
453 l32i a0, a0, EXC_TABLE_FAST_USER 458 l32i a0, a0, EXC_TABLE_FAST_USER
454 xsr a3, excsave1 459 xsr a3, excsave1
@@ -464,11 +469,120 @@ _DoubleExceptionVector_WindowOverflow:
464 rotw -3 469 rotw -3
465 j 1b 470 j 1b
466 471
467 .end literal_prefix
468 472
469ENDPROC(_DoubleExceptionVector) 473ENDPROC(_DoubleExceptionVector)
470 474
471/* 475/*
 476 * Fixup handler for TLB miss in double exception handler for window overflow.
477 * We get here with windowbase set to the window that was being spilled and
478 * a0 trashed. a0 bit 7 determines if this is a call8 (bit clear) or call12
479 * (bit set) window.
480 *
481 * We do the following here:
482 * - go to the original window retaining a0 value;
483 * - set up exception stack to return back to appropriate a0 restore code
484 * (we'll need to rotate window back and there's no place to save this
485 * information, use different return address for that);
486 * - handle the exception;
487 * - go to the window that was being spilled;
488 * - set up window_overflow_restore_a0_fixup as a fixup routine;
489 * - reload a0;
490 * - restore the original window;
491 * - reset the default fixup routine;
492 * - return to user. By the time we get to this fixup handler all information
493 * about the conditions of the original double exception that happened in
494 * the window overflow handler is lost, so we just return to userspace to
495 * retry overflow from start.
496 *
497 * a0: value of depc, original value in depc
498 * a2: trashed, original value in EXC_TABLE_DOUBLE_SAVE
499 * a3: exctable, original value in excsave1
500 */
501
502ENTRY(window_overflow_restore_a0_fixup)
503
504 rsr a0, ps
505 extui a0, a0, PS_OWB_SHIFT, PS_OWB_WIDTH
506 rsr a2, windowbase
507 sub a0, a2, a0
508 extui a0, a0, 0, 3
509 l32i a2, a3, EXC_TABLE_DOUBLE_SAVE
510 xsr a3, excsave1
511
512 _beqi a0, 1, .Lhandle_1
513 _beqi a0, 3, .Lhandle_3
514
515 .macro overflow_fixup_handle_exception_pane n
516
517 rsr a0, depc
518 rotw -\n
519
520 xsr a3, excsave1
521 wsr a2, depc
522 l32i a2, a3, EXC_TABLE_KSTK
523 s32i a0, a2, PT_AREG0
524
525 movi a0, .Lrestore_\n
526 s32i a0, a2, PT_DEPC
527 rsr a0, exccause
528 j _DoubleExceptionVector_handle_exception
529
530 .endm
531
532 overflow_fixup_handle_exception_pane 2
533.Lhandle_1:
534 overflow_fixup_handle_exception_pane 1
535.Lhandle_3:
536 overflow_fixup_handle_exception_pane 3
537
538 .macro overflow_fixup_restore_a0_pane n
539
540 rotw \n
541 /* Need to preserve a0 value here to be able to handle exception
542 * that may occur on a0 reload from stack. It may occur because
543 * TLB miss handler may not be atomic and pointer to page table
544 * may be lost before we get here. There are no free registers,
545 * so we need to use EXC_TABLE_DOUBLE_SAVE area.
546 */
547 xsr a3, excsave1
548 s32i a2, a3, EXC_TABLE_DOUBLE_SAVE
549 movi a2, window_overflow_restore_a0_fixup
550 s32i a2, a3, EXC_TABLE_FIXUP
551 l32i a2, a3, EXC_TABLE_DOUBLE_SAVE
552 xsr a3, excsave1
553 bbsi.l a0, 7, 1f
554 l32e a0, a9, -16
555 j 2f
5561:
557 l32e a0, a13, -16
5582:
559 rotw -\n
560
561 .endm
562
563.Lrestore_2:
564 overflow_fixup_restore_a0_pane 2
565
566.Lset_default_fixup:
567 xsr a3, excsave1
568 s32i a2, a3, EXC_TABLE_DOUBLE_SAVE
569 movi a2, 0
570 s32i a2, a3, EXC_TABLE_FIXUP
571 l32i a2, a3, EXC_TABLE_DOUBLE_SAVE
572 xsr a3, excsave1
573 rfe
574
575.Lrestore_1:
576 overflow_fixup_restore_a0_pane 1
577 j .Lset_default_fixup
578.Lrestore_3:
579 overflow_fixup_restore_a0_pane 3
580 j .Lset_default_fixup
581
582ENDPROC(window_overflow_restore_a0_fixup)
583
584 .end literal_prefix
585/*
472 * Debug interrupt vector 586 * Debug interrupt vector
473 * 587 *
474 * There is not much space here, so simply jump to another handler. 588 * There is not much space here, so simply jump to another handler.
diff --git a/arch/xtensa/kernel/vmlinux.lds.S b/arch/xtensa/kernel/vmlinux.lds.S
index ee32c0085dff..d16db6df86f8 100644
--- a/arch/xtensa/kernel/vmlinux.lds.S
+++ b/arch/xtensa/kernel/vmlinux.lds.S
@@ -269,13 +269,13 @@ SECTIONS
269 .UserExceptionVector.literal) 269 .UserExceptionVector.literal)
270 SECTION_VECTOR (_DoubleExceptionVector_literal, 270 SECTION_VECTOR (_DoubleExceptionVector_literal,
271 .DoubleExceptionVector.literal, 271 .DoubleExceptionVector.literal,
272 DOUBLEEXC_VECTOR_VADDR - 16, 272 DOUBLEEXC_VECTOR_VADDR - 40,
273 SIZEOF(.UserExceptionVector.text), 273 SIZEOF(.UserExceptionVector.text),
274 .UserExceptionVector.text) 274 .UserExceptionVector.text)
275 SECTION_VECTOR (_DoubleExceptionVector_text, 275 SECTION_VECTOR (_DoubleExceptionVector_text,
276 .DoubleExceptionVector.text, 276 .DoubleExceptionVector.text,
277 DOUBLEEXC_VECTOR_VADDR, 277 DOUBLEEXC_VECTOR_VADDR,
278 32, 278 40,
279 .DoubleExceptionVector.literal) 279 .DoubleExceptionVector.literal)
280 280
281 . = (LOADADDR( .DoubleExceptionVector.text ) + SIZEOF( .DoubleExceptionVector.text ) + 3) & ~ 3; 281 . = (LOADADDR( .DoubleExceptionVector.text ) + SIZEOF( .DoubleExceptionVector.text ) + 3) & ~ 3;
diff --git a/arch/xtensa/mm/init.c b/arch/xtensa/mm/init.c
index 4224256bb215..77ed20209ca5 100644
--- a/arch/xtensa/mm/init.c
+++ b/arch/xtensa/mm/init.c
@@ -191,7 +191,7 @@ int __init mem_reserve(unsigned long start, unsigned long end, int must_exist)
191 return -EINVAL; 191 return -EINVAL;
192 } 192 }
193 193
194 if (it && start - it->start < bank_sz) { 194 if (it && start - it->start <= bank_sz) {
195 if (start == it->start) { 195 if (start == it->start) {
196 if (end - it->start < bank_sz) { 196 if (end - it->start < bank_sz) {
197 it->start = end; 197 it->start = end;
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index d19c37a7abc9..677c0c1b03bd 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -4798,9 +4798,8 @@ void swap_buf_le16(u16 *buf, unsigned int buf_words)
4798static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap) 4798static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap)
4799{ 4799{
4800 struct ata_queued_cmd *qc = NULL; 4800 struct ata_queued_cmd *qc = NULL;
4801 unsigned int i, tag, max_queue; 4801 unsigned int max_queue = ap->host->n_tags;
4802 4802 unsigned int i, tag;
4803 max_queue = ap->scsi_host->can_queue;
4804 4803
4805 /* no command while frozen */ 4804 /* no command while frozen */
4806 if (unlikely(ap->pflags & ATA_PFLAG_FROZEN)) 4805 if (unlikely(ap->pflags & ATA_PFLAG_FROZEN))
@@ -6094,6 +6093,7 @@ void ata_host_init(struct ata_host *host, struct device *dev,
6094{ 6093{
6095 spin_lock_init(&host->lock); 6094 spin_lock_init(&host->lock);
6096 mutex_init(&host->eh_mutex); 6095 mutex_init(&host->eh_mutex);
6096 host->n_tags = ATA_MAX_QUEUE - 1;
6097 host->dev = dev; 6097 host->dev = dev;
6098 host->ops = ops; 6098 host->ops = ops;
6099} 6099}
@@ -6175,15 +6175,7 @@ int ata_host_register(struct ata_host *host, struct scsi_host_template *sht)
6175{ 6175{
6176 int i, rc; 6176 int i, rc;
6177 6177
6178 /* 6178 host->n_tags = clamp(sht->can_queue, 1, ATA_MAX_QUEUE - 1);
6179 * The max queue supported by hardware must not be greater than
6180 * ATA_MAX_QUEUE.
6181 */
6182 if (sht->can_queue > ATA_MAX_QUEUE) {
6183 dev_err(host->dev, "BUG: the hardware max queue is too large\n");
6184 WARN_ON(1);
6185 return -EINVAL;
6186 }
6187 6179
6188 /* host must have been started */ 6180 /* host must have been started */
6189 if (!(host->flags & ATA_HOST_STARTED)) { 6181 if (!(host->flags & ATA_HOST_STARTED)) {
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index 089e72cd37be..36e54be402df 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -622,11 +622,18 @@ static void zram_reset_device(struct zram *zram, bool reset_capacity)
622 memset(&zram->stats, 0, sizeof(zram->stats)); 622 memset(&zram->stats, 0, sizeof(zram->stats));
623 623
624 zram->disksize = 0; 624 zram->disksize = 0;
625 if (reset_capacity) { 625 if (reset_capacity)
626 set_capacity(zram->disk, 0); 626 set_capacity(zram->disk, 0);
627 revalidate_disk(zram->disk); 627
628 }
629 up_write(&zram->init_lock); 628 up_write(&zram->init_lock);
629
630 /*
631 * Revalidate disk out of the init_lock to avoid lockdep splat.
632 * It's okay because disk's capacity is protected by init_lock
633 * so that revalidate_disk always sees up-to-date capacity.
634 */
635 if (reset_capacity)
636 revalidate_disk(zram->disk);
630} 637}
631 638
632static ssize_t disksize_store(struct device *dev, 639static ssize_t disksize_store(struct device *dev,
@@ -666,8 +673,15 @@ static ssize_t disksize_store(struct device *dev,
666 zram->comp = comp; 673 zram->comp = comp;
667 zram->disksize = disksize; 674 zram->disksize = disksize;
668 set_capacity(zram->disk, zram->disksize >> SECTOR_SHIFT); 675 set_capacity(zram->disk, zram->disksize >> SECTOR_SHIFT);
669 revalidate_disk(zram->disk);
670 up_write(&zram->init_lock); 676 up_write(&zram->init_lock);
677
678 /*
679 * Revalidate disk out of the init_lock to avoid lockdep splat.
680 * It's okay because disk's capacity is protected by init_lock
681 * so that revalidate_disk always sees up-to-date capacity.
682 */
683 revalidate_disk(zram->disk);
684
671 return len; 685 return len;
672 686
673out_destroy_comp: 687out_destroy_comp:
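
Editor's note: both zram hunks above follow the same pattern. Everything that genuinely needs init_lock (resetting state, set_capacity()) stays inside the write-locked section, while revalidate_disk(), which takes other locks of its own, runs only after the lock is dropped; that is what makes the lockdep report go away. A minimal shape of the change, using the names from the hunk:

    down_write(&zram->init_lock);
    /* ... tear down state; capacity changes are still protected here ... */
    if (reset_capacity)
            set_capacity(zram->disk, 0);
    up_write(&zram->init_lock);

    /*
     * Revalidate outside init_lock: the capacity it reads was made
     * consistent under the lock, so dropping the lock first is safe
     * and avoids the reported lock-order inversion.
     */
    if (reset_capacity)
            revalidate_disk(zram->disk);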
diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c
index 57985410f12f..a66a3217f1d9 100644
--- a/drivers/firewire/ohci.c
+++ b/drivers/firewire/ohci.c
@@ -336,10 +336,10 @@ static const struct {
336 QUIRK_CYCLE_TIMER | QUIRK_IR_WAKE}, 336 QUIRK_CYCLE_TIMER | QUIRK_IR_WAKE},
337 337
338 {PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_VT6315, 0, 338 {PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_VT6315, 0,
339 QUIRK_CYCLE_TIMER | QUIRK_NO_MSI}, 339 QUIRK_CYCLE_TIMER /* FIXME: necessary? */ | QUIRK_NO_MSI},
340 340
341 {PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_VT6315, PCI_ANY_ID, 341 {PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_VT6315, PCI_ANY_ID,
342 0}, 342 QUIRK_NO_MSI},
343 343
344 {PCI_VENDOR_ID_VIA, PCI_ANY_ID, PCI_ANY_ID, 344 {PCI_VENDOR_ID_VIA, PCI_ANY_ID, PCI_ANY_ID,
345 QUIRK_CYCLE_TIMER | QUIRK_NO_MSI}, 345 QUIRK_CYCLE_TIMER | QUIRK_NO_MSI},
diff --git a/drivers/gpio/gpio-rcar.c b/drivers/gpio/gpio-rcar.c
index 0c9f803fc1ac..b6ae89ea8811 100644
--- a/drivers/gpio/gpio-rcar.c
+++ b/drivers/gpio/gpio-rcar.c
@@ -284,6 +284,7 @@ static int gpio_rcar_irq_domain_map(struct irq_domain *h, unsigned int irq,
284 284
285static struct irq_domain_ops gpio_rcar_irq_domain_ops = { 285static struct irq_domain_ops gpio_rcar_irq_domain_ops = {
286 .map = gpio_rcar_irq_domain_map, 286 .map = gpio_rcar_irq_domain_map,
287 .xlate = irq_domain_xlate_twocell,
287}; 288};
288 289
289struct gpio_rcar_info { 290struct gpio_rcar_info {
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index f36126383d26..d893e4da5dce 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1616,22 +1616,6 @@ out:
1616 return ret; 1616 return ret;
1617} 1617}
1618 1618
1619void i915_gem_release_all_mmaps(struct drm_i915_private *dev_priv)
1620{
1621 struct i915_vma *vma;
1622
1623 /*
1624 * Only the global gtt is relevant for gtt memory mappings, so restrict
1625 * list traversal to objects bound into the global address space. Note
1626 * that the active list should be empty, but better safe than sorry.
1627 */
1628 WARN_ON(!list_empty(&dev_priv->gtt.base.active_list));
1629 list_for_each_entry(vma, &dev_priv->gtt.base.active_list, mm_list)
1630 i915_gem_release_mmap(vma->obj);
1631 list_for_each_entry(vma, &dev_priv->gtt.base.inactive_list, mm_list)
1632 i915_gem_release_mmap(vma->obj);
1633}
1634
1635/** 1619/**
1636 * i915_gem_release_mmap - remove physical page mappings 1620 * i915_gem_release_mmap - remove physical page mappings
1637 * @obj: obj in question 1621 * @obj: obj in question
@@ -1657,6 +1641,15 @@ i915_gem_release_mmap(struct drm_i915_gem_object *obj)
1657 obj->fault_mappable = false; 1641 obj->fault_mappable = false;
1658} 1642}
1659 1643
1644void
1645i915_gem_release_all_mmaps(struct drm_i915_private *dev_priv)
1646{
1647 struct drm_i915_gem_object *obj;
1648
1649 list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list)
1650 i915_gem_release_mmap(obj);
1651}
1652
1660uint32_t 1653uint32_t
1661i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode) 1654i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode)
1662{ 1655{
diff --git a/drivers/gpu/drm/i915/i915_gem_render_state.c b/drivers/gpu/drm/i915/i915_gem_render_state.c
index 3521f998a178..34894b573064 100644
--- a/drivers/gpu/drm/i915/i915_gem_render_state.c
+++ b/drivers/gpu/drm/i915/i915_gem_render_state.c
@@ -31,7 +31,7 @@
31struct i915_render_state { 31struct i915_render_state {
32 struct drm_i915_gem_object *obj; 32 struct drm_i915_gem_object *obj;
33 unsigned long ggtt_offset; 33 unsigned long ggtt_offset;
34 void *batch; 34 u32 *batch;
35 u32 size; 35 u32 size;
36 u32 len; 36 u32 len;
37}; 37};
@@ -80,7 +80,7 @@ free:
80 80
81static void render_state_free(struct i915_render_state *so) 81static void render_state_free(struct i915_render_state *so)
82{ 82{
83 kunmap(so->batch); 83 kunmap(kmap_to_page(so->batch));
84 i915_gem_object_ggtt_unpin(so->obj); 84 i915_gem_object_ggtt_unpin(so->obj);
85 drm_gem_object_unreference(&so->obj->base); 85 drm_gem_object_unreference(&so->obj->base);
86 kfree(so); 86 kfree(so);
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 267f069765ad..c05c84f3f091 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -2845,7 +2845,7 @@ static int semaphore_passed(struct intel_engine_cs *ring)
2845{ 2845{
2846 struct drm_i915_private *dev_priv = ring->dev->dev_private; 2846 struct drm_i915_private *dev_priv = ring->dev->dev_private;
2847 struct intel_engine_cs *signaller; 2847 struct intel_engine_cs *signaller;
2848 u32 seqno, ctl; 2848 u32 seqno;
2849 2849
2850 ring->hangcheck.deadlock++; 2850 ring->hangcheck.deadlock++;
2851 2851
@@ -2857,15 +2857,12 @@ static int semaphore_passed(struct intel_engine_cs *ring)
2857 if (signaller->hangcheck.deadlock >= I915_NUM_RINGS) 2857 if (signaller->hangcheck.deadlock >= I915_NUM_RINGS)
2858 return -1; 2858 return -1;
2859 2859
2860 /* cursory check for an unkickable deadlock */
2861 ctl = I915_READ_CTL(signaller);
2862 if (ctl & RING_WAIT_SEMAPHORE && semaphore_passed(signaller) < 0)
2863 return -1;
2864
2865 if (i915_seqno_passed(signaller->get_seqno(signaller, false), seqno)) 2860 if (i915_seqno_passed(signaller->get_seqno(signaller, false), seqno))
2866 return 1; 2861 return 1;
2867 2862
2868 if (signaller->hangcheck.deadlock) 2863 /* cursory check for an unkickable deadlock */
2864 if (I915_READ_CTL(signaller) & RING_WAIT_SEMAPHORE &&
2865 semaphore_passed(signaller) < 0)
2869 return -1; 2866 return -1;
2870 2867
2871 return 0; 2868 return 0;
diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c
index 0b2471107137..c0ea66192fe0 100644
--- a/drivers/gpu/drm/radeon/cik.c
+++ b/drivers/gpu/drm/radeon/cik.c
@@ -2291,6 +2291,7 @@ static void cik_tiling_mode_table_init(struct radeon_device *rdev)
2291 gb_tile_moden = 0; 2291 gb_tile_moden = 0;
2292 break; 2292 break;
2293 } 2293 }
2294 rdev->config.cik.macrotile_mode_array[reg_offset] = gb_tile_moden;
2294 WREG32(GB_MACROTILE_MODE0 + (reg_offset * 4), gb_tile_moden); 2295 WREG32(GB_MACROTILE_MODE0 + (reg_offset * 4), gb_tile_moden);
2295 } 2296 }
2296 } else if (num_pipe_configs == 8) { 2297 } else if (num_pipe_configs == 8) {
@@ -7376,6 +7377,7 @@ static inline u32 cik_get_ih_wptr(struct radeon_device *rdev)
7376 tmp = RREG32(IH_RB_CNTL); 7377 tmp = RREG32(IH_RB_CNTL);
7377 tmp |= IH_WPTR_OVERFLOW_CLEAR; 7378 tmp |= IH_WPTR_OVERFLOW_CLEAR;
7378 WREG32(IH_RB_CNTL, tmp); 7379 WREG32(IH_RB_CNTL, tmp);
7380 wptr &= ~RB_OVERFLOW;
7379 } 7381 }
7380 return (wptr & rdev->ih.ptr_mask); 7382 return (wptr & rdev->ih.ptr_mask);
7381} 7383}
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index 250bac3935a4..15e4f28015e1 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -4756,6 +4756,7 @@ static u32 evergreen_get_ih_wptr(struct radeon_device *rdev)
4756 tmp = RREG32(IH_RB_CNTL); 4756 tmp = RREG32(IH_RB_CNTL);
4757 tmp |= IH_WPTR_OVERFLOW_CLEAR; 4757 tmp |= IH_WPTR_OVERFLOW_CLEAR;
4758 WREG32(IH_RB_CNTL, tmp); 4758 WREG32(IH_RB_CNTL, tmp);
4759 wptr &= ~RB_OVERFLOW;
4759 } 4760 }
4760 return (wptr & rdev->ih.ptr_mask); 4761 return (wptr & rdev->ih.ptr_mask);
4761} 4762}
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index c66952d4b00c..3c69f58e46ef 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -3795,6 +3795,7 @@ static u32 r600_get_ih_wptr(struct radeon_device *rdev)
3795 tmp = RREG32(IH_RB_CNTL); 3795 tmp = RREG32(IH_RB_CNTL);
3796 tmp |= IH_WPTR_OVERFLOW_CLEAR; 3796 tmp |= IH_WPTR_OVERFLOW_CLEAR;
3797 WREG32(IH_RB_CNTL, tmp); 3797 WREG32(IH_RB_CNTL, tmp);
3798 wptr &= ~RB_OVERFLOW;
3798 } 3799 }
3799 return (wptr & rdev->ih.ptr_mask); 3800 return (wptr & rdev->ih.ptr_mask);
3800} 3801}
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index b7204500a9a6..60c47f829122 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -449,6 +449,7 @@ struct radeon_bo_va {
449 449
450 /* protected by vm mutex */ 450 /* protected by vm mutex */
451 struct list_head vm_list; 451 struct list_head vm_list;
452 struct list_head vm_status;
452 453
453 /* constant after initialization */ 454 /* constant after initialization */
454 struct radeon_vm *vm; 455 struct radeon_vm *vm;
@@ -867,6 +868,9 @@ struct radeon_vm {
867 struct list_head va; 868 struct list_head va;
868 unsigned id; 869 unsigned id;
869 870
871 /* BOs freed, but not yet updated in the PT */
872 struct list_head freed;
873
870 /* contains the page directory */ 874 /* contains the page directory */
871 struct radeon_bo *page_directory; 875 struct radeon_bo *page_directory;
872 uint64_t pd_gpu_addr; 876 uint64_t pd_gpu_addr;
@@ -875,6 +879,8 @@ struct radeon_vm {
875 /* array of page tables, one for each page directory entry */ 879 /* array of page tables, one for each page directory entry */
876 struct radeon_vm_pt *page_tables; 880 struct radeon_vm_pt *page_tables;
877 881
882 struct radeon_bo_va *ib_bo_va;
883
878 struct mutex mutex; 884 struct mutex mutex;
879 /* last fence for cs using this vm */ 885 /* last fence for cs using this vm */
880 struct radeon_fence *fence; 886 struct radeon_fence *fence;
@@ -2832,9 +2838,10 @@ void radeon_vm_fence(struct radeon_device *rdev,
2832uint64_t radeon_vm_map_gart(struct radeon_device *rdev, uint64_t addr); 2838uint64_t radeon_vm_map_gart(struct radeon_device *rdev, uint64_t addr);
2833int radeon_vm_update_page_directory(struct radeon_device *rdev, 2839int radeon_vm_update_page_directory(struct radeon_device *rdev,
2834 struct radeon_vm *vm); 2840 struct radeon_vm *vm);
2841int radeon_vm_clear_freed(struct radeon_device *rdev,
2842 struct radeon_vm *vm);
2835int radeon_vm_bo_update(struct radeon_device *rdev, 2843int radeon_vm_bo_update(struct radeon_device *rdev,
2836 struct radeon_vm *vm, 2844 struct radeon_bo_va *bo_va,
2837 struct radeon_bo *bo,
2838 struct ttm_mem_reg *mem); 2845 struct ttm_mem_reg *mem);
2839void radeon_vm_bo_invalidate(struct radeon_device *rdev, 2846void radeon_vm_bo_invalidate(struct radeon_device *rdev,
2840 struct radeon_bo *bo); 2847 struct radeon_bo *bo);
@@ -2847,8 +2854,8 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev,
2847 struct radeon_bo_va *bo_va, 2854 struct radeon_bo_va *bo_va,
2848 uint64_t offset, 2855 uint64_t offset,
2849 uint32_t flags); 2856 uint32_t flags);
2850int radeon_vm_bo_rmv(struct radeon_device *rdev, 2857void radeon_vm_bo_rmv(struct radeon_device *rdev,
2851 struct radeon_bo_va *bo_va); 2858 struct radeon_bo_va *bo_va);
2852 2859
2853/* audio */ 2860/* audio */
2854void r600_audio_update_hdmi(struct work_struct *work); 2861void r600_audio_update_hdmi(struct work_struct *work);
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index 71a143461478..ae763f60c8a0 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -461,13 +461,23 @@ static int radeon_bo_vm_update_pte(struct radeon_cs_parser *p,
461 struct radeon_vm *vm) 461 struct radeon_vm *vm)
462{ 462{
463 struct radeon_device *rdev = p->rdev; 463 struct radeon_device *rdev = p->rdev;
464 struct radeon_bo_va *bo_va;
464 int i, r; 465 int i, r;
465 466
466 r = radeon_vm_update_page_directory(rdev, vm); 467 r = radeon_vm_update_page_directory(rdev, vm);
467 if (r) 468 if (r)
468 return r; 469 return r;
469 470
470 r = radeon_vm_bo_update(rdev, vm, rdev->ring_tmp_bo.bo, 471 r = radeon_vm_clear_freed(rdev, vm);
472 if (r)
473 return r;
474
475 if (vm->ib_bo_va == NULL) {
476 DRM_ERROR("Tmp BO not in VM!\n");
477 return -EINVAL;
478 }
479
480 r = radeon_vm_bo_update(rdev, vm->ib_bo_va,
471 &rdev->ring_tmp_bo.bo->tbo.mem); 481 &rdev->ring_tmp_bo.bo->tbo.mem);
472 if (r) 482 if (r)
473 return r; 483 return r;
@@ -480,7 +490,13 @@ static int radeon_bo_vm_update_pte(struct radeon_cs_parser *p,
480 continue; 490 continue;
481 491
482 bo = p->relocs[i].robj; 492 bo = p->relocs[i].robj;
483 r = radeon_vm_bo_update(rdev, vm, bo, &bo->tbo.mem); 493 bo_va = radeon_vm_bo_find(vm, bo);
494 if (bo_va == NULL) {
495 dev_err(rdev->dev, "bo %p not in vm %p\n", bo, vm);
496 return -EINVAL;
497 }
498
499 r = radeon_vm_bo_update(rdev, bo_va, &bo->tbo.mem);
484 if (r) 500 if (r)
485 return r; 501 return r;
486 } 502 }
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 03686fab842d..697add2cd4e3 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -1056,36 +1056,36 @@ static void radeon_check_arguments(struct radeon_device *rdev)
1056 if (!radeon_check_pot_argument(radeon_vm_size)) { 1056 if (!radeon_check_pot_argument(radeon_vm_size)) {
1057 dev_warn(rdev->dev, "VM size (%d) must be a power of 2\n", 1057 dev_warn(rdev->dev, "VM size (%d) must be a power of 2\n",
1058 radeon_vm_size); 1058 radeon_vm_size);
1059 radeon_vm_size = 4096; 1059 radeon_vm_size = 4;
1060 } 1060 }
1061 1061
1062 if (radeon_vm_size < 4) { 1062 if (radeon_vm_size < 1) {
1063 dev_warn(rdev->dev, "VM size (%d) to small, min is 4MB\n", 1063 dev_warn(rdev->dev, "VM size (%d) to small, min is 1GB\n",
1064 radeon_vm_size); 1064 radeon_vm_size);
1065 radeon_vm_size = 4096; 1065 radeon_vm_size = 4;
1066 } 1066 }
1067 1067
1068 /* 1068 /*
1069 * Max GPUVM size for Cayman, SI and CI are 40 bits. 1069 * Max GPUVM size for Cayman, SI and CI are 40 bits.
1070 */ 1070 */
1071 if (radeon_vm_size > 1024*1024) { 1071 if (radeon_vm_size > 1024) {
1072 dev_warn(rdev->dev, "VM size (%d) to large, max is 1TB\n", 1072 dev_warn(rdev->dev, "VM size (%d) too large, max is 1TB\n",
1073 radeon_vm_size); 1073 radeon_vm_size);
1074 radeon_vm_size = 4096; 1074 radeon_vm_size = 4;
1075 } 1075 }
1076 1076
1077 /* defines number of bits in page table versus page directory, 1077 /* defines number of bits in page table versus page directory,
1078 * a page is 4KB so we have 12 bits offset, minimum 9 bits in the 1078 * a page is 4KB so we have 12 bits offset, minimum 9 bits in the
1079 * page table and the remaining bits are in the page directory */ 1079 * page table and the remaining bits are in the page directory */
1080 if (radeon_vm_block_size < 9) { 1080 if (radeon_vm_block_size < 9) {
1081 dev_warn(rdev->dev, "VM page table size (%d) to small\n", 1081 dev_warn(rdev->dev, "VM page table size (%d) too small\n",
1082 radeon_vm_block_size); 1082 radeon_vm_block_size);
1083 radeon_vm_block_size = 9; 1083 radeon_vm_block_size = 9;
1084 } 1084 }
1085 1085
1086 if (radeon_vm_block_size > 24 || 1086 if (radeon_vm_block_size > 24 ||
1087 radeon_vm_size < (1ull << radeon_vm_block_size)) { 1087 (radeon_vm_size * 1024) < (1ull << radeon_vm_block_size)) {
1088 dev_warn(rdev->dev, "VM page table size (%d) to large\n", 1088 dev_warn(rdev->dev, "VM page table size (%d) too large\n",
1089 radeon_vm_block_size); 1089 radeon_vm_block_size);
1090 radeon_vm_block_size = 9; 1090 radeon_vm_block_size = 9;
1091 } 1091 }
@@ -1238,7 +1238,7 @@ int radeon_device_init(struct radeon_device *rdev,
1238 /* Adjust VM size here. 1238 /* Adjust VM size here.
1239 * Max GPUVM size for cayman+ is 40 bits. 1239 * Max GPUVM size for cayman+ is 40 bits.
1240 */ 1240 */
1241 rdev->vm_manager.max_pfn = radeon_vm_size << 8; 1241 rdev->vm_manager.max_pfn = radeon_vm_size << 18;
1242 1242
1243 /* Set asic functions */ 1243 /* Set asic functions */
1244 r = radeon_asic_init(rdev); 1244 r = radeon_asic_init(rdev);
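
Editor's note: the unit of radeon_vm_size changes here from megabytes to gigabytes, which is why the valid range becomes 1..1024 (1GB..1TB) and why max_pfn is now vm_size << 18 instead of << 8: with 4KB GPU pages there are 2^8 pages per MB but 2^18 pages per GB. A tiny standalone check of the arithmetic (plain C, illustrative only):

    #include <stdio.h>

    int main(void)
    {
            unsigned long long vm_size_gb = 4;               /* new default, in GB */
            unsigned long long page = 4096;                  /* GPU page size */
            unsigned long long max_pfn = vm_size_gb << 18;   /* as in the patch */

            printf("pages per GB: %llu\n", (1ULL << 30) / page); /* 262144 = 1 << 18 */
            printf("max_pfn:      %llu\n", max_pfn);             /* 1048576 pages    */
            printf("VM bytes:     %llu\n", max_pfn * page);      /* 4294967296 = 4GB */
            return 0;
    }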
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index cb1421369e3a..e9e361084249 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -173,7 +173,7 @@ int radeon_dpm = -1;
173int radeon_aspm = -1; 173int radeon_aspm = -1;
174int radeon_runtime_pm = -1; 174int radeon_runtime_pm = -1;
175int radeon_hard_reset = 0; 175int radeon_hard_reset = 0;
176int radeon_vm_size = 4096; 176int radeon_vm_size = 4;
177int radeon_vm_block_size = 9; 177int radeon_vm_block_size = 9;
178int radeon_deep_color = 0; 178int radeon_deep_color = 0;
179 179
@@ -243,7 +243,7 @@ module_param_named(runpm, radeon_runtime_pm, int, 0444);
243MODULE_PARM_DESC(hard_reset, "PCI config reset (1 = force enable, 0 = disable (default))"); 243MODULE_PARM_DESC(hard_reset, "PCI config reset (1 = force enable, 0 = disable (default))");
244module_param_named(hard_reset, radeon_hard_reset, int, 0444); 244module_param_named(hard_reset, radeon_hard_reset, int, 0444);
245 245
246MODULE_PARM_DESC(vm_size, "VM address space size in megabytes (default 4GB)"); 246MODULE_PARM_DESC(vm_size, "VM address space size in gigabytes (default 4GB)");
247module_param_named(vm_size, radeon_vm_size, int, 0444); 247module_param_named(vm_size, radeon_vm_size, int, 0444);
248 248
249MODULE_PARM_DESC(vm_block_size, "VM page table size in bits (default 9)"); 249MODULE_PARM_DESC(vm_block_size, "VM page table size in bits (default 9)");
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index 35d931881b4b..d25ae6acfd5a 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -579,7 +579,7 @@ int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
579 /* new gpu have virtual address space support */ 579 /* new gpu have virtual address space support */
580 if (rdev->family >= CHIP_CAYMAN) { 580 if (rdev->family >= CHIP_CAYMAN) {
581 struct radeon_fpriv *fpriv; 581 struct radeon_fpriv *fpriv;
582 struct radeon_bo_va *bo_va; 582 struct radeon_vm *vm;
583 int r; 583 int r;
584 584
585 fpriv = kzalloc(sizeof(*fpriv), GFP_KERNEL); 585 fpriv = kzalloc(sizeof(*fpriv), GFP_KERNEL);
@@ -587,7 +587,8 @@ int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
587 return -ENOMEM; 587 return -ENOMEM;
588 } 588 }
589 589
590 r = radeon_vm_init(rdev, &fpriv->vm); 590 vm = &fpriv->vm;
591 r = radeon_vm_init(rdev, vm);
591 if (r) { 592 if (r) {
592 kfree(fpriv); 593 kfree(fpriv);
593 return r; 594 return r;
@@ -596,22 +597,23 @@ int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
596 if (rdev->accel_working) { 597 if (rdev->accel_working) {
597 r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false); 598 r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false);
598 if (r) { 599 if (r) {
599 radeon_vm_fini(rdev, &fpriv->vm); 600 radeon_vm_fini(rdev, vm);
600 kfree(fpriv); 601 kfree(fpriv);
601 return r; 602 return r;
602 } 603 }
603 604
604 /* map the ib pool buffer read only into 605 /* map the ib pool buffer read only into
605 * virtual address space */ 606 * virtual address space */
606 bo_va = radeon_vm_bo_add(rdev, &fpriv->vm, 607 vm->ib_bo_va = radeon_vm_bo_add(rdev, vm,
607 rdev->ring_tmp_bo.bo); 608 rdev->ring_tmp_bo.bo);
608 r = radeon_vm_bo_set_addr(rdev, bo_va, RADEON_VA_IB_OFFSET, 609 r = radeon_vm_bo_set_addr(rdev, vm->ib_bo_va,
610 RADEON_VA_IB_OFFSET,
609 RADEON_VM_PAGE_READABLE | 611 RADEON_VM_PAGE_READABLE |
610 RADEON_VM_PAGE_SNOOPED); 612 RADEON_VM_PAGE_SNOOPED);
611 613
612 radeon_bo_unreserve(rdev->ring_tmp_bo.bo); 614 radeon_bo_unreserve(rdev->ring_tmp_bo.bo);
613 if (r) { 615 if (r) {
614 radeon_vm_fini(rdev, &fpriv->vm); 616 radeon_vm_fini(rdev, vm);
615 kfree(fpriv); 617 kfree(fpriv);
616 return r; 618 return r;
617 } 619 }
@@ -640,21 +642,19 @@ void radeon_driver_postclose_kms(struct drm_device *dev,
640 /* new gpu have virtual address space support */ 642 /* new gpu have virtual address space support */
641 if (rdev->family >= CHIP_CAYMAN && file_priv->driver_priv) { 643 if (rdev->family >= CHIP_CAYMAN && file_priv->driver_priv) {
642 struct radeon_fpriv *fpriv = file_priv->driver_priv; 644 struct radeon_fpriv *fpriv = file_priv->driver_priv;
643 struct radeon_bo_va *bo_va; 645 struct radeon_vm *vm = &fpriv->vm;
644 int r; 646 int r;
645 647
646 if (rdev->accel_working) { 648 if (rdev->accel_working) {
647 r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false); 649 r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false);
648 if (!r) { 650 if (!r) {
649 bo_va = radeon_vm_bo_find(&fpriv->vm, 651 if (vm->ib_bo_va)
650 rdev->ring_tmp_bo.bo); 652 radeon_vm_bo_rmv(rdev, vm->ib_bo_va);
651 if (bo_va)
652 radeon_vm_bo_rmv(rdev, bo_va);
653 radeon_bo_unreserve(rdev->ring_tmp_bo.bo); 653 radeon_bo_unreserve(rdev->ring_tmp_bo.bo);
654 } 654 }
655 } 655 }
656 656
657 radeon_vm_fini(rdev, &fpriv->vm); 657 radeon_vm_fini(rdev, vm);
658 kfree(fpriv); 658 kfree(fpriv);
659 file_priv->driver_priv = NULL; 659 file_priv->driver_priv = NULL;
660 } 660 }
diff --git a/drivers/gpu/drm/radeon/radeon_vm.c b/drivers/gpu/drm/radeon/radeon_vm.c
index eecff6bbd341..725d3669014f 100644
--- a/drivers/gpu/drm/radeon/radeon_vm.c
+++ b/drivers/gpu/drm/radeon/radeon_vm.c
@@ -332,6 +332,7 @@ struct radeon_bo_va *radeon_vm_bo_add(struct radeon_device *rdev,
332 bo_va->ref_count = 1; 332 bo_va->ref_count = 1;
333 INIT_LIST_HEAD(&bo_va->bo_list); 333 INIT_LIST_HEAD(&bo_va->bo_list);
334 INIT_LIST_HEAD(&bo_va->vm_list); 334 INIT_LIST_HEAD(&bo_va->vm_list);
335 INIT_LIST_HEAD(&bo_va->vm_status);
335 336
336 mutex_lock(&vm->mutex); 337 mutex_lock(&vm->mutex);
337 list_add(&bo_va->vm_list, &vm->va); 338 list_add(&bo_va->vm_list, &vm->va);
@@ -468,6 +469,19 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev,
468 head = &tmp->vm_list; 469 head = &tmp->vm_list;
469 } 470 }
470 471
472 if (bo_va->soffset) {
473 /* add a clone of the bo_va to clear the old address */
474 tmp = kzalloc(sizeof(struct radeon_bo_va), GFP_KERNEL);
475 if (!tmp) {
476 mutex_unlock(&vm->mutex);
477 return -ENOMEM;
478 }
479 tmp->soffset = bo_va->soffset;
480 tmp->eoffset = bo_va->eoffset;
481 tmp->vm = vm;
482 list_add(&tmp->vm_status, &vm->freed);
483 }
484
471 bo_va->soffset = soffset; 485 bo_va->soffset = soffset;
472 bo_va->eoffset = eoffset; 486 bo_va->eoffset = eoffset;
473 bo_va->flags = flags; 487 bo_va->flags = flags;
@@ -823,25 +837,19 @@ static void radeon_vm_update_ptes(struct radeon_device *rdev,
823 * Object have to be reserved and mutex must be locked! 837 * Object have to be reserved and mutex must be locked!
824 */ 838 */
825int radeon_vm_bo_update(struct radeon_device *rdev, 839int radeon_vm_bo_update(struct radeon_device *rdev,
826 struct radeon_vm *vm, 840 struct radeon_bo_va *bo_va,
827 struct radeon_bo *bo,
828 struct ttm_mem_reg *mem) 841 struct ttm_mem_reg *mem)
829{ 842{
843 struct radeon_vm *vm = bo_va->vm;
830 struct radeon_ib ib; 844 struct radeon_ib ib;
831 struct radeon_bo_va *bo_va;
832 unsigned nptes, ndw; 845 unsigned nptes, ndw;
833 uint64_t addr; 846 uint64_t addr;
834 int r; 847 int r;
835 848
836 bo_va = radeon_vm_bo_find(vm, bo);
837 if (bo_va == NULL) {
838 dev_err(rdev->dev, "bo %p not in vm %p\n", bo, vm);
839 return -EINVAL;
840 }
841 849
842 if (!bo_va->soffset) { 850 if (!bo_va->soffset) {
843 dev_err(rdev->dev, "bo %p don't has a mapping in vm %p\n", 851 dev_err(rdev->dev, "bo %p don't has a mapping in vm %p\n",
844 bo, vm); 852 bo_va->bo, vm);
845 return -EINVAL; 853 return -EINVAL;
846 } 854 }
847 855
@@ -868,7 +876,7 @@ int radeon_vm_bo_update(struct radeon_device *rdev,
868 876
869 trace_radeon_vm_bo_update(bo_va); 877 trace_radeon_vm_bo_update(bo_va);
870 878
871 nptes = radeon_bo_ngpu_pages(bo); 879 nptes = (bo_va->eoffset - bo_va->soffset) / RADEON_GPU_PAGE_SIZE;
872 880
873 /* padding, etc. */ 881 /* padding, etc. */
874 ndw = 64; 882 ndw = 64;
@@ -911,33 +919,61 @@ int radeon_vm_bo_update(struct radeon_device *rdev,
911} 919}
912 920
913/** 921/**
922 * radeon_vm_clear_freed - clear freed BOs in the PT
923 *
924 * @rdev: radeon_device pointer
925 * @vm: requested vm
926 *
927 * Make sure all freed BOs are cleared in the PT.
928 * Returns 0 for success.
929 *
930 * PTs have to be reserved and mutex must be locked!
931 */
932int radeon_vm_clear_freed(struct radeon_device *rdev,
933 struct radeon_vm *vm)
934{
935 struct radeon_bo_va *bo_va, *tmp;
936 int r;
937
938 list_for_each_entry_safe(bo_va, tmp, &vm->freed, vm_status) {
939 list_del(&bo_va->vm_status);
940 r = radeon_vm_bo_update(rdev, bo_va, NULL);
941 kfree(bo_va);
942 if (r)
943 return r;
944 }
945 return 0;
946
947}
948
949/**
914 * radeon_vm_bo_rmv - remove a bo to a specific vm 950 * radeon_vm_bo_rmv - remove a bo to a specific vm
915 * 951 *
916 * @rdev: radeon_device pointer 952 * @rdev: radeon_device pointer
917 * @bo_va: requested bo_va 953 * @bo_va: requested bo_va
918 * 954 *
919 * Remove @bo_va->bo from the requested vm (cayman+). 955 * Remove @bo_va->bo from the requested vm (cayman+).
920 * Remove @bo_va->bo from the list of bos associated with the bo_va->vm and
921 * remove the ptes for @bo_va in the page table.
922 * Returns 0 for success.
923 * 956 *
924 * Object have to be reserved! 957 * Object have to be reserved!
925 */ 958 */
926int radeon_vm_bo_rmv(struct radeon_device *rdev, 959void radeon_vm_bo_rmv(struct radeon_device *rdev,
927 struct radeon_bo_va *bo_va) 960 struct radeon_bo_va *bo_va)
928{ 961{
929 int r = 0; 962 struct radeon_vm *vm = bo_va->vm;
930 963
931 mutex_lock(&bo_va->vm->mutex); 964 list_del(&bo_va->bo_list);
932 if (bo_va->soffset)
933 r = radeon_vm_bo_update(rdev, bo_va->vm, bo_va->bo, NULL);
934 965
966 mutex_lock(&vm->mutex);
935 list_del(&bo_va->vm_list); 967 list_del(&bo_va->vm_list);
936 mutex_unlock(&bo_va->vm->mutex);
937 list_del(&bo_va->bo_list);
938 968
939 kfree(bo_va); 969 if (bo_va->soffset) {
940 return r; 970 bo_va->bo = NULL;
971 list_add(&bo_va->vm_status, &vm->freed);
972 } else {
973 kfree(bo_va);
974 }
975
976 mutex_unlock(&vm->mutex);
941} 977}
942 978
943/** 979/**
@@ -975,11 +1011,13 @@ int radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm)
975 int r; 1011 int r;
976 1012
977 vm->id = 0; 1013 vm->id = 0;
1014 vm->ib_bo_va = NULL;
978 vm->fence = NULL; 1015 vm->fence = NULL;
979 vm->last_flush = NULL; 1016 vm->last_flush = NULL;
980 vm->last_id_use = NULL; 1017 vm->last_id_use = NULL;
981 mutex_init(&vm->mutex); 1018 mutex_init(&vm->mutex);
982 INIT_LIST_HEAD(&vm->va); 1019 INIT_LIST_HEAD(&vm->va);
1020 INIT_LIST_HEAD(&vm->freed);
983 1021
984 pd_size = radeon_vm_directory_size(rdev); 1022 pd_size = radeon_vm_directory_size(rdev);
985 pd_entries = radeon_vm_num_pdes(rdev); 1023 pd_entries = radeon_vm_num_pdes(rdev);
@@ -1034,7 +1072,8 @@ void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm)
1034 kfree(bo_va); 1072 kfree(bo_va);
1035 } 1073 }
1036 } 1074 }
1037 1075 list_for_each_entry_safe(bo_va, tmp, &vm->freed, vm_status)
1076 kfree(bo_va);
1038 1077
1039 for (i = 0; i < radeon_vm_num_pdes(rdev); i++) 1078 for (i = 0; i < radeon_vm_num_pdes(rdev); i++)
1040 radeon_bo_unref(&vm->page_tables[i].bo); 1079 radeon_bo_unref(&vm->page_tables[i].bo);
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index eba0225259a4..9e854fd016da 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -6103,6 +6103,7 @@ static inline u32 si_get_ih_wptr(struct radeon_device *rdev)
6103 tmp = RREG32(IH_RB_CNTL); 6103 tmp = RREG32(IH_RB_CNTL);
6104 tmp |= IH_WPTR_OVERFLOW_CLEAR; 6104 tmp |= IH_WPTR_OVERFLOW_CLEAR;
6105 WREG32(IH_RB_CNTL, tmp); 6105 WREG32(IH_RB_CNTL, tmp);
6106 wptr &= ~RB_OVERFLOW;
6106 } 6107 }
6107 return (wptr & rdev->ih.ptr_mask); 6108 return (wptr & rdev->ih.ptr_mask);
6108} 6109}
diff --git a/drivers/gpu/drm/radeon/trinity_dpm.c b/drivers/gpu/drm/radeon/trinity_dpm.c
index 20da6ff183df..32e50be9c4ac 100644
--- a/drivers/gpu/drm/radeon/trinity_dpm.c
+++ b/drivers/gpu/drm/radeon/trinity_dpm.c
@@ -1874,15 +1874,16 @@ int trinity_dpm_init(struct radeon_device *rdev)
1874 for (i = 0; i < SUMO_MAX_HARDWARE_POWERLEVELS; i++) 1874 for (i = 0; i < SUMO_MAX_HARDWARE_POWERLEVELS; i++)
1875 pi->at[i] = TRINITY_AT_DFLT; 1875 pi->at[i] = TRINITY_AT_DFLT;
1876 1876
 1877 /* There are stability issues reported on latops with 1877 /* There are stability issues reported with
1878 * bapm installed when switching between AC and battery 1878 * bapm enabled when switching between AC and battery
1879 * power. At the same time, some desktop boards hang 1879 * power. At the same time, some MSI boards hang
1880 * if it's not enabled and dpm is enabled. 1880 * if it's not enabled and dpm is enabled. Just enable
1881 * it for MSI boards right now.
1881 */ 1882 */
1882 if (rdev->flags & RADEON_IS_MOBILITY) 1883 if (rdev->pdev->subsystem_vendor == 0x1462)
1883 pi->enable_bapm = false;
1884 else
1885 pi->enable_bapm = true; 1884 pi->enable_bapm = true;
1885 else
1886 pi->enable_bapm = false;
1886 pi->enable_nbps_policy = true; 1887 pi->enable_nbps_policy = true;
1887 pi->enable_sclk_ds = true; 1888 pi->enable_sclk_ds = true;
1888 pi->enable_gfx_power_gating = true; 1889 pi->enable_gfx_power_gating = true;
diff --git a/drivers/hwmon/smsc47m192.c b/drivers/hwmon/smsc47m192.c
index efee4c59239f..34b9a601ad07 100644
--- a/drivers/hwmon/smsc47m192.c
+++ b/drivers/hwmon/smsc47m192.c
@@ -86,7 +86,7 @@ static inline u8 IN_TO_REG(unsigned long val, int n)
86 */ 86 */
87static inline s8 TEMP_TO_REG(int val) 87static inline s8 TEMP_TO_REG(int val)
88{ 88{
89 return clamp_val(SCALE(val, 1, 1000), -128000, 127000); 89 return SCALE(clamp_val(val, -128000, 127000), 1, 1000);
90} 90}
91 91
92static inline int TEMP_FROM_REG(s8 val) 92static inline int TEMP_FROM_REG(s8 val)
@@ -384,6 +384,8 @@ static ssize_t set_vrm(struct device *dev, struct device_attribute *attr,
384 err = kstrtoul(buf, 10, &val); 384 err = kstrtoul(buf, 10, &val);
385 if (err) 385 if (err)
386 return err; 386 return err;
387 if (val > 255)
388 return -EINVAL;
387 389
388 data->vrm = val; 390 data->vrm = val;
389 return count; 391 return count;
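The TEMP_TO_REG() change above matters because the register is an s8 holding whole degrees: clamping after dividing by 1000 leaves the bounds (±128000) far outside the already-scaled range, so out-of-range inputs wrap silently when truncated to s8. A stand-alone user-space demonstration; the clamp and scale helpers here are simplified stand-ins for the driver's clamp_val()/SCALE() macros:

#include <stdio.h>
#include <stdint.h>

static long clamp_long(long v, long lo, long hi)
{
        return v < lo ? lo : (v > hi ? hi : v);
}

static int8_t temp_to_reg_old(long mdeg)
{
        /* scale first, then clamp: the bounds no longer match the scaled range */
        return (int8_t)clamp_long(mdeg / 1000, -128000, 127000);
}

static int8_t temp_to_reg_new(long mdeg)
{
        /* clamp the millidegree input first, then scale into the s8 range */
        return (int8_t)(clamp_long(mdeg, -128000, 127000) / 1000);
}

int main(void)
{
        long t = 300000;        /* 300 degC, far above the register range */

        printf("old: %d\n", temp_to_reg_old(t));  /* wraps into an s8 (44 here) */
        printf("new: %d\n", temp_to_reg_new(t));  /* saturates at 127 */
        return 0;
}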
diff --git a/drivers/input/input.c b/drivers/input/input.c
index 1c4c0db05550..29ca0bb4f561 100644
--- a/drivers/input/input.c
+++ b/drivers/input/input.c
@@ -257,9 +257,10 @@ static int input_handle_abs_event(struct input_dev *dev,
257} 257}
258 258
259static int input_get_disposition(struct input_dev *dev, 259static int input_get_disposition(struct input_dev *dev,
260 unsigned int type, unsigned int code, int value) 260 unsigned int type, unsigned int code, int *pval)
261{ 261{
262 int disposition = INPUT_IGNORE_EVENT; 262 int disposition = INPUT_IGNORE_EVENT;
263 int value = *pval;
263 264
264 switch (type) { 265 switch (type) {
265 266
@@ -357,6 +358,7 @@ static int input_get_disposition(struct input_dev *dev,
357 break; 358 break;
358 } 359 }
359 360
361 *pval = value;
360 return disposition; 362 return disposition;
361} 363}
362 364
@@ -365,7 +367,7 @@ static void input_handle_event(struct input_dev *dev,
365{ 367{
366 int disposition; 368 int disposition;
367 369
368 disposition = input_get_disposition(dev, type, code, value); 370 disposition = input_get_disposition(dev, type, code, &value);
369 371
370 if ((disposition & INPUT_PASS_TO_DEVICE) && dev->event) 372 if ((disposition & INPUT_PASS_TO_DEVICE) && dev->event)
371 dev->event(dev, type, code, value); 373 dev->event(dev, type, code, value);
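The input core change above switches input_get_disposition() to take the value by pointer so the disposition logic can rewrite it in place and the caller then emits the adjusted value. A generic sketch of the pattern, with invented names (classify_event and a made-up range check):

#include <stdio.h>

enum disposition { IGNORE = 0, PASS = 1 };

/* classify the event and, where needed, rewrite the value in place */
static enum disposition classify_event(int code, int *pval)
{
        if (code < 0)
                return IGNORE;

        if (*pval > 100)        /* normalise an out-of-range value */
                *pval = 100;

        return PASS;
}

int main(void)
{
        int value = 250;

        if (classify_event(3, &value) == PASS)
                printf("emit value %d\n", value);   /* prints 100, not 250 */
        return 0;
}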
diff --git a/drivers/input/keyboard/st-keyscan.c b/drivers/input/keyboard/st-keyscan.c
index 758b48731415..de7be4f03d91 100644
--- a/drivers/input/keyboard/st-keyscan.c
+++ b/drivers/input/keyboard/st-keyscan.c
@@ -215,6 +215,7 @@ static int keyscan_probe(struct platform_device *pdev)
215 return 0; 215 return 0;
216} 216}
217 217
218#ifdef CONFIG_PM_SLEEP
218static int keyscan_suspend(struct device *dev) 219static int keyscan_suspend(struct device *dev)
219{ 220{
220 struct platform_device *pdev = to_platform_device(dev); 221 struct platform_device *pdev = to_platform_device(dev);
@@ -249,6 +250,7 @@ static int keyscan_resume(struct device *dev)
249 mutex_unlock(&input->mutex); 250 mutex_unlock(&input->mutex);
250 return retval; 251 return retval;
251} 252}
253#endif
252 254
253static SIMPLE_DEV_PM_OPS(keyscan_dev_pm_ops, keyscan_suspend, keyscan_resume); 255static SIMPLE_DEV_PM_OPS(keyscan_dev_pm_ops, keyscan_suspend, keyscan_resume);
254 256
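The st-keyscan hunks wrap keyscan_suspend()/keyscan_resume() in #ifdef CONFIG_PM_SLEEP because SIMPLE_DEV_PM_OPS() only references the callbacks when CONFIG_PM_SLEEP is enabled; without the guard the functions are defined but unused and trigger compiler warnings. The usual shape of the pattern, with placeholder driver and callback names:

#include <linux/device.h>
#include <linux/pm.h>

#ifdef CONFIG_PM_SLEEP
static int foo_suspend(struct device *dev)
{
        /* quiesce the hardware */
        return 0;
}

static int foo_resume(struct device *dev)
{
        /* re-enable the hardware */
        return 0;
}
#endif

static SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);
/* marking the callbacks __maybe_unused is a common alternative to the #ifdef */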
diff --git a/drivers/input/misc/sirfsoc-onkey.c b/drivers/input/misc/sirfsoc-onkey.c
index e4104f9b2e6d..fed5102e1802 100644
--- a/drivers/input/misc/sirfsoc-onkey.c
+++ b/drivers/input/misc/sirfsoc-onkey.c
@@ -213,7 +213,7 @@ static struct platform_driver sirfsoc_pwrc_driver = {
213 213
214module_platform_driver(sirfsoc_pwrc_driver); 214module_platform_driver(sirfsoc_pwrc_driver);
215 215
216MODULE_LICENSE("GPLv2"); 216MODULE_LICENSE("GPL v2");
217MODULE_AUTHOR("Binghua Duan <Binghua.Duan@csr.com>, Xianglong Du <Xianglong.Du@csr.com>"); 217MODULE_AUTHOR("Binghua Duan <Binghua.Duan@csr.com>, Xianglong Du <Xianglong.Du@csr.com>");
218MODULE_DESCRIPTION("CSR Prima2 PWRC Driver"); 218MODULE_DESCRIPTION("CSR Prima2 PWRC Driver");
219MODULE_ALIAS("platform:sirfsoc-pwrc"); 219MODULE_ALIAS("platform:sirfsoc-pwrc");
diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
index ec772d962f06..ef9e0b8a9aa7 100644
--- a/drivers/input/mouse/synaptics.c
+++ b/drivers/input/mouse/synaptics.c
@@ -132,7 +132,8 @@ static const struct min_max_quirk min_max_pnpid_table[] = {
132 1232, 5710, 1156, 4696 132 1232, 5710, 1156, 4696
133 }, 133 },
134 { 134 {
135 (const char * const []){"LEN0034", "LEN0036", "LEN2004", NULL}, 135 (const char * const []){"LEN0034", "LEN0036", "LEN2002",
136 "LEN2004", NULL},
136 1024, 5112, 2024, 4832 137 1024, 5112, 2024, 4832
137 }, 138 },
138 { 139 {
@@ -168,7 +169,7 @@ static const char * const topbuttonpad_pnp_ids[] = {
168 "LEN0049", 169 "LEN0049",
169 "LEN2000", 170 "LEN2000",
170 "LEN2001", /* Edge E431 */ 171 "LEN2001", /* Edge E431 */
171 "LEN2002", 172 "LEN2002", /* Edge E531 */
172 "LEN2003", 173 "LEN2003",
173 "LEN2004", /* L440 */ 174 "LEN2004", /* L440 */
174 "LEN2005", 175 "LEN2005",
diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h
index 381b20d4c561..136b7b204f56 100644
--- a/drivers/input/serio/i8042-x86ia64io.h
+++ b/drivers/input/serio/i8042-x86ia64io.h
@@ -402,6 +402,13 @@ static const struct dmi_system_id __initconst i8042_dmi_nomux_table[] = {
402 }, 402 },
403 }, 403 },
404 { 404 {
405 /* Acer Aspire 5710 */
406 .matches = {
407 DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
408 DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5710"),
409 },
410 },
411 {
405 /* Gericom Bellagio */ 412 /* Gericom Bellagio */
406 .matches = { 413 .matches = {
407 DMI_MATCH(DMI_SYS_VENDOR, "Gericom"), 414 DMI_MATCH(DMI_SYS_VENDOR, "Gericom"),
diff --git a/drivers/input/tablet/wacom_wac.c b/drivers/input/tablet/wacom_wac.c
index 977d05cd9e2e..e73cf2c71f35 100644
--- a/drivers/input/tablet/wacom_wac.c
+++ b/drivers/input/tablet/wacom_wac.c
@@ -1217,9 +1217,9 @@ static void wacom_bpt3_touch_msg(struct wacom_wac *wacom, unsigned char *data)
1217 * a=(pi*r^2)/C. 1217 * a=(pi*r^2)/C.
1218 */ 1218 */
1219 int a = data[5]; 1219 int a = data[5];
1220 int x_res = input_abs_get_res(input, ABS_X); 1220 int x_res = input_abs_get_res(input, ABS_MT_POSITION_X);
1221 int y_res = input_abs_get_res(input, ABS_Y); 1221 int y_res = input_abs_get_res(input, ABS_MT_POSITION_Y);
1222 width = 2 * int_sqrt(a * WACOM_CONTACT_AREA_SCALE); 1222 width = 2 * int_sqrt(a * WACOM_CONTACT_AREA_SCALE);
1223 height = width * y_res / x_res; 1223 height = width * y_res / x_res;
1224 } 1224 }
1225 1225
@@ -1587,7 +1587,7 @@ static void wacom_abs_set_axis(struct input_dev *input_dev,
1587 input_abs_set_res(input_dev, ABS_X, features->x_resolution); 1587 input_abs_set_res(input_dev, ABS_X, features->x_resolution);
1588 input_abs_set_res(input_dev, ABS_Y, features->y_resolution); 1588 input_abs_set_res(input_dev, ABS_Y, features->y_resolution);
1589 } else { 1589 } else {
1590 if (features->touch_max <= 2) { 1590 if (features->touch_max == 1) {
1591 input_set_abs_params(input_dev, ABS_X, 0, 1591 input_set_abs_params(input_dev, ABS_X, 0,
1592 features->x_max, features->x_fuzz, 0); 1592 features->x_max, features->x_fuzz, 0);
1593 input_set_abs_params(input_dev, ABS_Y, 0, 1593 input_set_abs_params(input_dev, ABS_Y, 0,
@@ -1815,14 +1815,8 @@ int wacom_setup_input_capabilities(struct input_dev *input_dev,
1815 case MTTPC: 1815 case MTTPC:
1816 case MTTPC_B: 1816 case MTTPC_B:
1817 case TABLETPC2FG: 1817 case TABLETPC2FG:
1818 if (features->device_type == BTN_TOOL_FINGER) { 1818 if (features->device_type == BTN_TOOL_FINGER && features->touch_max > 1)
1819 unsigned int flags = INPUT_MT_DIRECT; 1819 input_mt_init_slots(input_dev, features->touch_max, INPUT_MT_DIRECT);
1820
1821 if (wacom_wac->features.type == TABLETPC2FG)
1822 flags = 0;
1823
1824 input_mt_init_slots(input_dev, features->touch_max, flags);
1825 }
1826 /* fall through */ 1820 /* fall through */
1827 1821
1828 case TABLETPC: 1822 case TABLETPC:
@@ -1883,10 +1877,6 @@ int wacom_setup_input_capabilities(struct input_dev *input_dev,
1883 __set_bit(BTN_RIGHT, input_dev->keybit); 1877 __set_bit(BTN_RIGHT, input_dev->keybit);
1884 1878
1885 if (features->touch_max) { 1879 if (features->touch_max) {
1886 /* touch interface */
1887 unsigned int flags = INPUT_MT_POINTER;
1888
1889 __set_bit(INPUT_PROP_POINTER, input_dev->propbit);
1890 if (features->pktlen == WACOM_PKGLEN_BBTOUCH3) { 1880 if (features->pktlen == WACOM_PKGLEN_BBTOUCH3) {
1891 input_set_abs_params(input_dev, 1881 input_set_abs_params(input_dev,
1892 ABS_MT_TOUCH_MAJOR, 1882 ABS_MT_TOUCH_MAJOR,
@@ -1894,12 +1884,8 @@ int wacom_setup_input_capabilities(struct input_dev *input_dev,
1894 input_set_abs_params(input_dev, 1884 input_set_abs_params(input_dev,
1895 ABS_MT_TOUCH_MINOR, 1885 ABS_MT_TOUCH_MINOR,
1896 0, features->y_max, 0, 0); 1886 0, features->y_max, 0, 0);
1897 } else {
1898 __set_bit(BTN_TOOL_FINGER, input_dev->keybit);
1899 __set_bit(BTN_TOOL_DOUBLETAP, input_dev->keybit);
1900 flags = 0;
1901 } 1887 }
1902 input_mt_init_slots(input_dev, features->touch_max, flags); 1888 input_mt_init_slots(input_dev, features->touch_max, INPUT_MT_POINTER);
1903 } else { 1889 } else {
1904 /* buttons/keys only interface */ 1890 /* buttons/keys only interface */
1905 __clear_bit(ABS_X, input_dev->absbit); 1891 __clear_bit(ABS_X, input_dev->absbit);
diff --git a/drivers/input/touchscreen/ti_am335x_tsc.c b/drivers/input/touchscreen/ti_am335x_tsc.c
index 4e793a17361f..2ce649520fe0 100644
--- a/drivers/input/touchscreen/ti_am335x_tsc.c
+++ b/drivers/input/touchscreen/ti_am335x_tsc.c
@@ -359,9 +359,12 @@ static int titsc_parse_dt(struct platform_device *pdev,
359 */ 359 */
360 err = of_property_read_u32(node, "ti,coordinate-readouts", 360 err = of_property_read_u32(node, "ti,coordinate-readouts",
361 &ts_dev->coordinate_readouts); 361 &ts_dev->coordinate_readouts);
362 if (err < 0) 362 if (err < 0) {
363 dev_warn(&pdev->dev, "please use 'ti,coordinate-readouts' instead\n");
363 err = of_property_read_u32(node, "ti,coordiante-readouts", 364 err = of_property_read_u32(node, "ti,coordiante-readouts",
364 &ts_dev->coordinate_readouts); 365 &ts_dev->coordinate_readouts);
366 }
367
365 if (err < 0) 368 if (err < 0)
366 return err; 369 return err;
367 370
diff --git a/drivers/isdn/gigaset/bas-gigaset.c b/drivers/isdn/gigaset/bas-gigaset.c
index c44950d3eb7b..b7ae0a0dd5b6 100644
--- a/drivers/isdn/gigaset/bas-gigaset.c
+++ b/drivers/isdn/gigaset/bas-gigaset.c
@@ -2400,6 +2400,7 @@ allocerr:
2400error: 2400error:
2401 freeurbs(cs); 2401 freeurbs(cs);
2402 usb_set_intfdata(interface, NULL); 2402 usb_set_intfdata(interface, NULL);
2403 usb_put_dev(udev);
2403 gigaset_freecs(cs); 2404 gigaset_freecs(cs);
2404 return rc; 2405 return rc;
2405} 2406}
diff --git a/drivers/net/can/c_can/c_can_platform.c b/drivers/net/can/c_can/c_can_platform.c
index e29b6d051103..5dede6e64376 100644
--- a/drivers/net/can/c_can/c_can_platform.c
+++ b/drivers/net/can/c_can/c_can_platform.c
@@ -278,7 +278,8 @@ static int c_can_plat_probe(struct platform_device *pdev)
278 break; 278 break;
279 } 279 }
280 280
281 priv->raminit_ctrlreg = devm_ioremap_resource(&pdev->dev, res); 281 priv->raminit_ctrlreg = devm_ioremap(&pdev->dev, res->start,
282 resource_size(res));
282 if (IS_ERR(priv->raminit_ctrlreg) || priv->instance < 0) 283 if (IS_ERR(priv->raminit_ctrlreg) || priv->instance < 0)
283 dev_info(&pdev->dev, "control memory is not used for raminit\n"); 284 dev_info(&pdev->dev, "control memory is not used for raminit\n");
284 else 285 else
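The c_can change swaps devm_ioremap_resource() for plain devm_ioremap() because the raminit control register can live in a region shared with another device instance: devm_ioremap_resource() also requests (claims) the region and therefore fails when it is already busy, whereas devm_ioremap() only maps it. Roughly, as a sketch:

/* claims the region exclusively before mapping - fails if someone else owns it */
regs = devm_ioremap_resource(&pdev->dev, res);

/* maps without requesting the region - tolerates a shared address range */
regs = devm_ioremap(&pdev->dev, res->start, resource_size(res));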
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-main.c b/drivers/net/ethernet/amd/xgbe/xgbe-main.c
index d5a4f76e9474..e56c3d45b30e 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-main.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-main.c
@@ -349,7 +349,8 @@ static int xgbe_probe(struct platform_device *pdev)
349 /* Calculate the number of Tx and Rx rings to be created */ 349 /* Calculate the number of Tx and Rx rings to be created */
350 pdata->tx_ring_count = min_t(unsigned int, num_online_cpus(), 350 pdata->tx_ring_count = min_t(unsigned int, num_online_cpus(),
351 pdata->hw_feat.tx_ch_cnt); 351 pdata->hw_feat.tx_ch_cnt);
352 if (netif_set_real_num_tx_queues(netdev, pdata->tx_ring_count)) { 352 ret = netif_set_real_num_tx_queues(netdev, pdata->tx_ring_count);
353 if (ret) {
353 dev_err(dev, "error setting real tx queue count\n"); 354 dev_err(dev, "error setting real tx queue count\n");
354 goto err_io; 355 goto err_io;
355 } 356 }
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
index ce8f86966c11..d777fae86988 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
@@ -346,6 +346,7 @@ struct sw_tx_bd {
346 u8 flags; 346 u8 flags;
347/* Set on the first BD descriptor when there is a split BD */ 347/* Set on the first BD descriptor when there is a split BD */
348#define BNX2X_TSO_SPLIT_BD (1<<0) 348#define BNX2X_TSO_SPLIT_BD (1<<0)
349#define BNX2X_HAS_SECOND_PBD (1<<1)
349}; 350};
350 351
351struct sw_rx_page { 352struct sw_rx_page {
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index dca1236dd1cd..4e6c82e20224 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -227,6 +227,12 @@ static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata,
227 --nbd; 227 --nbd;
228 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx)); 228 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
229 229
230 if (tx_buf->flags & BNX2X_HAS_SECOND_PBD) {
231 /* Skip second parse bd... */
232 --nbd;
233 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
234 }
235
230 /* TSO headers+data bds share a common mapping. See bnx2x_tx_split() */ 236 /* TSO headers+data bds share a common mapping. See bnx2x_tx_split() */
231 if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) { 237 if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
232 tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd; 238 tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd;
@@ -3902,6 +3908,9 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
3902 /* set encapsulation flag in start BD */ 3908 /* set encapsulation flag in start BD */
3903 SET_FLAG(tx_start_bd->general_data, 3909 SET_FLAG(tx_start_bd->general_data,
3904 ETH_TX_START_BD_TUNNEL_EXIST, 1); 3910 ETH_TX_START_BD_TUNNEL_EXIST, 1);
3911
3912 tx_buf->flags |= BNX2X_HAS_SECOND_PBD;
3913
3905 nbd++; 3914 nbd++;
3906 } else if (xmit_type & XMIT_CSUM) { 3915 } else if (xmit_type & XMIT_CSUM) {
3907 /* Set PBD in checksum offload case w/o encapsulation */ 3916 /* Set PBD in checksum offload case w/o encapsulation */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
index 08ea91cab738..92fee842f954 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
@@ -416,6 +416,7 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
416 break; 416 break;
417 case PORT_FIBRE: 417 case PORT_FIBRE:
418 case PORT_DA: 418 case PORT_DA:
419 case PORT_NONE:
419 if (!(bp->port.supported[0] & SUPPORTED_FIBRE || 420 if (!(bp->port.supported[0] & SUPPORTED_FIBRE ||
420 bp->port.supported[1] & SUPPORTED_FIBRE)) { 421 bp->port.supported[1] & SUPPORTED_FIBRE)) {
421 DP(BNX2X_MSG_ETHTOOL, 422 DP(BNX2X_MSG_ETHTOOL,
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index 28c8111502cd..417ce68449a4 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -1147,6 +1147,11 @@ static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev)
1147 goto out; 1147 goto out;
1148 } 1148 }
1149 1149
1150 if (skb_padto(skb, ETH_ZLEN)) {
1151 ret = NETDEV_TX_OK;
1152 goto out;
1153 }
1154
1150 /* set the SKB transmit checksum */ 1155 /* set the SKB transmit checksum */
1151 if (priv->desc_64b_en) { 1156 if (priv->desc_64b_en) {
1152 ret = bcmgenet_put_tx_csum(dev, skb); 1157 ret = bcmgenet_put_tx_csum(dev, skb);
diff --git a/drivers/net/ethernet/sun/sunvnet.c b/drivers/net/ethernet/sun/sunvnet.c
index fd411d6e19a2..d813bfb1a847 100644
--- a/drivers/net/ethernet/sun/sunvnet.c
+++ b/drivers/net/ethernet/sun/sunvnet.c
@@ -610,6 +610,13 @@ static int __vnet_tx_trigger(struct vnet_port *port)
610 return err; 610 return err;
611} 611}
612 612
613static inline bool port_is_up(struct vnet_port *vnet)
614{
615 struct vio_driver_state *vio = &vnet->vio;
616
617 return !!(vio->hs_state & VIO_HS_COMPLETE);
618}
619
613struct vnet_port *__tx_port_find(struct vnet *vp, struct sk_buff *skb) 620struct vnet_port *__tx_port_find(struct vnet *vp, struct sk_buff *skb)
614{ 621{
615 unsigned int hash = vnet_hashfn(skb->data); 622 unsigned int hash = vnet_hashfn(skb->data);
@@ -617,14 +624,19 @@ struct vnet_port *__tx_port_find(struct vnet *vp, struct sk_buff *skb)
617 struct vnet_port *port; 624 struct vnet_port *port;
618 625
619 hlist_for_each_entry(port, hp, hash) { 626 hlist_for_each_entry(port, hp, hash) {
627 if (!port_is_up(port))
628 continue;
620 if (ether_addr_equal(port->raddr, skb->data)) 629 if (ether_addr_equal(port->raddr, skb->data))
621 return port; 630 return port;
622 } 631 }
623 port = NULL; 632 list_for_each_entry(port, &vp->port_list, list) {
624 if (!list_empty(&vp->port_list)) 633 if (!port->switch_port)
625 port = list_entry(vp->port_list.next, struct vnet_port, list); 634 continue;
626 635 if (!port_is_up(port))
627 return port; 636 continue;
637 return port;
638 }
639 return NULL;
628} 640}
629 641
630struct vnet_port *tx_port_find(struct vnet *vp, struct sk_buff *skb) 642struct vnet_port *tx_port_find(struct vnet *vp, struct sk_buff *skb)
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index f13e0acc8a69..592977a6547c 100644
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -378,8 +378,10 @@ static int netvsc_init_buf(struct hv_device *device)
378 378
379 net_device->send_section_map = 379 net_device->send_section_map =
380 kzalloc(net_device->map_words * sizeof(ulong), GFP_KERNEL); 380 kzalloc(net_device->map_words * sizeof(ulong), GFP_KERNEL);
381 if (net_device->send_section_map == NULL) 381 if (net_device->send_section_map == NULL) {
382 ret = -ENOMEM;
382 goto cleanup; 383 goto cleanup;
384 }
383 385
384 goto exit; 386 goto exit;
385 387
diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
index 4eaadcfcb0fe..203651ebccb0 100644
--- a/drivers/net/phy/mdio_bus.c
+++ b/drivers/net/phy/mdio_bus.c
@@ -255,6 +255,7 @@ int mdiobus_register(struct mii_bus *bus)
255 255
256 bus->dev.parent = bus->parent; 256 bus->dev.parent = bus->parent;
257 bus->dev.class = &mdio_bus_class; 257 bus->dev.class = &mdio_bus_class;
258 bus->dev.driver = bus->parent->driver;
258 bus->dev.groups = NULL; 259 bus->dev.groups = NULL;
259 dev_set_name(&bus->dev, "%s", bus->id); 260 dev_set_name(&bus->dev, "%s", bus->id);
260 261
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index 4f4568ef124e..ca5ec3e18d36 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -355,7 +355,7 @@ int phy_device_register(struct phy_device *phydev)
355 phydev->bus->phy_map[phydev->addr] = phydev; 355 phydev->bus->phy_map[phydev->addr] = phydev;
356 356
357 /* Run all of the fixups for this PHY */ 357 /* Run all of the fixups for this PHY */
358 err = phy_init_hw(phydev); 358 err = phy_scan_fixups(phydev);
359 if (err) { 359 if (err) {
360 pr_err("PHY %d failed to initialize\n", phydev->addr); 360 pr_err("PHY %d failed to initialize\n", phydev->addr);
361 goto out; 361 goto out;
@@ -575,6 +575,7 @@ int phy_attach_direct(struct net_device *dev, struct phy_device *phydev,
575 u32 flags, phy_interface_t interface) 575 u32 flags, phy_interface_t interface)
576{ 576{
577 struct device *d = &phydev->dev; 577 struct device *d = &phydev->dev;
578 struct module *bus_module;
578 int err; 579 int err;
579 580
580 /* Assume that if there is no driver, that it doesn't 581 /* Assume that if there is no driver, that it doesn't
@@ -599,6 +600,14 @@ int phy_attach_direct(struct net_device *dev, struct phy_device *phydev,
599 return -EBUSY; 600 return -EBUSY;
600 } 601 }
601 602
603 /* Increment the bus module reference count */
604 bus_module = phydev->bus->dev.driver ?
605 phydev->bus->dev.driver->owner : NULL;
606 if (!try_module_get(bus_module)) {
607 dev_err(&dev->dev, "failed to get the bus module\n");
608 return -EIO;
609 }
610
602 phydev->attached_dev = dev; 611 phydev->attached_dev = dev;
603 dev->phydev = phydev; 612 dev->phydev = phydev;
604 613
@@ -664,6 +673,10 @@ EXPORT_SYMBOL(phy_attach);
664void phy_detach(struct phy_device *phydev) 673void phy_detach(struct phy_device *phydev)
665{ 674{
666 int i; 675 int i;
676
677 if (phydev->bus->dev.driver)
678 module_put(phydev->bus->dev.driver->owner);
679
667 phydev->attached_dev->phydev = NULL; 680 phydev->attached_dev->phydev = NULL;
668 phydev->attached_dev = NULL; 681 phydev->attached_dev = NULL;
669 phy_suspend(phydev); 682 phy_suspend(phydev);
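The phy_device.c hunks pair try_module_get() on the MDIO bus driver's module at attach time with module_put() at detach, so the bus driver cannot be unloaded while a PHY is attached through it (the mdio_bus.c hunk above propagates the parent driver into bus->dev.driver to make that owner reachable). The general pairing, trimmed to a sketch:

struct module *owner = bus->dev.driver ? bus->dev.driver->owner : NULL;

if (!try_module_get(owner))     /* pin the bus driver for the attach lifetime */
        return -EIO;

/* ... PHY is attached and in use ... */

if (bus->dev.driver)
        module_put(bus->dev.driver->owner);     /* drop the pin on detach */

Note that try_module_get(NULL) succeeds, so a built-in bus driver needs no special casing.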
diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c
index 9ea4bfe5d318..2a32d9167d3b 100644
--- a/drivers/net/usb/cdc_ether.c
+++ b/drivers/net/usb/cdc_ether.c
@@ -341,6 +341,22 @@ next_desc:
341 usb_driver_release_interface(driver, info->data); 341 usb_driver_release_interface(driver, info->data);
342 return -ENODEV; 342 return -ENODEV;
343 } 343 }
344
345 /* Some devices don't initialise properly. In particular
346 * the packet filter is not reset. There are devices that
347 * don't do reset all the way. So the packet filter should
348 * be set to a sane initial value.
349 */
350 usb_control_msg(dev->udev,
351 usb_sndctrlpipe(dev->udev, 0),
352 USB_CDC_SET_ETHERNET_PACKET_FILTER,
353 USB_TYPE_CLASS | USB_RECIP_INTERFACE,
354 USB_CDC_PACKET_TYPE_ALL_MULTICAST | USB_CDC_PACKET_TYPE_DIRECTED | USB_CDC_PACKET_TYPE_BROADCAST,
355 intf->cur_altsetting->desc.bInterfaceNumber,
356 NULL,
357 0,
358 USB_CTRL_SET_TIMEOUT
359 );
344 return 0; 360 return 0;
345 361
346bad_desc: 362bad_desc:
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index e1e430587868..87f710476217 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -287,7 +287,7 @@
287/* USB_DEV_STAT */ 287/* USB_DEV_STAT */
288#define STAT_SPEED_MASK 0x0006 288#define STAT_SPEED_MASK 0x0006
289#define STAT_SPEED_HIGH 0x0000 289#define STAT_SPEED_HIGH 0x0000
290#define STAT_SPEED_FULL 0x0001 290#define STAT_SPEED_FULL 0x0002
291 291
292/* USB_TX_AGG */ 292/* USB_TX_AGG */
293#define TX_AGG_MAX_THRESHOLD 0x03 293#define TX_AGG_MAX_THRESHOLD 0x03
@@ -2300,9 +2300,8 @@ static void r8152b_exit_oob(struct r8152 *tp)
2300 /* rx share fifo credit full threshold */ 2300 /* rx share fifo credit full threshold */
2301 ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RXFIFO_CTRL0, RXFIFO_THR1_NORMAL); 2301 ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RXFIFO_CTRL0, RXFIFO_THR1_NORMAL);
2302 2302
2303 ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_DEV_STAT); 2303 if (tp->udev->speed == USB_SPEED_FULL ||
2304 ocp_data &= STAT_SPEED_MASK; 2304 tp->udev->speed == USB_SPEED_LOW) {
2305 if (ocp_data == STAT_SPEED_FULL) {
2306 /* rx share fifo credit near full threshold */ 2305 /* rx share fifo credit near full threshold */
2307 ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RXFIFO_CTRL1, 2306 ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RXFIFO_CTRL1,
2308 RXFIFO_THR2_FULL); 2307 RXFIFO_THR2_FULL);
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index d3f3e5d21874..1fb7b37d1402 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -340,7 +340,7 @@ static int vxlan_fdb_info(struct sk_buff *skb, struct vxlan_dev *vxlan,
340 ndm->ndm_state = fdb->state; 340 ndm->ndm_state = fdb->state;
341 ndm->ndm_ifindex = vxlan->dev->ifindex; 341 ndm->ndm_ifindex = vxlan->dev->ifindex;
342 ndm->ndm_flags = fdb->flags; 342 ndm->ndm_flags = fdb->flags;
343 ndm->ndm_type = NDA_DST; 343 ndm->ndm_type = RTN_UNICAST;
344 344
345 if (send_eth && nla_put(skb, NDA_LLADDR, ETH_ALEN, &fdb->eth_addr)) 345 if (send_eth && nla_put(skb, NDA_LLADDR, ETH_ALEN, &fdb->eth_addr))
346 goto nla_put_failure; 346 goto nla_put_failure;
diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c
index b777d8f46bd5..9aa012e6ea0a 100644
--- a/drivers/of/fdt.c
+++ b/drivers/of/fdt.c
@@ -26,6 +26,54 @@
26#include <asm/setup.h> /* for COMMAND_LINE_SIZE */ 26#include <asm/setup.h> /* for COMMAND_LINE_SIZE */
27#include <asm/page.h> 27#include <asm/page.h>
28 28
29/*
30 * of_fdt_limit_memory - limit the number of regions in the /memory node
31 * @limit: maximum entries
32 *
33 * Adjust the flattened device tree to have at most 'limit' number of
34 * memory entries in the /memory node. This function may be called
35 * any time after initial_boot_param is set.
36 */
37void of_fdt_limit_memory(int limit)
38{
39 int memory;
40 int len;
41 const void *val;
42 int nr_address_cells = OF_ROOT_NODE_ADDR_CELLS_DEFAULT;
43 int nr_size_cells = OF_ROOT_NODE_SIZE_CELLS_DEFAULT;
44 const uint32_t *addr_prop;
45 const uint32_t *size_prop;
46 int root_offset;
47 int cell_size;
48
49 root_offset = fdt_path_offset(initial_boot_params, "/");
50 if (root_offset < 0)
51 return;
52
53 addr_prop = fdt_getprop(initial_boot_params, root_offset,
54 "#address-cells", NULL);
55 if (addr_prop)
56 nr_address_cells = fdt32_to_cpu(*addr_prop);
57
58 size_prop = fdt_getprop(initial_boot_params, root_offset,
59 "#size-cells", NULL);
60 if (size_prop)
61 nr_size_cells = fdt32_to_cpu(*size_prop);
62
63 cell_size = sizeof(uint32_t)*(nr_address_cells + nr_size_cells);
64
65 memory = fdt_path_offset(initial_boot_params, "/memory");
66 if (memory > 0) {
67 val = fdt_getprop(initial_boot_params, memory, "reg", &len);
68 if (len > limit*cell_size) {
69 len = limit*cell_size;
70 pr_debug("Limiting number of entries to %d\n", limit);
71 fdt_setprop(initial_boot_params, memory, "reg", val,
72 len);
73 }
74 }
75}
76
29/** 77/**
30 * of_fdt_is_compatible - Return true if given node from the given blob has 78 * of_fdt_is_compatible - Return true if given node from the given blob has
31 * compat in its compatible list 79 * compat in its compatible list
@@ -937,7 +985,7 @@ int __init __weak early_init_dt_reserve_memory_arch(phys_addr_t base,
937} 985}
938#endif 986#endif
939 987
940bool __init early_init_dt_scan(void *params) 988bool __init early_init_dt_verify(void *params)
941{ 989{
942 if (!params) 990 if (!params)
943 return false; 991 return false;
@@ -951,6 +999,12 @@ bool __init early_init_dt_scan(void *params)
951 return false; 999 return false;
952 } 1000 }
953 1001
1002 return true;
1003}
1004
1005
1006void __init early_init_dt_scan_nodes(void)
1007{
954 /* Retrieve various information from the /chosen node */ 1008 /* Retrieve various information from the /chosen node */
955 of_scan_flat_dt(early_init_dt_scan_chosen, boot_command_line); 1009 of_scan_flat_dt(early_init_dt_scan_chosen, boot_command_line);
956 1010
@@ -959,7 +1013,17 @@ bool __init early_init_dt_scan(void *params)
959 1013
960 /* Setup memory, calling early_init_dt_add_memory_arch */ 1014 /* Setup memory, calling early_init_dt_add_memory_arch */
961 of_scan_flat_dt(early_init_dt_scan_memory, NULL); 1015 of_scan_flat_dt(early_init_dt_scan_memory, NULL);
1016}
1017
1018bool __init early_init_dt_scan(void *params)
1019{
1020 bool status;
1021
1022 status = early_init_dt_verify(params);
1023 if (!status)
1024 return false;
962 1025
1026 early_init_dt_scan_nodes();
963 return true; 1027 return true;
964} 1028}
965 1029
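Splitting early_init_dt_scan() into early_init_dt_verify() plus early_init_dt_scan_nodes() lets an architecture validate the blob, patch it (for instance with the new of_fdt_limit_memory() above), and only then run the generic /chosen and /memory scans. A hypothetical early-boot sequence using the split helpers; the function name and the limit of 4 entries are illustrative only:

#include <linux/init.h>
#include <linux/of_fdt.h>

void __init example_setup_fdt(void *dt_virt)
{
        if (!early_init_dt_verify(dt_virt))
                return;

        /* trim the /memory node before the generic scan consumes it */
        of_fdt_limit_memory(4);

        early_init_dt_scan_nodes();
}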
diff --git a/drivers/parport/Kconfig b/drivers/parport/Kconfig
index 2872ece81f35..44333bd8f908 100644
--- a/drivers/parport/Kconfig
+++ b/drivers/parport/Kconfig
@@ -5,6 +5,12 @@
5# Parport configuration. 5# Parport configuration.
6# 6#
7 7
8config ARCH_MIGHT_HAVE_PC_PARPORT
9 bool
10 help
11 Select this config option from the architecture Kconfig if
12 the architecture might have PC parallel port hardware.
13
8menuconfig PARPORT 14menuconfig PARPORT
9 tristate "Parallel port support" 15 tristate "Parallel port support"
10 depends on HAS_IOMEM 16 depends on HAS_IOMEM
@@ -31,12 +37,6 @@ menuconfig PARPORT
31 37
32 If unsure, say Y. 38 If unsure, say Y.
33 39
34config ARCH_MIGHT_HAVE_PC_PARPORT
35 bool
36 help
37 Select this config option from the architecture Kconfig if
38 the architecture might have PC parallel port hardware.
39
40if PARPORT 40if PARPORT
41 41
42config PARPORT_PC 42config PARPORT_PC
diff --git a/drivers/pinctrl/pinctrl-st.c b/drivers/pinctrl/pinctrl-st.c
index 1bd6363bc95e..9f43916637ca 100644
--- a/drivers/pinctrl/pinctrl-st.c
+++ b/drivers/pinctrl/pinctrl-st.c
@@ -1431,7 +1431,7 @@ static void st_gpio_irqmux_handler(unsigned irq, struct irq_desc *desc)
1431 1431
1432 status = readl(info->irqmux_base); 1432 status = readl(info->irqmux_base);
1433 1433
1434 for_each_set_bit(n, &status, ST_GPIO_PINS_PER_BANK) 1434 for_each_set_bit(n, &status, info->nbanks)
1435 __gpio_irq_handler(&info->banks[n]); 1435 __gpio_irq_handler(&info->banks[n]);
1436 1436
1437 chained_irq_exit(chip, desc); 1437 chained_irq_exit(chip, desc);
diff --git a/drivers/xen/grant-table.c b/drivers/xen/grant-table.c
index 5d4de88fe5b8..eeba7544f0cd 100644
--- a/drivers/xen/grant-table.c
+++ b/drivers/xen/grant-table.c
@@ -1195,18 +1195,20 @@ static int gnttab_expand(unsigned int req_entries)
1195int gnttab_init(void) 1195int gnttab_init(void)
1196{ 1196{
1197 int i; 1197 int i;
1198 unsigned long max_nr_grant_frames;
1198 unsigned int max_nr_glist_frames, nr_glist_frames; 1199 unsigned int max_nr_glist_frames, nr_glist_frames;
1199 unsigned int nr_init_grefs; 1200 unsigned int nr_init_grefs;
1200 int ret; 1201 int ret;
1201 1202
1202 gnttab_request_version(); 1203 gnttab_request_version();
1204 max_nr_grant_frames = gnttab_max_grant_frames();
1203 nr_grant_frames = 1; 1205 nr_grant_frames = 1;
1204 1206
1205 /* Determine the maximum number of frames required for the 1207 /* Determine the maximum number of frames required for the
1206 * grant reference free list on the current hypervisor. 1208 * grant reference free list on the current hypervisor.
1207 */ 1209 */
1208 BUG_ON(grefs_per_grant_frame == 0); 1210 BUG_ON(grefs_per_grant_frame == 0);
1209 max_nr_glist_frames = (gnttab_max_grant_frames() * 1211 max_nr_glist_frames = (max_nr_grant_frames *
1210 grefs_per_grant_frame / RPP); 1212 grefs_per_grant_frame / RPP);
1211 1213
1212 gnttab_list = kmalloc(max_nr_glist_frames * sizeof(grant_ref_t *), 1214 gnttab_list = kmalloc(max_nr_glist_frames * sizeof(grant_ref_t *),
@@ -1223,6 +1225,11 @@ int gnttab_init(void)
1223 } 1225 }
1224 } 1226 }
1225 1227
1228 ret = arch_gnttab_init(max_nr_grant_frames,
1229 nr_status_frames(max_nr_grant_frames));
1230 if (ret < 0)
1231 goto ini_nomem;
1232
1226 if (gnttab_setup() < 0) { 1233 if (gnttab_setup() < 0) {
1227 ret = -ENODEV; 1234 ret = -ENODEV;
1228 goto ini_nomem; 1235 goto ini_nomem;
diff --git a/fs/afs/main.c b/fs/afs/main.c
index 42dd2e499ed8..35de0c04729f 100644
--- a/fs/afs/main.c
+++ b/fs/afs/main.c
@@ -55,13 +55,13 @@ static int __init afs_get_client_UUID(void)
55 afs_uuid.time_low = uuidtime; 55 afs_uuid.time_low = uuidtime;
56 afs_uuid.time_mid = uuidtime >> 32; 56 afs_uuid.time_mid = uuidtime >> 32;
57 afs_uuid.time_hi_and_version = (uuidtime >> 48) & AFS_UUID_TIMEHI_MASK; 57 afs_uuid.time_hi_and_version = (uuidtime >> 48) & AFS_UUID_TIMEHI_MASK;
58 afs_uuid.time_hi_and_version = AFS_UUID_VERSION_TIME; 58 afs_uuid.time_hi_and_version |= AFS_UUID_VERSION_TIME;
59 59
60 get_random_bytes(&clockseq, 2); 60 get_random_bytes(&clockseq, 2);
61 afs_uuid.clock_seq_low = clockseq; 61 afs_uuid.clock_seq_low = clockseq;
62 afs_uuid.clock_seq_hi_and_reserved = 62 afs_uuid.clock_seq_hi_and_reserved =
63 (clockseq >> 8) & AFS_UUID_CLOCKHI_MASK; 63 (clockseq >> 8) & AFS_UUID_CLOCKHI_MASK;
64 afs_uuid.clock_seq_hi_and_reserved = AFS_UUID_VARIANT_STD; 64 afs_uuid.clock_seq_hi_and_reserved |= AFS_UUID_VARIANT_STD;
65 65
66 _debug("AFS UUID: %08x-%04x-%04x-%02x%02x-%02x%02x%02x%02x%02x%02x", 66 _debug("AFS UUID: %08x-%04x-%04x-%02x%02x-%02x%02x%02x%02x%02x%02x",
67 afs_uuid.time_low, 67 afs_uuid.time_low,
diff --git a/fs/coredump.c b/fs/coredump.c
index 0b2528fb640e..a93f7e6ea4cf 100644
--- a/fs/coredump.c
+++ b/fs/coredump.c
@@ -306,7 +306,7 @@ static int zap_threads(struct task_struct *tsk, struct mm_struct *mm,
306 if (unlikely(nr < 0)) 306 if (unlikely(nr < 0))
307 return nr; 307 return nr;
308 308
309 tsk->flags = PF_DUMPCORE; 309 tsk->flags |= PF_DUMPCORE;
310 if (atomic_read(&mm->mm_users) == nr + 1) 310 if (atomic_read(&mm->mm_users) == nr + 1)
311 goto done; 311 goto done;
312 /* 312 /*
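The fs/afs/main.c and fs/coredump.c hunks fix the same class of bug: a plain '=' where '|=' was intended, which discards the bits already assembled in the field (the UUID timestamp/clock-sequence bits and the task's existing PF_* flags, respectively). A stand-alone illustration; the mask and flag values are made up for the example:

#include <stdio.h>
#include <stdint.h>

#define TIMEHI_MASK   0x0fff
#define VERSION_TIME  0x1000

int main(void)
{
        uint16_t field = 0x2abc & TIMEHI_MASK;  /* 0x0abc: timestamp bits */
        uint16_t wrong = field;
        uint16_t right = field;

        wrong = VERSION_TIME;       /* '=' throws the timestamp bits away */
        right |= VERSION_TIME;      /* '|=' keeps them and adds the version */

        printf("wrong=0x%04x right=0x%04x\n", wrong, right);  /* 0x1000 vs 0x1abc */
        return 0;
}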
diff --git a/fs/direct-io.c b/fs/direct-io.c
index 98040ba388ac..194d0d122cae 100644
--- a/fs/direct-io.c
+++ b/fs/direct-io.c
@@ -198,9 +198,8 @@ static inline int dio_refill_pages(struct dio *dio, struct dio_submit *sdio)
198 * L1 cache. 198 * L1 cache.
199 */ 199 */
200static inline struct page *dio_get_page(struct dio *dio, 200static inline struct page *dio_get_page(struct dio *dio,
201 struct dio_submit *sdio, size_t *from, size_t *to) 201 struct dio_submit *sdio)
202{ 202{
203 int n;
204 if (dio_pages_present(sdio) == 0) { 203 if (dio_pages_present(sdio) == 0) {
205 int ret; 204 int ret;
206 205
@@ -209,10 +208,7 @@ static inline struct page *dio_get_page(struct dio *dio,
209 return ERR_PTR(ret); 208 return ERR_PTR(ret);
210 BUG_ON(dio_pages_present(sdio) == 0); 209 BUG_ON(dio_pages_present(sdio) == 0);
211 } 210 }
212 n = sdio->head++; 211 return dio->pages[sdio->head];
213 *from = n ? 0 : sdio->from;
214 *to = (n == sdio->tail - 1) ? sdio->to : PAGE_SIZE;
215 return dio->pages[n];
216} 212}
217 213
218/** 214/**
@@ -911,11 +907,15 @@ static int do_direct_IO(struct dio *dio, struct dio_submit *sdio,
911 while (sdio->block_in_file < sdio->final_block_in_request) { 907 while (sdio->block_in_file < sdio->final_block_in_request) {
912 struct page *page; 908 struct page *page;
913 size_t from, to; 909 size_t from, to;
914 page = dio_get_page(dio, sdio, &from, &to); 910
911 page = dio_get_page(dio, sdio);
915 if (IS_ERR(page)) { 912 if (IS_ERR(page)) {
916 ret = PTR_ERR(page); 913 ret = PTR_ERR(page);
917 goto out; 914 goto out;
918 } 915 }
916 from = sdio->head ? 0 : sdio->from;
917 to = (sdio->head == sdio->tail - 1) ? sdio->to : PAGE_SIZE;
918 sdio->head++;
919 919
920 while (from < to) { 920 while (from < to) {
921 unsigned this_chunk_bytes; /* # of bytes mapped */ 921 unsigned this_chunk_bytes; /* # of bytes mapped */
diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c
index 8474028d7848..03246cd9d47a 100644
--- a/fs/fuse/inode.c
+++ b/fs/fuse/inode.c
@@ -907,9 +907,6 @@ static void process_init_reply(struct fuse_conn *fc, struct fuse_req *req)
907 fc->writeback_cache = 1; 907 fc->writeback_cache = 1;
908 if (arg->time_gran && arg->time_gran <= 1000000000) 908 if (arg->time_gran && arg->time_gran <= 1000000000)
909 fc->sb->s_time_gran = arg->time_gran; 909 fc->sb->s_time_gran = arg->time_gran;
910 else
911 fc->sb->s_time_gran = 1000000000;
912
913 } else { 910 } else {
914 ra_pages = fc->max_read / PAGE_CACHE_SIZE; 911 ra_pages = fc->max_read / PAGE_CACHE_SIZE;
915 fc->no_lock = 1; 912 fc->no_lock = 1;
@@ -938,7 +935,7 @@ static void fuse_send_init(struct fuse_conn *fc, struct fuse_req *req)
938 FUSE_SPLICE_WRITE | FUSE_SPLICE_MOVE | FUSE_SPLICE_READ | 935 FUSE_SPLICE_WRITE | FUSE_SPLICE_MOVE | FUSE_SPLICE_READ |
939 FUSE_FLOCK_LOCKS | FUSE_IOCTL_DIR | FUSE_AUTO_INVAL_DATA | 936 FUSE_FLOCK_LOCKS | FUSE_IOCTL_DIR | FUSE_AUTO_INVAL_DATA |
940 FUSE_DO_READDIRPLUS | FUSE_READDIRPLUS_AUTO | FUSE_ASYNC_DIO | 937 FUSE_DO_READDIRPLUS | FUSE_READDIRPLUS_AUTO | FUSE_ASYNC_DIO |
941 FUSE_WRITEBACK_CACHE; 938 FUSE_WRITEBACK_CACHE | FUSE_NO_OPEN_SUPPORT;
942 req->in.h.opcode = FUSE_INIT; 939 req->in.h.opcode = FUSE_INIT;
943 req->in.numargs = 1; 940 req->in.numargs = 1;
944 req->in.args[0].size = sizeof(*arg); 941 req->in.args[0].size = sizeof(*arg);
diff --git a/fs/namei.c b/fs/namei.c
index 985c6f368485..9eb787e5c167 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -2256,9 +2256,10 @@ done:
2256 goto out; 2256 goto out;
2257 } 2257 }
2258 path->dentry = dentry; 2258 path->dentry = dentry;
2259 path->mnt = mntget(nd->path.mnt); 2259 path->mnt = nd->path.mnt;
2260 if (should_follow_link(dentry, nd->flags & LOOKUP_FOLLOW)) 2260 if (should_follow_link(dentry, nd->flags & LOOKUP_FOLLOW))
2261 return 1; 2261 return 1;
2262 mntget(path->mnt);
2262 follow_mount(path); 2263 follow_mount(path);
2263 error = 0; 2264 error = 0;
2264out: 2265out:
diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
index b56b1cc02718..944275c8f56d 100644
--- a/fs/nfsd/nfs4xdr.c
+++ b/fs/nfsd/nfs4xdr.c
@@ -2879,6 +2879,7 @@ again:
2879 * return the conflicting open: 2879 * return the conflicting open:
2880 */ 2880 */
2881 if (conf->len) { 2881 if (conf->len) {
2882 kfree(conf->data);
2882 conf->len = 0; 2883 conf->len = 0;
2883 conf->data = NULL; 2884 conf->data = NULL;
2884 goto again; 2885 goto again;
@@ -2891,6 +2892,7 @@ again:
2891 if (conf->len) { 2892 if (conf->len) {
2892 p = xdr_encode_opaque_fixed(p, &ld->ld_clientid, 8); 2893 p = xdr_encode_opaque_fixed(p, &ld->ld_clientid, 8);
2893 p = xdr_encode_opaque(p, conf->data, conf->len); 2894 p = xdr_encode_opaque(p, conf->data, conf->len);
2895 kfree(conf->data);
2894 } else { /* non - nfsv4 lock in conflict, no clientid nor owner */ 2896 } else { /* non - nfsv4 lock in conflict, no clientid nor owner */
2895 p = xdr_encode_hyper(p, (u64)0); /* clientid */ 2897 p = xdr_encode_hyper(p, (u64)0); /* clientid */
2896 *p++ = cpu_to_be32(0); /* length of owner name */ 2898 *p++ = cpu_to_be32(0); /* length of owner name */
@@ -2907,7 +2909,7 @@ nfsd4_encode_lock(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_lo
2907 nfserr = nfsd4_encode_stateid(xdr, &lock->lk_resp_stateid); 2909 nfserr = nfsd4_encode_stateid(xdr, &lock->lk_resp_stateid);
2908 else if (nfserr == nfserr_denied) 2910 else if (nfserr == nfserr_denied)
2909 nfserr = nfsd4_encode_lock_denied(xdr, &lock->lk_denied); 2911 nfserr = nfsd4_encode_lock_denied(xdr, &lock->lk_denied);
2910 kfree(lock->lk_denied.ld_owner.data); 2912
2911 return nfserr; 2913 return nfserr;
2912} 2914}
2913 2915
diff --git a/fs/xattr.c b/fs/xattr.c
index 3377dff18404..c69e6d43a0d2 100644
--- a/fs/xattr.c
+++ b/fs/xattr.c
@@ -843,7 +843,7 @@ struct simple_xattr *simple_xattr_alloc(const void *value, size_t size)
843 843
844 /* wrap around? */ 844 /* wrap around? */
845 len = sizeof(*new_xattr) + size; 845 len = sizeof(*new_xattr) + size;
846 if (len <= sizeof(*new_xattr)) 846 if (len < sizeof(*new_xattr))
847 return NULL; 847 return NULL;
848 848
849 new_xattr = kmalloc(len, GFP_KERNEL); 849 new_xattr = kmalloc(len, GFP_KERNEL);
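The xattr.c change relaxes the wrap-around check from '<=' to '<': the sum sizeof(*new_xattr) + size can only fall below sizeof(*new_xattr) if the size_t addition overflowed, and the stricter '<=' also rejected the legitimate size == 0 case. A small user-space demonstration of the distinction, with an illustrative header struct:

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

struct hdr { void *next; size_t size; };

int main(void)
{
        size_t size = SIZE_MAX - sizeof(struct hdr) + 1;   /* forces wrap-around */
        size_t len = sizeof(struct hdr) + size;

        printf("overflow caught by '<':   %d\n", len < sizeof(struct hdr));   /* 1 */

        size = 0;                                          /* zero-length value */
        len = sizeof(struct hdr) + size;
        printf("false positive with '<=': %d\n", len <= sizeof(struct hdr));  /* 1 */
        return 0;
}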
diff --git a/include/dt-bindings/pinctrl/dra.h b/include/dt-bindings/pinctrl/dra.h
index 002a2855c046..3d33794e4f3e 100644
--- a/include/dt-bindings/pinctrl/dra.h
+++ b/include/dt-bindings/pinctrl/dra.h
@@ -30,7 +30,8 @@
30#define MUX_MODE14 0xe 30#define MUX_MODE14 0xe
31#define MUX_MODE15 0xf 31#define MUX_MODE15 0xf
32 32
33#define PULL_ENA (1 << 16) 33#define PULL_ENA (0 << 16)
34#define PULL_DIS (1 << 16)
34#define PULL_UP (1 << 17) 35#define PULL_UP (1 << 17)
35#define INPUT_EN (1 << 18) 36#define INPUT_EN (1 << 18)
36#define SLEWCONTROL (1 << 19) 37#define SLEWCONTROL (1 << 19)
@@ -38,10 +39,10 @@
38#define WAKEUP_EVENT (1 << 25) 39#define WAKEUP_EVENT (1 << 25)
39 40
40/* Active pin states */ 41/* Active pin states */
41#define PIN_OUTPUT 0 42#define PIN_OUTPUT (0 | PULL_DIS)
42#define PIN_OUTPUT_PULLUP (PIN_OUTPUT | PULL_ENA | PULL_UP) 43#define PIN_OUTPUT_PULLUP (PIN_OUTPUT | PULL_ENA | PULL_UP)
43#define PIN_OUTPUT_PULLDOWN (PIN_OUTPUT | PULL_ENA) 44#define PIN_OUTPUT_PULLDOWN (PIN_OUTPUT | PULL_ENA)
44#define PIN_INPUT INPUT_EN 45#define PIN_INPUT (INPUT_EN | PULL_DIS)
45#define PIN_INPUT_SLEW (INPUT_EN | SLEWCONTROL) 46#define PIN_INPUT_SLEW (INPUT_EN | SLEWCONTROL)
46#define PIN_INPUT_PULLUP (PULL_ENA | INPUT_EN | PULL_UP) 47#define PIN_INPUT_PULLUP (PULL_ENA | INPUT_EN | PULL_UP)
47#define PIN_INPUT_PULLDOWN (PULL_ENA | INPUT_EN) 48#define PIN_INPUT_PULLDOWN (PULL_ENA | INPUT_EN)
diff --git a/include/linux/libata.h b/include/linux/libata.h
index 5ab4e3a76721..92abb497ab14 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -593,6 +593,7 @@ struct ata_host {
593 struct device *dev; 593 struct device *dev;
594 void __iomem * const *iomap; 594 void __iomem * const *iomap;
595 unsigned int n_ports; 595 unsigned int n_ports;
596 unsigned int n_tags; /* nr of NCQ tags */
596 void *private_data; 597 void *private_data;
597 struct ata_port_operations *ops; 598 struct ata_port_operations *ops;
598 unsigned long flags; 599 unsigned long flags;
diff --git a/include/linux/of_fdt.h b/include/linux/of_fdt.h
index 05117899fcb4..0ff360d5b3b3 100644
--- a/include/linux/of_fdt.h
+++ b/include/linux/of_fdt.h
@@ -73,6 +73,8 @@ extern int early_init_dt_scan_root(unsigned long node, const char *uname,
73 int depth, void *data); 73 int depth, void *data);
74 74
75extern bool early_init_dt_scan(void *params); 75extern bool early_init_dt_scan(void *params);
76extern bool early_init_dt_verify(void *params);
77extern void early_init_dt_scan_nodes(void);
76 78
77extern const char *of_flat_dt_get_machine_name(void); 79extern const char *of_flat_dt_get_machine_name(void);
78extern const void *of_flat_dt_match_machine(const void *default_match, 80extern const void *of_flat_dt_match_machine(const void *default_match,
@@ -84,6 +86,7 @@ extern void unflatten_and_copy_device_tree(void);
84extern void early_init_devtree(void *); 86extern void early_init_devtree(void *);
85extern void early_get_first_memblock_info(void *, phys_addr_t *); 87extern void early_get_first_memblock_info(void *, phys_addr_t *);
86extern u64 fdt_translate_address(const void *blob, int node_offset); 88extern u64 fdt_translate_address(const void *blob, int node_offset);
89extern void of_fdt_limit_memory(int limit);
87#else /* CONFIG_OF_FLATTREE */ 90#else /* CONFIG_OF_FLATTREE */
88static inline void early_init_fdt_scan_reserved_mem(void) {} 91static inline void early_init_fdt_scan_reserved_mem(void) {}
89static inline const char *of_flat_dt_get_machine_name(void) { return NULL; } 92static inline const char *of_flat_dt_get_machine_name(void) { return NULL; }
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index 0a97b583ee8d..e1474ae18c88 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -399,6 +399,18 @@ static inline struct page *read_mapping_page(struct address_space *mapping,
399} 399}
400 400
401/* 401/*
402 * Get the offset in PAGE_SIZE.
403 * (TODO: hugepage should have ->index in PAGE_SIZE)
404 */
405static inline pgoff_t page_to_pgoff(struct page *page)
406{
407 if (unlikely(PageHeadHuge(page)))
408 return page->index << compound_order(page);
409 else
410 return page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
411}
412
413/*
402 * Return byte-offset into filesystem object for page. 414 * Return byte-offset into filesystem object for page.
403 */ 415 */
404static inline loff_t page_offset(struct page *page) 416static inline loff_t page_offset(struct page *page)
diff --git a/include/net/ip.h b/include/net/ip.h
index 09b32da1b929..db4a771b9ef3 100644
--- a/include/net/ip.h
+++ b/include/net/ip.h
@@ -316,16 +316,7 @@ static inline unsigned int ip_skb_dst_mtu(const struct sk_buff *skb)
316 } 316 }
317} 317}
318 318
319#define IP_IDENTS_SZ 2048u 319u32 ip_idents_reserve(u32 hash, int segs);
320extern atomic_t *ip_idents;
321
322static inline u32 ip_idents_reserve(u32 hash, int segs)
323{
324 atomic_t *id_ptr = ip_idents + hash % IP_IDENTS_SZ;
325
326 return atomic_add_return(segs, id_ptr) - segs;
327}
328
329void __ip_select_ident(struct iphdr *iph, int segs); 320void __ip_select_ident(struct iphdr *iph, int segs);
330 321
331static inline void ip_select_ident_segs(struct sk_buff *skb, struct sock *sk, int segs) 322static inline void ip_select_ident_segs(struct sk_buff *skb, struct sock *sk, int segs)
diff --git a/include/uapi/linux/fuse.h b/include/uapi/linux/fuse.h
index 40b5ca8a1b1f..25084a052a1e 100644
--- a/include/uapi/linux/fuse.h
+++ b/include/uapi/linux/fuse.h
@@ -101,6 +101,7 @@
101 * - add FATTR_CTIME 101 * - add FATTR_CTIME
102 * - add ctime and ctimensec to fuse_setattr_in 102 * - add ctime and ctimensec to fuse_setattr_in
103 * - add FUSE_RENAME2 request 103 * - add FUSE_RENAME2 request
104 * - add FUSE_NO_OPEN_SUPPORT flag
104 */ 105 */
105 106
106#ifndef _LINUX_FUSE_H 107#ifndef _LINUX_FUSE_H
@@ -229,6 +230,7 @@ struct fuse_file_lock {
229 * FUSE_READDIRPLUS_AUTO: adaptive readdirplus 230 * FUSE_READDIRPLUS_AUTO: adaptive readdirplus
230 * FUSE_ASYNC_DIO: asynchronous direct I/O submission 231 * FUSE_ASYNC_DIO: asynchronous direct I/O submission
231 * FUSE_WRITEBACK_CACHE: use writeback cache for buffered writes 232 * FUSE_WRITEBACK_CACHE: use writeback cache for buffered writes
233 * FUSE_NO_OPEN_SUPPORT: kernel supports zero-message opens
232 */ 234 */
233#define FUSE_ASYNC_READ (1 << 0) 235#define FUSE_ASYNC_READ (1 << 0)
234#define FUSE_POSIX_LOCKS (1 << 1) 236#define FUSE_POSIX_LOCKS (1 << 1)
@@ -247,6 +249,7 @@ struct fuse_file_lock {
247#define FUSE_READDIRPLUS_AUTO (1 << 14) 249#define FUSE_READDIRPLUS_AUTO (1 << 14)
248#define FUSE_ASYNC_DIO (1 << 15) 250#define FUSE_ASYNC_DIO (1 << 15)
249#define FUSE_WRITEBACK_CACHE (1 << 16) 251#define FUSE_WRITEBACK_CACHE (1 << 16)
252#define FUSE_NO_OPEN_SUPPORT (1 << 17)
250 253
251/** 254/**
252 * CUSE INIT request/reply flags 255 * CUSE INIT request/reply flags
diff --git a/include/xen/grant_table.h b/include/xen/grant_table.h
index a5af2a26d94f..5c1aba154b64 100644
--- a/include/xen/grant_table.h
+++ b/include/xen/grant_table.h
@@ -170,6 +170,7 @@ gnttab_set_unmap_op(struct gnttab_unmap_grant_ref *unmap, phys_addr_t addr,
170 unmap->dev_bus_addr = 0; 170 unmap->dev_bus_addr = 0;
171} 171}
172 172
173int arch_gnttab_init(unsigned long nr_shared, unsigned long nr_status);
173int arch_gnttab_map_shared(xen_pfn_t *frames, unsigned long nr_gframes, 174int arch_gnttab_map_shared(xen_pfn_t *frames, unsigned long nr_gframes,
174 unsigned long max_nr_gframes, 175 unsigned long max_nr_gframes,
175 void **__shared); 176 void **__shared);
diff --git a/kernel/events/core.c b/kernel/events/core.c
index b0c95f0f06fd..6b17ac1b0c2a 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -7458,7 +7458,19 @@ __perf_event_exit_task(struct perf_event *child_event,
7458 struct perf_event_context *child_ctx, 7458 struct perf_event_context *child_ctx,
7459 struct task_struct *child) 7459 struct task_struct *child)
7460{ 7460{
7461 perf_remove_from_context(child_event, true); 7461 /*
7462 * Do not destroy the 'original' grouping; because of the context
7463 * switch optimization the original events could've ended up in a
7464 * random child task.
7465 *
7466 * If we were to destroy the original group, all group related
7467 * operations would cease to function properly after this random
7468 * child dies.
7469 *
7470 * Do destroy all inherited groups, we don't care about those
7471 * and being thorough is better.
7472 */
7473 perf_remove_from_context(child_event, !!child_event->parent);
7462 7474
7463 /* 7475 /*
7464 * It can happen that the parent exits first, and has events 7476 * It can happen that the parent exits first, and has events
@@ -7474,7 +7486,7 @@ __perf_event_exit_task(struct perf_event *child_event,
7474static void perf_event_exit_task_context(struct task_struct *child, int ctxn) 7486static void perf_event_exit_task_context(struct task_struct *child, int ctxn)
7475{ 7487{
7476 struct perf_event *child_event, *next; 7488 struct perf_event *child_event, *next;
7477 struct perf_event_context *child_ctx; 7489 struct perf_event_context *child_ctx, *parent_ctx;
7478 unsigned long flags; 7490 unsigned long flags;
7479 7491
7480 if (likely(!child->perf_event_ctxp[ctxn])) { 7492 if (likely(!child->perf_event_ctxp[ctxn])) {
@@ -7499,6 +7511,15 @@ static void perf_event_exit_task_context(struct task_struct *child, int ctxn)
7499 raw_spin_lock(&child_ctx->lock); 7511 raw_spin_lock(&child_ctx->lock);
7500 task_ctx_sched_out(child_ctx); 7512 task_ctx_sched_out(child_ctx);
7501 child->perf_event_ctxp[ctxn] = NULL; 7513 child->perf_event_ctxp[ctxn] = NULL;
7514
7515 /*
7516 * In order to avoid freeing: child_ctx->parent_ctx->task
7517 * under perf_event_context::lock, grab another reference.
7518 */
7519 parent_ctx = child_ctx->parent_ctx;
7520 if (parent_ctx)
7521 get_ctx(parent_ctx);
7522
7502 /* 7523 /*
7503 * If this context is a clone; unclone it so it can't get 7524 * If this context is a clone; unclone it so it can't get
7504 * swapped to another process while we're removing all 7525 * swapped to another process while we're removing all
@@ -7509,6 +7530,13 @@ static void perf_event_exit_task_context(struct task_struct *child, int ctxn)
7509 raw_spin_unlock_irqrestore(&child_ctx->lock, flags); 7530 raw_spin_unlock_irqrestore(&child_ctx->lock, flags);
7510 7531
7511 /* 7532 /*
7533 * Now that we no longer hold perf_event_context::lock, drop
7534 * our extra child_ctx->parent_ctx reference.
7535 */
7536 if (parent_ctx)
7537 put_ctx(parent_ctx);
7538
7539 /*
7512 * Report the task dead after unscheduling the events so that we 7540 * Report the task dead after unscheduling the events so that we
7513 * won't get any samples after PERF_RECORD_EXIT. We can however still 7541 * won't get any samples after PERF_RECORD_EXIT. We can however still
7514 * get a few PERF_RECORD_READ events. 7542 * get a few PERF_RECORD_READ events.
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index 3214289df5a7..734e9a7d280b 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -2037,19 +2037,23 @@ static int __init populate_kprobe_blacklist(unsigned long *start,
2037{ 2037{
2038 unsigned long *iter; 2038 unsigned long *iter;
2039 struct kprobe_blacklist_entry *ent; 2039 struct kprobe_blacklist_entry *ent;
2040 unsigned long offset = 0, size = 0; 2040 unsigned long entry, offset = 0, size = 0;
2041 2041
2042 for (iter = start; iter < end; iter++) { 2042 for (iter = start; iter < end; iter++) {
2043 if (!kallsyms_lookup_size_offset(*iter, &size, &offset)) { 2043 entry = arch_deref_entry_point((void *)*iter);
2044 pr_err("Failed to find blacklist %p\n", (void *)*iter); 2044
2045 if (!kernel_text_address(entry) ||
2046 !kallsyms_lookup_size_offset(entry, &size, &offset)) {
2047 pr_err("Failed to find blacklist at %p\n",
2048 (void *)entry);
2045 continue; 2049 continue;
2046 } 2050 }
2047 2051
2048 ent = kmalloc(sizeof(*ent), GFP_KERNEL); 2052 ent = kmalloc(sizeof(*ent), GFP_KERNEL);
2049 if (!ent) 2053 if (!ent)
2050 return -ENOMEM; 2054 return -ENOMEM;
2051 ent->start_addr = *iter; 2055 ent->start_addr = entry;
2052 ent->end_addr = *iter + size; 2056 ent->end_addr = entry + size;
2053 INIT_LIST_HEAD(&ent->list); 2057 INIT_LIST_HEAD(&ent->list);
2054 list_add_tail(&ent->list, &kprobe_blacklist); 2058 list_add_tail(&ent->list, &kprobe_blacklist);
2055 } 2059 }
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 2024bbd573d2..9221c02ed9e2 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -2604,6 +2604,7 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
2604 } else { 2604 } else {
2605 if (cow) 2605 if (cow)
2606 huge_ptep_set_wrprotect(src, addr, src_pte); 2606 huge_ptep_set_wrprotect(src, addr, src_pte);
2607 entry = huge_ptep_get(src_pte);
2607 ptepage = pte_page(entry); 2608 ptepage = pte_page(entry);
2608 get_page(ptepage); 2609 get_page(ptepage);
2609 page_dup_rmap(ptepage); 2610 page_dup_rmap(ptepage);
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index c6399e328931..7211a73ba14d 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -435,7 +435,7 @@ static void collect_procs_anon(struct page *page, struct list_head *to_kill,
435 if (av == NULL) /* Not actually mapped anymore */ 435 if (av == NULL) /* Not actually mapped anymore */
436 return; 436 return;
437 437
438 pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT); 438 pgoff = page_to_pgoff(page);
439 read_lock(&tasklist_lock); 439 read_lock(&tasklist_lock);
440 for_each_process (tsk) { 440 for_each_process (tsk) {
441 struct anon_vma_chain *vmac; 441 struct anon_vma_chain *vmac;
@@ -469,7 +469,7 @@ static void collect_procs_file(struct page *page, struct list_head *to_kill,
469 mutex_lock(&mapping->i_mmap_mutex); 469 mutex_lock(&mapping->i_mmap_mutex);
470 read_lock(&tasklist_lock); 470 read_lock(&tasklist_lock);
471 for_each_process(tsk) { 471 for_each_process(tsk) {
472 pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT); 472 pgoff_t pgoff = page_to_pgoff(page);
473 struct task_struct *t = task_early_kill(tsk, force_early); 473 struct task_struct *t = task_early_kill(tsk, force_early);
474 474
475 if (!t) 475 if (!t)
diff --git a/mm/memory.c b/mm/memory.c
index d67fd9fcf1f2..7e8d8205b610 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2882,7 +2882,8 @@ static int do_read_fault(struct mm_struct *mm, struct vm_area_struct *vma,
2882 * if page by the offset is not ready to be mapped (cold cache or 2882 * if page by the offset is not ready to be mapped (cold cache or
2883 * something). 2883 * something).
2884 */ 2884 */
2885 if (vma->vm_ops->map_pages && fault_around_pages() > 1) { 2885 if (vma->vm_ops->map_pages && !(flags & FAULT_FLAG_NONLINEAR) &&
2886 fault_around_pages() > 1) {
2886 pte = pte_offset_map_lock(mm, pmd, address, &ptl); 2887 pte = pte_offset_map_lock(mm, pmd, address, &ptl);
2887 do_fault_around(vma, address, pte, pgoff, flags); 2888 do_fault_around(vma, address, pte, pgoff, flags);
2888 if (!pte_same(*pte, orig_pte)) 2889 if (!pte_same(*pte, orig_pte))
diff --git a/mm/migrate.c b/mm/migrate.c
index 9e0beaa91845..be6dbf995c0c 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -988,9 +988,10 @@ out:
988 * it. Otherwise, putback_lru_page() will drop the reference grabbed 988 * it. Otherwise, putback_lru_page() will drop the reference grabbed
989 * during isolation. 989 * during isolation.
990 */ 990 */
991 if (rc != MIGRATEPAGE_SUCCESS && put_new_page) 991 if (rc != MIGRATEPAGE_SUCCESS && put_new_page) {
992 ClearPageSwapBacked(newpage);
992 put_new_page(newpage, private); 993 put_new_page(newpage, private);
993 else 994 } else
994 putback_lru_page(newpage); 995 putback_lru_page(newpage);
995 996
996 if (result) { 997 if (result) {
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 0ea758b898fd..8bcfe3ae20cb 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -6062,11 +6062,13 @@ static inline int pfn_to_bitidx(struct zone *zone, unsigned long pfn)
6062} 6062}
6063 6063
6064/** 6064/**
6065 * get_pageblock_flags_group - Return the requested group of flags for the pageblock_nr_pages block of pages 6065 * get_pfnblock_flags_mask - Return the requested group of flags for the pageblock_nr_pages block of pages
6066 * @page: The page within the block of interest 6066 * @page: The page within the block of interest
6067 * @start_bitidx: The first bit of interest to retrieve 6067 * @pfn: The target page frame number
6068 * @end_bitidx: The last bit of interest 6068 * @end_bitidx: The last bit of interest to retrieve
6069 * returns pageblock_bits flags 6069 * @mask: mask of bits that the caller is interested in
6070 *
6071 * Return: pageblock_bits flags
6070 */ 6072 */
6071unsigned long get_pfnblock_flags_mask(struct page *page, unsigned long pfn, 6073unsigned long get_pfnblock_flags_mask(struct page *page, unsigned long pfn,
6072 unsigned long end_bitidx, 6074 unsigned long end_bitidx,
@@ -6091,9 +6093,10 @@ unsigned long get_pfnblock_flags_mask(struct page *page, unsigned long pfn,
 /**
  * set_pfnblock_flags_mask - Set the requested group of flags for a pageblock_nr_pages block of pages
  * @page: The page within the block of interest
- * @start_bitidx: The first bit of interest
- * @end_bitidx: The last bit of interest
  * @flags: The flags to set
+ * @pfn: The target page frame number
+ * @end_bitidx: The last bit of interest
+ * @mask: mask of bits that the caller is interested in
  */
 void set_pfnblock_flags_mask(struct page *page, unsigned long flags,
					unsigned long pfn,
diff --git a/mm/rmap.c b/mm/rmap.c
index b7e94ebbd09e..22a4a7699cdb 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -517,11 +517,7 @@ void page_unlock_anon_vma_read(struct anon_vma *anon_vma)
 static inline unsigned long
 __vma_address(struct page *page, struct vm_area_struct *vma)
 {
-	pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
-
-	if (unlikely(is_vm_hugetlb_page(vma)))
-		pgoff = page->index << huge_page_order(page_hstate(page));
-
+	pgoff_t pgoff = page_to_pgoff(page);
	return vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
 }

@@ -1639,7 +1635,7 @@ static struct anon_vma *rmap_walk_anon_lock(struct page *page,
 static int rmap_walk_anon(struct page *page, struct rmap_walk_control *rwc)
 {
	struct anon_vma *anon_vma;
-	pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
+	pgoff_t pgoff = page_to_pgoff(page);
	struct anon_vma_chain *avc;
	int ret = SWAP_AGAIN;

@@ -1680,7 +1676,7 @@ static int rmap_walk_anon(struct page *page, struct rmap_walk_control *rwc)
 static int rmap_walk_file(struct page *page, struct rmap_walk_control *rwc)
 {
	struct address_space *mapping = page->mapping;
-	pgoff_t pgoff = page->index << compound_order(page);
+	pgoff_t pgoff = page_to_pgoff(page);
	struct vm_area_struct *vma;
	int ret = SWAP_AGAIN;

diff --git a/mm/shmem.c b/mm/shmem.c
index 1140f49b6ded..af68b15a8fc1 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -85,7 +85,7 @@ static struct vfsmount *shm_mnt;
  * a time): we would prefer not to enlarge the shmem inode just for that.
  */
 struct shmem_falloc {
-	int	mode;		/* FALLOC_FL mode currently operating */
+	wait_queue_head_t *waitq; /* faults into hole wait for punch to end */
	pgoff_t start;		/* start of range currently being fallocated */
	pgoff_t next;		/* the next page offset to be fallocated */
	pgoff_t nr_falloced;	/* how many new pages have been fallocated */
@@ -468,23 +468,20 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
		return;

	index = start;
-	for ( ; ; ) {
+	while (index < end) {
		cond_resched();

		pvec.nr = find_get_entries(mapping, index,
				min(end - index, (pgoff_t)PAGEVEC_SIZE),
				pvec.pages, indices);
		if (!pvec.nr) {
-			if (index == start || unfalloc)
+			/* If all gone or hole-punch or unfalloc, we're done */
+			if (index == start || end != -1)
				break;
+			/* But if truncating, restart to make sure all gone */
			index = start;
			continue;
		}
-		if ((index == start || unfalloc) && indices[0] >= end) {
-			pagevec_remove_exceptionals(&pvec);
-			pagevec_release(&pvec);
-			break;
-		}
		mem_cgroup_uncharge_start();
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];
@@ -496,8 +493,12 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
			if (radix_tree_exceptional_entry(page)) {
				if (unfalloc)
					continue;
-				nr_swaps_freed += !shmem_free_swap(mapping,
-								index, page);
+				if (shmem_free_swap(mapping, index, page)) {
+					/* Swap was replaced by page: retry */
+					index--;
+					break;
+				}
+				nr_swaps_freed++;
				continue;
			}

@@ -506,6 +507,11 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
				if (page->mapping == mapping) {
					VM_BUG_ON_PAGE(PageWriteback(page), page);
					truncate_inode_page(mapping, page);
+				} else {
+					/* Page was replaced by swap: retry */
+					unlock_page(page);
+					index--;
+					break;
				}
			}
			unlock_page(page);
@@ -760,7 +766,7 @@ static int shmem_writepage(struct page *page, struct writeback_control *wbc)
		spin_lock(&inode->i_lock);
		shmem_falloc = inode->i_private;
		if (shmem_falloc &&
-		    !shmem_falloc->mode &&
+		    !shmem_falloc->waitq &&
		    index >= shmem_falloc->start &&
		    index < shmem_falloc->next)
			shmem_falloc->nr_unswapped++;
@@ -1248,38 +1254,58 @@ static int shmem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
	 * Trinity finds that probing a hole which tmpfs is punching can
	 * prevent the hole-punch from ever completing: which in turn
	 * locks writers out with its hold on i_mutex. So refrain from
-	 * faulting pages into the hole while it's being punched, and
-	 * wait on i_mutex to be released if vmf->flags permits.
+	 * faulting pages into the hole while it's being punched. Although
+	 * shmem_undo_range() does remove the additions, it may be unable to
+	 * keep up, as each new page needs its own unmap_mapping_range() call,
+	 * and the i_mmap tree grows ever slower to scan if new vmas are added.
+	 *
+	 * It does not matter if we sometimes reach this check just before the
+	 * hole-punch begins, so that one fault then races with the punch:
+	 * we just need to make racing faults a rare case.
+	 *
+	 * The implementation below would be much simpler if we just used a
+	 * standard mutex or completion: but we cannot take i_mutex in fault,
+	 * and bloating every shmem inode for this unlikely case would be sad.
	 */
	if (unlikely(inode->i_private)) {
		struct shmem_falloc *shmem_falloc;

		spin_lock(&inode->i_lock);
		shmem_falloc = inode->i_private;
-		if (!shmem_falloc ||
-		    shmem_falloc->mode != FALLOC_FL_PUNCH_HOLE ||
-		    vmf->pgoff < shmem_falloc->start ||
-		    vmf->pgoff >= shmem_falloc->next)
-			shmem_falloc = NULL;
-		spin_unlock(&inode->i_lock);
-		/*
-		 * i_lock has protected us from taking shmem_falloc seriously
-		 * once return from shmem_fallocate() went back up that stack.
-		 * i_lock does not serialize with i_mutex at all, but it does
-		 * not matter if sometimes we wait unnecessarily, or sometimes
-		 * miss out on waiting: we just need to make those cases rare.
-		 */
-		if (shmem_falloc) {
+		if (shmem_falloc &&
+		    shmem_falloc->waitq &&
+		    vmf->pgoff >= shmem_falloc->start &&
+		    vmf->pgoff < shmem_falloc->next) {
+			wait_queue_head_t *shmem_falloc_waitq;
+			DEFINE_WAIT(shmem_fault_wait);
+
+			ret = VM_FAULT_NOPAGE;
			if ((vmf->flags & FAULT_FLAG_ALLOW_RETRY) &&
			   !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) {
+				/* It's polite to up mmap_sem if we can */
				up_read(&vma->vm_mm->mmap_sem);
-				mutex_lock(&inode->i_mutex);
-				mutex_unlock(&inode->i_mutex);
-				return VM_FAULT_RETRY;
+				ret = VM_FAULT_RETRY;
			}
-			/* cond_resched? Leave that to GUP or return to user */
-			return VM_FAULT_NOPAGE;
+
+			shmem_falloc_waitq = shmem_falloc->waitq;
+			prepare_to_wait(shmem_falloc_waitq, &shmem_fault_wait,
+					TASK_UNINTERRUPTIBLE);
+			spin_unlock(&inode->i_lock);
+			schedule();
+
+			/*
+			 * shmem_falloc_waitq points into the shmem_fallocate()
+			 * stack of the hole-punching task: shmem_falloc_waitq
+			 * is usually invalid by the time we reach here, but
+			 * finish_wait() does not dereference it in that case;
+			 * though i_lock needed lest racing with wake_up_all().
+			 */
+			spin_lock(&inode->i_lock);
+			finish_wait(shmem_falloc_waitq, &shmem_fault_wait);
+			spin_unlock(&inode->i_lock);
+			return ret;
		}
+		spin_unlock(&inode->i_lock);
	}

	error = shmem_getpage(inode, vmf->pgoff, &vmf->page, SGP_CACHE, &ret);
@@ -1774,13 +1800,13 @@ static long shmem_fallocate(struct file *file, int mode, loff_t offset,

	mutex_lock(&inode->i_mutex);

-	shmem_falloc.mode = mode & ~FALLOC_FL_KEEP_SIZE;
-
	if (mode & FALLOC_FL_PUNCH_HOLE) {
		struct address_space *mapping = file->f_mapping;
		loff_t unmap_start = round_up(offset, PAGE_SIZE);
		loff_t unmap_end = round_down(offset + len, PAGE_SIZE) - 1;
+		DECLARE_WAIT_QUEUE_HEAD_ONSTACK(shmem_falloc_waitq);

+		shmem_falloc.waitq = &shmem_falloc_waitq;
		shmem_falloc.start = unmap_start >> PAGE_SHIFT;
		shmem_falloc.next = (unmap_end + 1) >> PAGE_SHIFT;
		spin_lock(&inode->i_lock);
@@ -1792,8 +1818,13 @@ static long shmem_fallocate(struct file *file, int mode, loff_t offset,
					    1 + unmap_end - unmap_start, 0);
		shmem_truncate_range(inode, offset, offset + len - 1);
		/* No need to unmap again: hole-punching leaves COWed pages */
+
+		spin_lock(&inode->i_lock);
+		inode->i_private = NULL;
+		wake_up_all(&shmem_falloc_waitq);
+		spin_unlock(&inode->i_lock);
		error = 0;
-		goto undone;
+		goto out;
	}

	/* We need to check rlimit even when FALLOC_FL_KEEP_SIZE */
@@ -1809,6 +1840,7 @@ static long shmem_fallocate(struct file *file, int mode, loff_t offset,
		goto out;
	}

+	shmem_falloc.waitq = NULL;
	shmem_falloc.start = start;
	shmem_falloc.next  = start;
	shmem_falloc.nr_falloced = 0;
diff --git a/mm/slab_common.c b/mm/slab_common.c
index 735e01a0db6f..d31c4bacc6a2 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -55,7 +55,7 @@ static int kmem_cache_sanity_check(const char *name, size_t size)
			continue;
		}

-#if !defined(CONFIG_SLUB) || !defined(CONFIG_SLUB_DEBUG_ON)
+#if !defined(CONFIG_SLUB)
		if (!strcmp(s->name, name)) {
			pr_err("%s (%s): Cache name already exists.\n",
			       __func__, name);
diff --git a/mm/truncate.c b/mm/truncate.c
index 6a78c814bebf..eda247307164 100644
--- a/mm/truncate.c
+++ b/mm/truncate.c
@@ -355,14 +355,16 @@ void truncate_inode_pages_range(struct address_space *mapping,
	for ( ; ; ) {
		cond_resched();
		if (!pagevec_lookup_entries(&pvec, mapping, index,
-			min(end - index, (pgoff_t)PAGEVEC_SIZE),
-			indices)) {
+			min(end - index, (pgoff_t)PAGEVEC_SIZE), indices)) {
+			/* If all gone from start onwards, we're done */
			if (index == start)
				break;
+			/* Otherwise restart to make sure all gone */
			index = start;
			continue;
		}
		if (index == start && indices[0] >= end) {
+			/* All gone out of hole to be punched, we're done */
			pagevec_remove_exceptionals(&pvec);
			pagevec_release(&pvec);
			break;
@@ -373,8 +375,11 @@ void truncate_inode_pages_range(struct address_space *mapping,

		/* We rely upon deletion not changing page->index */
		index = indices[i];
-		if (index >= end)
+		if (index >= end) {
+			/* Restart punch to make sure all gone */
+			index = start - 1;
			break;
+		}

		if (radix_tree_exceptional_entry(page)) {
			clear_exceptional_entry(mapping, index, page);
diff --git a/net/compat.c b/net/compat.c
index 9a76eaf63184..bc8aeefddf3f 100644
--- a/net/compat.c
+++ b/net/compat.c
@@ -85,7 +85,7 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
 {
	int tot_len;

-	if (kern_msg->msg_namelen) {
+	if (kern_msg->msg_name && kern_msg->msg_namelen) {
		if (mode == VERIFY_READ) {
			int err = move_addr_to_kernel(kern_msg->msg_name,
						      kern_msg->msg_namelen,
@@ -93,10 +93,11 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
			if (err < 0)
				return err;
		}
-		if (kern_msg->msg_name)
-			kern_msg->msg_name = kern_address;
-	} else
+		kern_msg->msg_name = kern_address;
+	} else {
		kern_msg->msg_name = NULL;
+		kern_msg->msg_namelen = 0;
+	}

	tot_len = iov_from_user_compat_to_kern(kern_iov,
					  (struct compat_iovec __user *)kern_msg->msg_iov,
diff --git a/net/core/iovec.c b/net/core/iovec.c
index 827dd6beb49c..e1ec45ab1e63 100644
--- a/net/core/iovec.c
+++ b/net/core/iovec.c
@@ -39,7 +39,7 @@ int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr_storage *a
 {
	int size, ct, err;

-	if (m->msg_namelen) {
+	if (m->msg_name && m->msg_namelen) {
		if (mode == VERIFY_READ) {
			void __user *namep;
			namep = (void __user __force *) m->msg_name;
@@ -48,10 +48,10 @@ int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr_storage *a
			if (err < 0)
				return err;
		}
-		if (m->msg_name)
-			m->msg_name = address;
+		m->msg_name = address;
	} else {
		m->msg_name = NULL;
+		m->msg_namelen = 0;
	}

	size = m->msg_iovlen * sizeof(struct iovec);
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index 559890b0f0a2..ef31fef25e5a 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -2249,7 +2249,7 @@ static int pneigh_fill_info(struct sk_buff *skb, struct pneigh_entry *pn,
	ndm->ndm_pad1    = 0;
	ndm->ndm_pad2    = 0;
	ndm->ndm_flags	 = pn->flags | NTF_PROXY;
-	ndm->ndm_type	 = NDA_DST;
+	ndm->ndm_type	 = RTN_UNICAST;
	ndm->ndm_ifindex = pn->dev->ifindex;
	ndm->ndm_state	 = NUD_NONE;

diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 3162ea923ded..190199851c9a 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -457,8 +457,31 @@ static struct neighbour *ipv4_neigh_lookup(const struct dst_entry *dst,
	return neigh_create(&arp_tbl, pkey, dev);
 }

-atomic_t *ip_idents __read_mostly;
-EXPORT_SYMBOL(ip_idents);
+#define IP_IDENTS_SZ 2048u
+struct ip_ident_bucket {
+	atomic_t	id;
+	u32		stamp32;
+};
+
+static struct ip_ident_bucket *ip_idents __read_mostly;
+
+/* In order to protect privacy, we add a perturbation to identifiers
+ * if one generator is seldom used. This makes hard for an attacker
+ * to infer how many packets were sent between two points in time.
+ */
+u32 ip_idents_reserve(u32 hash, int segs)
+{
+	struct ip_ident_bucket *bucket = ip_idents + hash % IP_IDENTS_SZ;
+	u32 old = ACCESS_ONCE(bucket->stamp32);
+	u32 now = (u32)jiffies;
+	u32 delta = 0;
+
+	if (old != now && cmpxchg(&bucket->stamp32, old, now) == old)
+		delta = prandom_u32_max(now - old);
+
+	return atomic_add_return(segs + delta, &bucket->id) - segs;
+}
+EXPORT_SYMBOL(ip_idents_reserve);

 void __ip_select_ident(struct iphdr *iph, int segs)
 {
@@ -467,7 +490,10 @@ void __ip_select_ident(struct iphdr *iph, int segs)

	net_get_random_once(&ip_idents_hashrnd, sizeof(ip_idents_hashrnd));

-	hash = jhash_1word((__force u32)iph->daddr, ip_idents_hashrnd);
+	hash = jhash_3words((__force u32)iph->daddr,
+			    (__force u32)iph->saddr,
+			    iph->protocol,
+			    ip_idents_hashrnd);
	id = ip_idents_reserve(hash, segs);
	iph->id = htons(id);
 }
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index 2e339d241b69..f5dafe609f8b 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -546,6 +546,8 @@ static void ipv6_select_ident(struct frag_hdr *fhdr, struct rt6_info *rt)
	net_get_random_once(&ip6_idents_hashrnd, sizeof(ip6_idents_hashrnd));

	hash = __ipv6_addr_jhash(&rt->rt6i_dst.addr, ip6_idents_hashrnd);
+	hash = __ipv6_addr_jhash(&rt->rt6i_src.addr, hash);
+
	id = ip_idents_reserve(hash, 1);
	fhdr->identification = htonl(id);
 }
diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c
index a8eb0a89326a..610e19c0e13f 100644
--- a/net/netfilter/ipvs/ip_vs_conn.c
+++ b/net/netfilter/ipvs/ip_vs_conn.c
@@ -797,7 +797,6 @@ static void ip_vs_conn_expire(unsigned long data)
		ip_vs_control_del(cp);

	if (cp->flags & IP_VS_CONN_F_NFCT) {
-		ip_vs_conn_drop_conntrack(cp);
		/* Do not access conntracks during subsys cleanup
		 * because nf_conntrack_find_get can not be used after
		 * conntrack cleanup for the net.
diff --git a/net/sctp/associola.c b/net/sctp/associola.c
index 9de23a222d3f..06a9ee6b2d3a 100644
--- a/net/sctp/associola.c
+++ b/net/sctp/associola.c
@@ -1097,6 +1097,7 @@ void sctp_assoc_update(struct sctp_association *asoc,
		asoc->c = new->c;
		asoc->peer.rwnd = new->peer.rwnd;
		asoc->peer.sack_needed = new->peer.sack_needed;
+		asoc->peer.auth_capable = new->peer.auth_capable;
		asoc->peer.i = new->peer.i;
		sctp_tsnmap_init(&asoc->peer.tsn_map, SCTP_TSN_MAP_INITIAL,
				 asoc->peer.i.initial_tsn, GFP_ATOMIC);
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index a8ef5108e0d8..0525d78ba328 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -2097,6 +2097,8 @@ struct dst_entry *xfrm_lookup(struct net *net, struct dst_entry *dst_orig,
				goto no_transform;
			}

+			dst_hold(&xdst->u.dst);
+			xdst->u.dst.flags |= DST_NOCACHE;
			route = xdst->route;
		}
	}
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
index 412d9dc3a873..d4db6ebb089d 100644
--- a/net/xfrm/xfrm_user.c
+++ b/net/xfrm/xfrm_user.c
@@ -177,9 +177,7 @@ static int verify_newsa_info(struct xfrm_usersa_info *p,
		    attrs[XFRMA_ALG_AEAD]	||
		    attrs[XFRMA_ALG_CRYPT]	||
		    attrs[XFRMA_ALG_COMP]	||
-		    attrs[XFRMA_TFCPAD]		||
-		    (ntohl(p->id.spi) >= 0x10000))
-
+		    attrs[XFRMA_TFCPAD])
			goto out;
		break;

@@ -207,7 +205,8 @@ static int verify_newsa_info(struct xfrm_usersa_info *p,
		    attrs[XFRMA_ALG_AUTH]	||
		    attrs[XFRMA_ALG_AUTH_TRUNC]	||
		    attrs[XFRMA_ALG_CRYPT]	||
-		    attrs[XFRMA_TFCPAD])
+		    attrs[XFRMA_TFCPAD]		||
+		    (ntohl(p->id.spi) >= 0x10000))
			goto out;
		break;

diff --git a/sound/firewire/bebob/bebob_maudio.c b/sound/firewire/bebob/bebob_maudio.c
index 6af50eb80ea7..70faa3a32526 100644
--- a/sound/firewire/bebob/bebob_maudio.c
+++ b/sound/firewire/bebob/bebob_maudio.c
@@ -379,11 +379,11 @@ static int special_clk_ctl_put(struct snd_kcontrol *kctl,
	struct special_params *params = bebob->maudio_special_quirk;
	int err, id;

-	mutex_lock(&bebob->mutex);
-
	id = uval->value.enumerated.item[0];
	if (id >= ARRAY_SIZE(special_clk_labels))
-		return 0;
+		return -EINVAL;
+
+	mutex_lock(&bebob->mutex);

	err = avc_maudio_set_special_clk(bebob, id,
					 params->dig_in_fmt,
@@ -391,7 +391,10 @@ static int special_clk_ctl_put(struct snd_kcontrol *kctl,
					 params->clk_lock);
	mutex_unlock(&bebob->mutex);

-	return err >= 0;
+	if (err >= 0)
+		err = 1;
+
+	return err;
 }
 static struct snd_kcontrol_new special_clk_ctl = {
	.name	= "Clock Source",
@@ -434,8 +437,8 @@ static struct snd_kcontrol_new special_sync_ctl = {
	.get	= special_sync_ctl_get,
 };

-/* Digital interface control for special firmware */
-static char *const special_dig_iface_labels[] = {
+/* Digital input interface control for special firmware */
+static char *const special_dig_in_iface_labels[] = {
	"S/PDIF Optical", "S/PDIF Coaxial", "ADAT Optical"
 };
 static int special_dig_in_iface_ctl_info(struct snd_kcontrol *kctl,
@@ -443,13 +446,13 @@ static int special_dig_in_iface_ctl_info(struct snd_kcontrol *kctl,
 {
	einf->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
	einf->count = 1;
-	einf->value.enumerated.items = ARRAY_SIZE(special_dig_iface_labels);
+	einf->value.enumerated.items = ARRAY_SIZE(special_dig_in_iface_labels);

	if (einf->value.enumerated.item >= einf->value.enumerated.items)
		einf->value.enumerated.item = einf->value.enumerated.items - 1;

	strcpy(einf->value.enumerated.name,
-	       special_dig_iface_labels[einf->value.enumerated.item]);
+	       special_dig_in_iface_labels[einf->value.enumerated.item]);

	return 0;
 }
@@ -491,26 +494,36 @@ static int special_dig_in_iface_ctl_set(struct snd_kcontrol *kctl,
	unsigned int id, dig_in_fmt, dig_in_iface;
	int err;

-	mutex_lock(&bebob->mutex);
-
	id = uval->value.enumerated.item[0];
+	if (id >= ARRAY_SIZE(special_dig_in_iface_labels))
+		return -EINVAL;

	/* decode user value */
	dig_in_fmt = (id >> 1) & 0x01;
	dig_in_iface = id & 0x01;

+	mutex_lock(&bebob->mutex);
+
	err = avc_maudio_set_special_clk(bebob,
					 params->clk_src,
					 dig_in_fmt,
					 params->dig_out_fmt,
					 params->clk_lock);
-	if ((err < 0) || (params->dig_in_fmt > 0)) /* ADAT */
+	if (err < 0)
+		goto end;
+
+	/* For ADAT, optical interface is only available. */
+	if (params->dig_in_fmt > 0) {
+		err = 1;
		goto end;
+	}

+	/* For S/PDIF, optical/coaxial interfaces are selectable. */
	err = avc_audio_set_selector(bebob->unit, 0x00, 0x04, dig_in_iface);
	if (err < 0)
		dev_err(&bebob->unit->device,
			"fail to set digital input interface: %d\n", err);
+	err = 1;
 end:
	special_stream_formation_set(bebob);
	mutex_unlock(&bebob->mutex);
@@ -525,18 +538,22 @@ static struct snd_kcontrol_new special_dig_in_iface_ctl = {
	.put	= special_dig_in_iface_ctl_set
 };

+/* Digital output interface control for special firmware */
+static char *const special_dig_out_iface_labels[] = {
+	"S/PDIF Optical and Coaxial", "ADAT Optical"
+};
 static int special_dig_out_iface_ctl_info(struct snd_kcontrol *kctl,
					   struct snd_ctl_elem_info *einf)
 {
	einf->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
	einf->count = 1;
-	einf->value.enumerated.items = ARRAY_SIZE(special_dig_iface_labels) - 1;
+	einf->value.enumerated.items = ARRAY_SIZE(special_dig_out_iface_labels);

	if (einf->value.enumerated.item >= einf->value.enumerated.items)
		einf->value.enumerated.item = einf->value.enumerated.items - 1;

	strcpy(einf->value.enumerated.name,
-	       special_dig_iface_labels[einf->value.enumerated.item + 1]);
+	       special_dig_out_iface_labels[einf->value.enumerated.item]);

	return 0;
 }
@@ -558,16 +575,20 @@ static int special_dig_out_iface_ctl_set(struct snd_kcontrol *kctl,
	unsigned int id;
	int err;

-	mutex_lock(&bebob->mutex);
-
	id = uval->value.enumerated.item[0];
+	if (id >= ARRAY_SIZE(special_dig_out_iface_labels))
+		return -EINVAL;
+
+	mutex_lock(&bebob->mutex);

	err = avc_maudio_set_special_clk(bebob,
					 params->clk_src,
					 params->dig_in_fmt,
					 id, params->clk_lock);
-	if (err >= 0)
+	if (err >= 0) {
		special_stream_formation_set(bebob);
+		err = 1;
+	}

	mutex_unlock(&bebob->mutex);
	return err;
diff --git a/virt/kvm/arm/vgic.c b/virt/kvm/arm/vgic.c
index 56ff9bebb577..476d3bf540a8 100644
--- a/virt/kvm/arm/vgic.c
+++ b/virt/kvm/arm/vgic.c
@@ -1526,17 +1526,33 @@ int kvm_vgic_hyp_init(void)
		goto out_unmap;
	}

-	kvm_info("%s@%llx IRQ%d\n", vgic_node->name,
-		 vctrl_res.start, vgic_maint_irq);
-	on_each_cpu(vgic_init_maintenance_interrupt, NULL, 1);
-
	if (of_address_to_resource(vgic_node, 3, &vcpu_res)) {
		kvm_err("Cannot obtain VCPU resource\n");
		ret = -ENXIO;
		goto out_unmap;
	}
+
+	if (!PAGE_ALIGNED(vcpu_res.start)) {
+		kvm_err("GICV physical address 0x%llx not page aligned\n",
+			(unsigned long long)vcpu_res.start);
+		ret = -ENXIO;
+		goto out_unmap;
+	}
+
+	if (!PAGE_ALIGNED(resource_size(&vcpu_res))) {
+		kvm_err("GICV size 0x%llx not a multiple of page size 0x%lx\n",
+			(unsigned long long)resource_size(&vcpu_res),
+			PAGE_SIZE);
+		ret = -ENXIO;
+		goto out_unmap;
+	}
+
	vgic_vcpu_base = vcpu_res.start;

+	kvm_info("%s@%llx IRQ%d\n", vgic_node->name,
+		 vctrl_res.start, vgic_maint_irq);
+	on_each_cpu(vgic_init_maintenance_interrupt, NULL, 1);
+
	goto out;

 out_unmap: