author     David S. Miller <davem@davemloft.net>  2014-08-05 21:57:18 -0400
committer  David S. Miller <davem@davemloft.net>  2014-08-05 21:57:18 -0400
commit     e9011d086674caeedb0ffb6eb5b8bc5920821df3 (patch)
tree       ed5d06353d8dc1fa5f6da349808f8345ca2291a5
parent     cfcfe22256d5a8a14924a1145d56017b043b554f (diff)
parent     c78f77e20d2ba5d4d5e478e85a6fb42556893e2d (diff)

Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/sparc

Conflicts:
	arch/sparc/mm/init_64.c

Conflict was simple non-overlapping additions.

Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--  Documentation/input/event-codes.txt | 13
-rw-r--r--  MAINTAINERS | 16
-rw-r--r--  Makefile | 4
-rw-r--r--  arch/arm/crypto/aesbs-glue.c | 10
-rw-r--r--  arch/arm64/crypto/aes-glue.c | 12
-rw-r--r--  arch/arm64/mm/init.c | 17
-rw-r--r--  arch/blackfin/configs/BF609-EZKIT_defconfig | 2
-rw-r--r--  arch/blackfin/kernel/vmlinux.lds.S | 2
-rw-r--r--  arch/blackfin/mach-bf533/boards/blackstamp.c | 1
-rw-r--r--  arch/blackfin/mach-bf537/boards/cm_bf537e.c | 1
-rw-r--r--  arch/blackfin/mach-bf537/boards/cm_bf537u.c | 1
-rw-r--r--  arch/blackfin/mach-bf537/boards/tcm_bf537.c | 1
-rw-r--r--  arch/blackfin/mach-bf548/boards/ezkit.c | 6
-rw-r--r--  arch/blackfin/mach-bf561/boards/acvilon.c | 1
-rw-r--r--  arch/blackfin/mach-bf561/boards/cm_bf561.c | 1
-rw-r--r--  arch/blackfin/mach-bf561/boards/ezkit.c | 1
-rw-r--r--  arch/blackfin/mach-bf609/boards/ezkit.c | 20
-rw-r--r--  arch/blackfin/mach-bf609/include/mach/pm.h | 5
-rw-r--r--  arch/blackfin/mach-bf609/pm.c | 4
-rw-r--r--  arch/blackfin/mach-common/ints-priority.c | 2
-rw-r--r--  arch/parisc/include/uapi/asm/signal.h | 2
-rw-r--r--  arch/parisc/mm/init.c | 1
-rw-r--r--  arch/powerpc/include/asm/cputable.h | 1
-rw-r--r--  arch/powerpc/include/asm/kvm_book3s_64.h | 19
-rw-r--r--  arch/powerpc/include/asm/mmu-hash64.h | 3
-rw-r--r--  arch/powerpc/include/asm/ppc_asm.h | 2
-rw-r--r--  arch/powerpc/kernel/cputable.c | 20
-rw-r--r--  arch/powerpc/kernel/rtas_flash.c | 6
-rw-r--r--  arch/powerpc/kvm/book3s_64_mmu_hv.c | 2
-rw-r--r--  arch/powerpc/kvm/book3s_hv_rm_mmu.c | 7
-rw-r--r--  arch/powerpc/kvm/book3s_hv_rmhandlers.S | 2
-rw-r--r--  arch/powerpc/kvm/book3s_interrupts.S | 4
-rw-r--r--  arch/powerpc/kvm/book3s_rmhandlers.S | 6
-rw-r--r--  arch/powerpc/kvm/book3s_rtas.c | 65
-rw-r--r--  arch/powerpc/kvm/e500_mmu_host.c | 3
-rw-r--r--  arch/powerpc/lib/mem_64.S | 2
-rw-r--r--  arch/powerpc/lib/sstep.c | 10
-rw-r--r--  arch/powerpc/perf/core-book3s.c | 6
-rw-r--r--  arch/powerpc/platforms/powernv/opal-elog.c | 4
-rw-r--r--  arch/powerpc/platforms/pseries/dlpar.c | 1
-rw-r--r--  arch/powerpc/platforms/pseries/reconfig.c | 1
-rw-r--r--  arch/s390/include/asm/switch_to.h | 4
-rw-r--r--  arch/s390/kernel/head.S | 6
-rw-r--r--  arch/s390/kernel/ptrace.c | 12
-rw-r--r--  arch/s390/pci/pci.c | 49
-rw-r--r--  arch/sh/Makefile | 3
-rw-r--r--  arch/sparc/Makefile | 3
-rw-r--r--  arch/sparc/boot/Makefile | 4
-rw-r--r--  arch/sparc/boot/install.sh | 50
-rw-r--r--  arch/sparc/include/asm/tlbflush_64.h | 12
-rw-r--r--  arch/sparc/include/uapi/asm/unistd.h | 3
-rw-r--r--  arch/sparc/kernel/ldc.c | 2
-rw-r--r--  arch/sparc/kernel/sys32.S | 1
-rw-r--r--  arch/sparc/kernel/systbls_32.S | 1
-rw-r--r--  arch/sparc/kernel/systbls_64.S | 2
-rw-r--r--  arch/sparc/math-emu/math_32.c | 2
-rw-r--r--  arch/sparc/mm/init_64.c | 32
-rw-r--r--  arch/x86/kernel/cpu/intel.c | 22
-rw-r--r--  arch/x86/kernel/cpu/intel_cacheinfo.c | 12
-rw-r--r--  arch/x86/kernel/cpu/mcheck/mce.c | 10
-rw-r--r--  arch/x86/kernel/cpu/perf_event.c | 3
-rw-r--r--  arch/x86/kernel/cpu/perf_event.h | 12
-rw-r--r--  arch/x86/kernel/cpu/perf_event_intel.c | 69
-rw-r--r--  arch/x86/kernel/cpu/perf_event_intel_ds.c | 6
-rw-r--r--  arch/x86/kernel/cpu/perf_event_intel_uncore.c | 11
-rw-r--r--  arch/x86/kernel/entry_32.S | 9
-rw-r--r--  arch/x86/kernel/kprobes/core.c | 3
-rw-r--r--  arch/x86/kvm/x86.c | 12
-rw-r--r--  arch/xtensa/kernel/vectors.S | 158
-rw-r--r--  arch/xtensa/kernel/vmlinux.lds.S | 4
-rw-r--r--  arch/xtensa/mm/init.c | 2
-rw-r--r--  block/blk-cgroup.c | 7
-rw-r--r--  block/blk-tag.c | 33
-rw-r--r--  block/compat_ioctl.c | 1
-rw-r--r--  drivers/ata/ahci.c | 1
-rw-r--r--  drivers/ata/libata-core.c | 12
-rw-r--r--  drivers/ata/libata-eh.c | 9
-rw-r--r--  drivers/ata/pata_ep93xx.c | 2
-rw-r--r--  drivers/block/drbd/drbd_nl.c | 6
-rw-r--r--  drivers/block/zram/zram_drv.c | 22
-rw-r--r--  drivers/firewire/ohci.c | 4
-rw-r--r--  drivers/gpio/gpio-rcar.c | 1
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c | 25
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_render_state.c | 4
-rw-r--r--  drivers/gpu/drm/i915/i915_irq.c | 11
-rw-r--r--  drivers/gpu/drm/radeon/cik.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/evergreen.c | 1
-rw-r--r--  drivers/gpu/drm/radeon/r600.c | 1
-rw-r--r--  drivers/gpu/drm/radeon/radeon.h | 15
-rw-r--r--  drivers/gpu/drm/radeon/radeon_cs.c | 20
-rw-r--r--  drivers/gpu/drm/radeon/radeon_device.c | 22
-rw-r--r--  drivers/gpu/drm/radeon/radeon_drv.c | 4
-rw-r--r--  drivers/gpu/drm/radeon/radeon_kms.c | 26
-rw-r--r--  drivers/gpu/drm/radeon/radeon_vm.c | 87
-rw-r--r--  drivers/gpu/drm/radeon/si.c | 1
-rw-r--r--  drivers/gpu/drm/radeon/trinity_dpm.c | 15
-rw-r--r--  drivers/hwmon/smsc47m192.c | 4
-rw-r--r--  drivers/ide/Kconfig | 5
-rw-r--r--  drivers/ide/ide-probe.c | 8
-rw-r--r--  drivers/input/input.c | 6
-rw-r--r--  drivers/input/keyboard/st-keyscan.c | 2
-rw-r--r--  drivers/input/misc/sirfsoc-onkey.c | 2
-rw-r--r--  drivers/input/mouse/synaptics.c | 5
-rw-r--r--  drivers/input/serio/i8042-x86ia64io.h | 7
-rw-r--r--  drivers/input/tablet/wacom_wac.c | 28
-rw-r--r--  drivers/input/touchscreen/ti_am335x_tsc.c | 5
-rw-r--r--  drivers/isdn/i4l/isdn_ppp.c | 20
-rw-r--r--  drivers/media/dvb-frontends/si2168.c | 16
-rw-r--r--  drivers/media/dvb-frontends/si2168_priv.h | 2
-rw-r--r--  drivers/media/dvb-frontends/tda10071.c | 12
-rw-r--r--  drivers/media/dvb-frontends/tda10071_priv.h | 1
-rw-r--r--  drivers/media/pci/saa7134/saa7134-empress.c | 2
-rw-r--r--  drivers/media/platform/davinci/vpif_capture.c | 1
-rw-r--r--  drivers/media/platform/davinci/vpif_display.c | 1
-rw-r--r--  drivers/media/tuners/si2157.c | 2
-rw-r--r--  drivers/media/usb/dvb-usb-v2/af9035.c | 40
-rw-r--r--  drivers/media/usb/gspca/pac7302.c | 1
-rw-r--r--  drivers/media/usb/hdpvr/hdpvr-video.c | 6
-rw-r--r--  drivers/media/v4l2-core/v4l2-dv-timings.c | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_cq.c | 7
-rw-r--r--  drivers/net/ethernet/realtek/r8169.c | 2
-rw-r--r--  drivers/net/ethernet/sun/sunvnet.c | 20
-rw-r--r--  drivers/net/ppp/ppp_generic.c | 22
-rw-r--r--  drivers/net/usb/huawei_cdc_ncm.c | 3
-rw-r--r--  drivers/net/usb/qmi_wwan.c | 2
-rw-r--r--  drivers/net/wan/x25_asy.c | 6
-rw-r--r--  drivers/net/xen-netback/netback.c | 86
-rw-r--r--  drivers/parport/Kconfig | 12
-rw-r--r--  drivers/pinctrl/pinctrl-st.c | 2
-rw-r--r--  drivers/s390/char/raw3270.c | 1
-rw-r--r--  drivers/s390/crypto/ap_bus.c | 9
-rw-r--r--  drivers/sbus/char/bbc_envctrl.c | 6
-rw-r--r--  drivers/sbus/char/bbc_i2c.c | 11
-rw-r--r--  drivers/staging/media/omap4iss/Kconfig | 2
-rw-r--r--  drivers/tty/serial/sunsab.c | 9
-rw-r--r--  fs/coredump.c | 2
-rw-r--r--  fs/direct-io.c | 14
-rw-r--r--  fs/fuse/inode.c | 5
-rw-r--r--  fs/namei.c | 3
-rw-r--r--  fs/nfsd/nfs4xdr.c | 4
-rw-r--r--  fs/xattr.c | 2
-rw-r--r--  include/linux/libata.h | 1
-rw-r--r--  include/linux/pagemap.h | 12
-rw-r--r--  include/net/netfilter/nf_tables.h | 6
-rw-r--r--  include/net/netns/nftables.h | 2
-rw-r--r--  include/uapi/linux/fuse.h | 3
-rw-r--r--  kernel/events/core.c | 32
-rw-r--r--  kernel/kprobes.c | 14
-rw-r--r--  kernel/trace/trace.c | 2
-rw-r--r--  kernel/trace/trace_clock.c | 9
-rw-r--r--  mm/hugetlb.c | 1
-rw-r--r--  mm/memory-failure.c | 4
-rw-r--r--  mm/memory.c | 3
-rw-r--r--  mm/migrate.c | 5
-rw-r--r--  mm/rmap.c | 10
-rw-r--r--  mm/shmem.c | 102
-rw-r--r--  mm/slab_common.c | 2
-rw-r--r--  mm/truncate.c | 11
-rw-r--r--  net/batman-adv/bridge_loop_avoidance.c | 44
-rw-r--r--  net/batman-adv/soft-interface.c | 60
-rw-r--r--  net/batman-adv/translation-table.c | 26
-rw-r--r--  net/batman-adv/types.h | 2
-rw-r--r--  net/core/dev.c | 2
-rw-r--r--  net/dns_resolver/dns_query.c | 2
-rw-r--r--  net/ipv4/af_inet.c | 3
-rw-r--r--  net/ipv4/gre_offload.c | 3
-rw-r--r--  net/ipv4/ip_options.c | 4
-rw-r--r--  net/ipv4/tcp_offload.c | 2
-rw-r--r--  net/ipv6/tcpv6_offload.c | 2
-rw-r--r--  net/netfilter/nf_tables_api.c | 140
-rw-r--r--  net/netfilter/nf_tables_core.c | 10
-rw-r--r--  net/sched/cls_u32.c | 19
-rw-r--r--  sound/firewire/bebob/bebob_maudio.c | 53
173 files changed, 1476 insertions, 663 deletions
diff --git a/Documentation/input/event-codes.txt b/Documentation/input/event-codes.txt
index f1ea2c69648d..c587a966413e 100644
--- a/Documentation/input/event-codes.txt
+++ b/Documentation/input/event-codes.txt
@@ -281,6 +281,19 @@ gestures can normally be extracted from it.
281If INPUT_PROP_SEMI_MT is not set, the device is assumed to be a true MT 281If INPUT_PROP_SEMI_MT is not set, the device is assumed to be a true MT
282device. 282device.
283 283
284INPUT_PROP_TOPBUTTONPAD:
285-----------------------
286Some laptops, most notably the Lenovo *40 series provide a trackstick
287device but do not have physical buttons associated with the trackstick
288device. Instead, the top area of the touchpad is marked to show
289visual/haptic areas for left, middle, right buttons intended to be used
290with the trackstick.
291
292If INPUT_PROP_TOPBUTTONPAD is set, userspace should emulate buttons
293accordingly. This property does not affect kernel behavior.
294The kernel does not provide button emulation for such devices but treats
295them as any other INPUT_PROP_BUTTONPAD device.
296
284Guidelines: 297Guidelines:
285========== 298==========
286The guidelines below ensure proper single-touch and multi-finger functionality. 299The guidelines below ensure proper single-touch and multi-finger functionality.
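[Editorial note, not part of the merged patch: the documentation hunk above defines a contract that userspace is expected to act on. As a hedged illustration, a client could read the per-device property bitmap with the EVIOCGPROP ioctl and key its button emulation off INPUT_PROP_TOPBUTTONPAD. The device node path is a placeholder, and the sketch assumes input headers new enough (3.16-era) to define the property.]

/*
 * Minimal sketch: detect INPUT_PROP_TOPBUTTONPAD on an evdev node.
 * The path /dev/input/event4 is a hypothetical touchpad device.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/input.h>

int main(void)
{
	unsigned char props[INPUT_PROP_MAX / 8 + 1];
	int fd = open("/dev/input/event4", O_RDONLY);

	if (fd < 0)
		return 1;
	memset(props, 0, sizeof(props));
	/* Fetch the device property bitmap advertised by the kernel. */
	if (ioctl(fd, EVIOCGPROP(sizeof(props)), props) < 0)
		return 1;
	if (props[INPUT_PROP_TOPBUTTONPAD / 8] & (1 << (INPUT_PROP_TOPBUTTONPAD % 8)))
		printf("top button pad: emulate trackstick buttons in userspace\n");
	return 0;
}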
diff --git a/MAINTAINERS b/MAINTAINERS
index 61a8f486306b..86efa7e213c2 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -6956,6 +6956,12 @@ L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
6956S: Maintained 6956S: Maintained
6957F: drivers/pinctrl/pinctrl-at91.c 6957F: drivers/pinctrl/pinctrl-at91.c
6958 6958
6959PIN CONTROLLER - RENESAS
6960M: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
6961L: linux-sh@vger.kernel.org
6962S: Maintained
6963F: drivers/pinctrl/sh-pfc/
6964
6959PIN CONTROLLER - SAMSUNG 6965PIN CONTROLLER - SAMSUNG
6960M: Tomasz Figa <t.figa@samsung.com> 6966M: Tomasz Figa <t.figa@samsung.com>
6961M: Thomas Abraham <thomas.abraham@linaro.org> 6967M: Thomas Abraham <thomas.abraham@linaro.org>
@@ -8019,6 +8025,16 @@ F: drivers/ata/
8019F: include/linux/ata.h 8025F: include/linux/ata.h
8020F: include/linux/libata.h 8026F: include/linux/libata.h
8021 8027
8028SERIAL ATA AHCI PLATFORM devices support
8029M: Hans de Goede <hdegoede@redhat.com>
8030M: Tejun Heo <tj@kernel.org>
8031L: linux-ide@vger.kernel.org
8032T: git git://git.kernel.org/pub/scm/linux/kernel/git/tj/libata.git
8033S: Supported
8034F: drivers/ata/ahci_platform.c
8035F: drivers/ata/libahci_platform.c
8036F: include/linux/ahci_platform.h
8037
8022SERVER ENGINES 10Gbps iSCSI - BladeEngine 2 DRIVER 8038SERVER ENGINES 10Gbps iSCSI - BladeEngine 2 DRIVER
8023M: Jayamohan Kallickal <jayamohan.kallickal@emulex.com> 8039M: Jayamohan Kallickal <jayamohan.kallickal@emulex.com>
8024L: linux-scsi@vger.kernel.org 8040L: linux-scsi@vger.kernel.org
diff --git a/Makefile b/Makefile
index 6b2774145d66..f6a7794e4db4 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
1VERSION = 3 1VERSION = 3
2PATCHLEVEL = 16 2PATCHLEVEL = 16
3SUBLEVEL = 0 3SUBLEVEL = 0
4EXTRAVERSION = -rc6 4EXTRAVERSION = -rc7
5NAME = Shuffling Zombie Juror 5NAME = Shuffling Zombie Juror
6 6
7# *DOCUMENTATION* 7# *DOCUMENTATION*
@@ -688,6 +688,8 @@ KBUILD_CFLAGS += -fomit-frame-pointer
688endif 688endif
689endif 689endif
690 690
691KBUILD_CFLAGS += $(call cc-option, -fno-var-tracking-assignments)
692
691ifdef CONFIG_DEBUG_INFO 693ifdef CONFIG_DEBUG_INFO
692KBUILD_CFLAGS += -g 694KBUILD_CFLAGS += -g
693KBUILD_AFLAGS += -Wa,-gdwarf-2 695KBUILD_AFLAGS += -Wa,-gdwarf-2
diff --git a/arch/arm/crypto/aesbs-glue.c b/arch/arm/crypto/aesbs-glue.c
index 4522366da759..15468fbbdea3 100644
--- a/arch/arm/crypto/aesbs-glue.c
+++ b/arch/arm/crypto/aesbs-glue.c
@@ -137,7 +137,7 @@ static int aesbs_cbc_encrypt(struct blkcipher_desc *desc,
137 dst += AES_BLOCK_SIZE; 137 dst += AES_BLOCK_SIZE;
138 } while (--blocks); 138 } while (--blocks);
139 } 139 }
140 err = blkcipher_walk_done(desc, &walk, 0); 140 err = blkcipher_walk_done(desc, &walk, walk.nbytes % AES_BLOCK_SIZE);
141 } 141 }
142 return err; 142 return err;
143} 143}
@@ -158,7 +158,7 @@ static int aesbs_cbc_decrypt(struct blkcipher_desc *desc,
158 bsaes_cbc_encrypt(walk.src.virt.addr, walk.dst.virt.addr, 158 bsaes_cbc_encrypt(walk.src.virt.addr, walk.dst.virt.addr,
159 walk.nbytes, &ctx->dec, walk.iv); 159 walk.nbytes, &ctx->dec, walk.iv);
160 kernel_neon_end(); 160 kernel_neon_end();
161 err = blkcipher_walk_done(desc, &walk, 0); 161 err = blkcipher_walk_done(desc, &walk, walk.nbytes % AES_BLOCK_SIZE);
162 } 162 }
163 while (walk.nbytes) { 163 while (walk.nbytes) {
164 u32 blocks = walk.nbytes / AES_BLOCK_SIZE; 164 u32 blocks = walk.nbytes / AES_BLOCK_SIZE;
@@ -182,7 +182,7 @@ static int aesbs_cbc_decrypt(struct blkcipher_desc *desc,
182 dst += AES_BLOCK_SIZE; 182 dst += AES_BLOCK_SIZE;
183 src += AES_BLOCK_SIZE; 183 src += AES_BLOCK_SIZE;
184 } while (--blocks); 184 } while (--blocks);
185 err = blkcipher_walk_done(desc, &walk, 0); 185 err = blkcipher_walk_done(desc, &walk, walk.nbytes % AES_BLOCK_SIZE);
186 } 186 }
187 return err; 187 return err;
188} 188}
@@ -268,7 +268,7 @@ static int aesbs_xts_encrypt(struct blkcipher_desc *desc,
268 bsaes_xts_encrypt(walk.src.virt.addr, walk.dst.virt.addr, 268 bsaes_xts_encrypt(walk.src.virt.addr, walk.dst.virt.addr,
269 walk.nbytes, &ctx->enc, walk.iv); 269 walk.nbytes, &ctx->enc, walk.iv);
270 kernel_neon_end(); 270 kernel_neon_end();
271 err = blkcipher_walk_done(desc, &walk, 0); 271 err = blkcipher_walk_done(desc, &walk, walk.nbytes % AES_BLOCK_SIZE);
272 } 272 }
273 return err; 273 return err;
274} 274}
@@ -292,7 +292,7 @@ static int aesbs_xts_decrypt(struct blkcipher_desc *desc,
292 bsaes_xts_decrypt(walk.src.virt.addr, walk.dst.virt.addr, 292 bsaes_xts_decrypt(walk.src.virt.addr, walk.dst.virt.addr,
293 walk.nbytes, &ctx->dec, walk.iv); 293 walk.nbytes, &ctx->dec, walk.iv);
294 kernel_neon_end(); 294 kernel_neon_end();
295 err = blkcipher_walk_done(desc, &walk, 0); 295 err = blkcipher_walk_done(desc, &walk, walk.nbytes % AES_BLOCK_SIZE);
296 } 296 }
297 return err; 297 return err;
298} 298}
diff --git a/arch/arm64/crypto/aes-glue.c b/arch/arm64/crypto/aes-glue.c
index 60f2f4c12256..79cd911ef88c 100644
--- a/arch/arm64/crypto/aes-glue.c
+++ b/arch/arm64/crypto/aes-glue.c
@@ -106,7 +106,7 @@ static int ecb_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
106 for (first = 1; (blocks = (walk.nbytes / AES_BLOCK_SIZE)); first = 0) { 106 for (first = 1; (blocks = (walk.nbytes / AES_BLOCK_SIZE)); first = 0) {
107 aes_ecb_encrypt(walk.dst.virt.addr, walk.src.virt.addr, 107 aes_ecb_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
108 (u8 *)ctx->key_enc, rounds, blocks, first); 108 (u8 *)ctx->key_enc, rounds, blocks, first);
109 err = blkcipher_walk_done(desc, &walk, 0); 109 err = blkcipher_walk_done(desc, &walk, walk.nbytes % AES_BLOCK_SIZE);
110 } 110 }
111 kernel_neon_end(); 111 kernel_neon_end();
112 return err; 112 return err;
@@ -128,7 +128,7 @@ static int ecb_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
128 for (first = 1; (blocks = (walk.nbytes / AES_BLOCK_SIZE)); first = 0) { 128 for (first = 1; (blocks = (walk.nbytes / AES_BLOCK_SIZE)); first = 0) {
129 aes_ecb_decrypt(walk.dst.virt.addr, walk.src.virt.addr, 129 aes_ecb_decrypt(walk.dst.virt.addr, walk.src.virt.addr,
130 (u8 *)ctx->key_dec, rounds, blocks, first); 130 (u8 *)ctx->key_dec, rounds, blocks, first);
131 err = blkcipher_walk_done(desc, &walk, 0); 131 err = blkcipher_walk_done(desc, &walk, walk.nbytes % AES_BLOCK_SIZE);
132 } 132 }
133 kernel_neon_end(); 133 kernel_neon_end();
134 return err; 134 return err;
@@ -151,7 +151,7 @@ static int cbc_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
151 aes_cbc_encrypt(walk.dst.virt.addr, walk.src.virt.addr, 151 aes_cbc_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
152 (u8 *)ctx->key_enc, rounds, blocks, walk.iv, 152 (u8 *)ctx->key_enc, rounds, blocks, walk.iv,
153 first); 153 first);
154 err = blkcipher_walk_done(desc, &walk, 0); 154 err = blkcipher_walk_done(desc, &walk, walk.nbytes % AES_BLOCK_SIZE);
155 } 155 }
156 kernel_neon_end(); 156 kernel_neon_end();
157 return err; 157 return err;
@@ -174,7 +174,7 @@ static int cbc_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
174 aes_cbc_decrypt(walk.dst.virt.addr, walk.src.virt.addr, 174 aes_cbc_decrypt(walk.dst.virt.addr, walk.src.virt.addr,
175 (u8 *)ctx->key_dec, rounds, blocks, walk.iv, 175 (u8 *)ctx->key_dec, rounds, blocks, walk.iv,
176 first); 176 first);
177 err = blkcipher_walk_done(desc, &walk, 0); 177 err = blkcipher_walk_done(desc, &walk, walk.nbytes % AES_BLOCK_SIZE);
178 } 178 }
179 kernel_neon_end(); 179 kernel_neon_end();
180 return err; 180 return err;
@@ -243,7 +243,7 @@ static int xts_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
243 aes_xts_encrypt(walk.dst.virt.addr, walk.src.virt.addr, 243 aes_xts_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
244 (u8 *)ctx->key1.key_enc, rounds, blocks, 244 (u8 *)ctx->key1.key_enc, rounds, blocks,
245 (u8 *)ctx->key2.key_enc, walk.iv, first); 245 (u8 *)ctx->key2.key_enc, walk.iv, first);
246 err = blkcipher_walk_done(desc, &walk, 0); 246 err = blkcipher_walk_done(desc, &walk, walk.nbytes % AES_BLOCK_SIZE);
247 } 247 }
248 kernel_neon_end(); 248 kernel_neon_end();
249 249
@@ -267,7 +267,7 @@ static int xts_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
267 aes_xts_decrypt(walk.dst.virt.addr, walk.src.virt.addr, 267 aes_xts_decrypt(walk.dst.virt.addr, walk.src.virt.addr,
268 (u8 *)ctx->key1.key_dec, rounds, blocks, 268 (u8 *)ctx->key1.key_dec, rounds, blocks,
269 (u8 *)ctx->key2.key_enc, walk.iv, first); 269 (u8 *)ctx->key2.key_enc, walk.iv, first);
270 err = blkcipher_walk_done(desc, &walk, 0); 270 err = blkcipher_walk_done(desc, &walk, walk.nbytes % AES_BLOCK_SIZE);
271 } 271 }
272 kernel_neon_end(); 272 kernel_neon_end();
273 273
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index f43db8a69262..e90c5426fe14 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -60,6 +60,17 @@ static int __init early_initrd(char *p)
60early_param("initrd", early_initrd); 60early_param("initrd", early_initrd);
61#endif 61#endif
62 62
63/*
64 * Return the maximum physical address for ZONE_DMA (DMA_BIT_MASK(32)). It
65 * currently assumes that for memory starting above 4G, 32-bit devices will
66 * use a DMA offset.
67 */
68static phys_addr_t max_zone_dma_phys(void)
69{
70 phys_addr_t offset = memblock_start_of_DRAM() & GENMASK_ULL(63, 32);
71 return min(offset + (1ULL << 32), memblock_end_of_DRAM());
72}
73
63static void __init zone_sizes_init(unsigned long min, unsigned long max) 74static void __init zone_sizes_init(unsigned long min, unsigned long max)
64{ 75{
65 struct memblock_region *reg; 76 struct memblock_region *reg;
@@ -70,9 +81,7 @@ static void __init zone_sizes_init(unsigned long min, unsigned long max)
70 81
71 /* 4GB maximum for 32-bit only capable devices */ 82 /* 4GB maximum for 32-bit only capable devices */
72 if (IS_ENABLED(CONFIG_ZONE_DMA)) { 83 if (IS_ENABLED(CONFIG_ZONE_DMA)) {
73 unsigned long max_dma_phys = 84 max_dma = PFN_DOWN(max_zone_dma_phys());
74 (unsigned long)(dma_to_phys(NULL, DMA_BIT_MASK(32)) + 1);
75 max_dma = max(min, min(max, max_dma_phys >> PAGE_SHIFT));
76 zone_size[ZONE_DMA] = max_dma - min; 85 zone_size[ZONE_DMA] = max_dma - min;
77 } 86 }
78 zone_size[ZONE_NORMAL] = max - max_dma; 87 zone_size[ZONE_NORMAL] = max - max_dma;
@@ -146,7 +155,7 @@ void __init arm64_memblock_init(void)
146 155
147 /* 4GB maximum for 32-bit only capable devices */ 156 /* 4GB maximum for 32-bit only capable devices */
148 if (IS_ENABLED(CONFIG_ZONE_DMA)) 157 if (IS_ENABLED(CONFIG_ZONE_DMA))
149 dma_phys_limit = dma_to_phys(NULL, DMA_BIT_MASK(32)) + 1; 158 dma_phys_limit = max_zone_dma_phys();
150 dma_contiguous_reserve(dma_phys_limit); 159 dma_contiguous_reserve(dma_phys_limit);
151 160
152 memblock_allow_resize(); 161 memblock_allow_resize();
diff --git a/arch/blackfin/configs/BF609-EZKIT_defconfig b/arch/blackfin/configs/BF609-EZKIT_defconfig
index a7e9bfd84183..fcec5ce71392 100644
--- a/arch/blackfin/configs/BF609-EZKIT_defconfig
+++ b/arch/blackfin/configs/BF609-EZKIT_defconfig
@@ -102,7 +102,7 @@ CONFIG_I2C_CHARDEV=y
102CONFIG_I2C_BLACKFIN_TWI=y 102CONFIG_I2C_BLACKFIN_TWI=y
103CONFIG_I2C_BLACKFIN_TWI_CLK_KHZ=100 103CONFIG_I2C_BLACKFIN_TWI_CLK_KHZ=100
104CONFIG_SPI=y 104CONFIG_SPI=y
105CONFIG_SPI_BFIN_V3=y 105CONFIG_SPI_ADI_V3=y
106CONFIG_GPIOLIB=y 106CONFIG_GPIOLIB=y
107CONFIG_GPIO_SYSFS=y 107CONFIG_GPIO_SYSFS=y
108# CONFIG_HWMON is not set 108# CONFIG_HWMON is not set
diff --git a/arch/blackfin/kernel/vmlinux.lds.S b/arch/blackfin/kernel/vmlinux.lds.S
index ba35864b2b74..c9eec84aa258 100644
--- a/arch/blackfin/kernel/vmlinux.lds.S
+++ b/arch/blackfin/kernel/vmlinux.lds.S
@@ -145,7 +145,7 @@ SECTIONS
145 145
146 .text_l1 L1_CODE_START : AT(LOADADDR(.exit.data) + SIZEOF(.exit.data)) 146 .text_l1 L1_CODE_START : AT(LOADADDR(.exit.data) + SIZEOF(.exit.data))
147#else 147#else
148 .init.data : AT(__data_lma + __data_len) 148 .init.data : AT(__data_lma + __data_len + 32)
149 { 149 {
150 __sinitdata = .; 150 __sinitdata = .;
151 INIT_DATA 151 INIT_DATA
diff --git a/arch/blackfin/mach-bf533/boards/blackstamp.c b/arch/blackfin/mach-bf533/boards/blackstamp.c
index 63b0e4fe760c..0ccf0cf4daaf 100644
--- a/arch/blackfin/mach-bf533/boards/blackstamp.c
+++ b/arch/blackfin/mach-bf533/boards/blackstamp.c
@@ -20,6 +20,7 @@
20#include <linux/spi/spi.h> 20#include <linux/spi/spi.h>
21#include <linux/spi/flash.h> 21#include <linux/spi/flash.h>
22#include <linux/irq.h> 22#include <linux/irq.h>
23#include <linux/gpio.h>
23#include <linux/i2c.h> 24#include <linux/i2c.h>
24#include <asm/dma.h> 25#include <asm/dma.h>
25#include <asm/bfin5xx_spi.h> 26#include <asm/bfin5xx_spi.h>
diff --git a/arch/blackfin/mach-bf537/boards/cm_bf537e.c b/arch/blackfin/mach-bf537/boards/cm_bf537e.c
index c65c6dbda3da..1e7290ef3525 100644
--- a/arch/blackfin/mach-bf537/boards/cm_bf537e.c
+++ b/arch/blackfin/mach-bf537/boards/cm_bf537e.c
@@ -21,6 +21,7 @@
21#endif 21#endif
22#include <linux/ata_platform.h> 22#include <linux/ata_platform.h>
23#include <linux/irq.h> 23#include <linux/irq.h>
24#include <linux/gpio.h>
24#include <asm/dma.h> 25#include <asm/dma.h>
25#include <asm/bfin5xx_spi.h> 26#include <asm/bfin5xx_spi.h>
26#include <asm/portmux.h> 27#include <asm/portmux.h>
diff --git a/arch/blackfin/mach-bf537/boards/cm_bf537u.c b/arch/blackfin/mach-bf537/boards/cm_bf537u.c
index af58454b4bff..c7495dc74690 100644
--- a/arch/blackfin/mach-bf537/boards/cm_bf537u.c
+++ b/arch/blackfin/mach-bf537/boards/cm_bf537u.c
@@ -21,6 +21,7 @@
21#endif 21#endif
22#include <linux/ata_platform.h> 22#include <linux/ata_platform.h>
23#include <linux/irq.h> 23#include <linux/irq.h>
24#include <linux/gpio.h>
24#include <asm/dma.h> 25#include <asm/dma.h>
25#include <asm/bfin5xx_spi.h> 26#include <asm/bfin5xx_spi.h>
26#include <asm/portmux.h> 27#include <asm/portmux.h>
diff --git a/arch/blackfin/mach-bf537/boards/tcm_bf537.c b/arch/blackfin/mach-bf537/boards/tcm_bf537.c
index a0211225748d..6b988ad653d8 100644
--- a/arch/blackfin/mach-bf537/boards/tcm_bf537.c
+++ b/arch/blackfin/mach-bf537/boards/tcm_bf537.c
@@ -21,6 +21,7 @@
21#endif 21#endif
22#include <linux/ata_platform.h> 22#include <linux/ata_platform.h>
23#include <linux/irq.h> 23#include <linux/irq.h>
24#include <linux/gpio.h>
24#include <asm/dma.h> 25#include <asm/dma.h>
25#include <asm/bfin5xx_spi.h> 26#include <asm/bfin5xx_spi.h>
26#include <asm/portmux.h> 27#include <asm/portmux.h>
diff --git a/arch/blackfin/mach-bf548/boards/ezkit.c b/arch/blackfin/mach-bf548/boards/ezkit.c
index 90138e6112c1..1fe7ff286619 100644
--- a/arch/blackfin/mach-bf548/boards/ezkit.c
+++ b/arch/blackfin/mach-bf548/boards/ezkit.c
@@ -2118,7 +2118,7 @@ static struct pinctrl_map __initdata bfin_pinmux_map[] = {
2118 PIN_MAP_MUX_GROUP_DEFAULT("bfin-rotary", "pinctrl-adi2.0", NULL, "rotary"), 2118 PIN_MAP_MUX_GROUP_DEFAULT("bfin-rotary", "pinctrl-adi2.0", NULL, "rotary"),
2119 PIN_MAP_MUX_GROUP_DEFAULT("bfin_can.0", "pinctrl-adi2.0", NULL, "can0"), 2119 PIN_MAP_MUX_GROUP_DEFAULT("bfin_can.0", "pinctrl-adi2.0", NULL, "can0"),
2120 PIN_MAP_MUX_GROUP_DEFAULT("bfin_can.1", "pinctrl-adi2.0", NULL, "can1"), 2120 PIN_MAP_MUX_GROUP_DEFAULT("bfin_can.1", "pinctrl-adi2.0", NULL, "can1"),
2121 PIN_MAP_MUX_GROUP_DEFAULT("bf54x-lq043", "pinctrl-adi2.0", NULL, "ppi0_24b"), 2121 PIN_MAP_MUX_GROUP_DEFAULT("bf54x-lq043", "pinctrl-adi2.0", "ppi0_24bgrp", "ppi0"),
2122 PIN_MAP_MUX_GROUP_DEFAULT("bfin-i2s.0", "pinctrl-adi2.0", NULL, "sport0"), 2122 PIN_MAP_MUX_GROUP_DEFAULT("bfin-i2s.0", "pinctrl-adi2.0", NULL, "sport0"),
2123 PIN_MAP_MUX_GROUP_DEFAULT("bfin-tdm.0", "pinctrl-adi2.0", NULL, "sport0"), 2123 PIN_MAP_MUX_GROUP_DEFAULT("bfin-tdm.0", "pinctrl-adi2.0", NULL, "sport0"),
2124 PIN_MAP_MUX_GROUP_DEFAULT("bfin-ac97.0", "pinctrl-adi2.0", NULL, "sport0"), 2124 PIN_MAP_MUX_GROUP_DEFAULT("bfin-ac97.0", "pinctrl-adi2.0", NULL, "sport0"),
@@ -2140,7 +2140,9 @@ static struct pinctrl_map __initdata bfin_pinmux_map[] = {
2140 PIN_MAP_MUX_GROUP_DEFAULT("pata-bf54x", "pinctrl-adi2.0", NULL, "atapi_alter"), 2140 PIN_MAP_MUX_GROUP_DEFAULT("pata-bf54x", "pinctrl-adi2.0", NULL, "atapi_alter"),
2141#endif 2141#endif
2142 PIN_MAP_MUX_GROUP_DEFAULT("bf5xx-nand.0", "pinctrl-adi2.0", NULL, "nfc0"), 2142 PIN_MAP_MUX_GROUP_DEFAULT("bf5xx-nand.0", "pinctrl-adi2.0", NULL, "nfc0"),
2143 PIN_MAP_MUX_GROUP_DEFAULT("bf54x-keys", "pinctrl-adi2.0", NULL, "keys_4x4"), 2143 PIN_MAP_MUX_GROUP_DEFAULT("bf54x-keys", "pinctrl-adi2.0", "keys_4x4grp", "keys"),
2144 PIN_MAP_MUX_GROUP("bf54x-keys", "4bit", "pinctrl-adi2.0", "keys_4x4grp", "keys"),
2145 PIN_MAP_MUX_GROUP("bf54x-keys", "8bit", "pinctrl-adi2.0", "keys_8x8grp", "keys"),
2144}; 2146};
2145 2147
2146static int __init ezkit_init(void) 2148static int __init ezkit_init(void)
diff --git a/arch/blackfin/mach-bf561/boards/acvilon.c b/arch/blackfin/mach-bf561/boards/acvilon.c
index 430b16d5ccb1..6ab951534d79 100644
--- a/arch/blackfin/mach-bf561/boards/acvilon.c
+++ b/arch/blackfin/mach-bf561/boards/acvilon.c
@@ -44,6 +44,7 @@
44#include <linux/spi/flash.h> 44#include <linux/spi/flash.h>
45#include <linux/irq.h> 45#include <linux/irq.h>
46#include <linux/interrupt.h> 46#include <linux/interrupt.h>
47#include <linux/gpio.h>
47#include <linux/jiffies.h> 48#include <linux/jiffies.h>
48#include <linux/i2c-pca-platform.h> 49#include <linux/i2c-pca-platform.h>
49#include <linux/delay.h> 50#include <linux/delay.h>
diff --git a/arch/blackfin/mach-bf561/boards/cm_bf561.c b/arch/blackfin/mach-bf561/boards/cm_bf561.c
index 9f777df4cacc..e862f7823e68 100644
--- a/arch/blackfin/mach-bf561/boards/cm_bf561.c
+++ b/arch/blackfin/mach-bf561/boards/cm_bf561.c
@@ -18,6 +18,7 @@
18#endif 18#endif
19#include <linux/ata_platform.h> 19#include <linux/ata_platform.h>
20#include <linux/irq.h> 20#include <linux/irq.h>
21#include <linux/gpio.h>
21#include <asm/dma.h> 22#include <asm/dma.h>
22#include <asm/bfin5xx_spi.h> 23#include <asm/bfin5xx_spi.h>
23#include <asm/portmux.h> 24#include <asm/portmux.h>
diff --git a/arch/blackfin/mach-bf561/boards/ezkit.c b/arch/blackfin/mach-bf561/boards/ezkit.c
index 88dee43e7abe..2de71e8c104b 100644
--- a/arch/blackfin/mach-bf561/boards/ezkit.c
+++ b/arch/blackfin/mach-bf561/boards/ezkit.c
@@ -14,6 +14,7 @@
14#include <linux/spi/spi.h> 14#include <linux/spi/spi.h>
15#include <linux/irq.h> 15#include <linux/irq.h>
16#include <linux/interrupt.h> 16#include <linux/interrupt.h>
17#include <linux/gpio.h>
17#include <linux/delay.h> 18#include <linux/delay.h>
18#include <asm/dma.h> 19#include <asm/dma.h>
19#include <asm/bfin5xx_spi.h> 20#include <asm/bfin5xx_spi.h>
diff --git a/arch/blackfin/mach-bf609/boards/ezkit.c b/arch/blackfin/mach-bf609/boards/ezkit.c
index 1ba4600de69f..e2c0b024ce88 100644
--- a/arch/blackfin/mach-bf609/boards/ezkit.c
+++ b/arch/blackfin/mach-bf609/boards/ezkit.c
@@ -698,8 +698,6 @@ int bf609_nor_flash_init(struct platform_device *pdev)
698{ 698{
699#define CONFIG_SMC_GCTL_VAL 0x00000010 699#define CONFIG_SMC_GCTL_VAL 0x00000010
700 700
701 if (!devm_pinctrl_get_select_default(&pdev->dev))
702 return -EBUSY;
703 bfin_write32(SMC_GCTL, CONFIG_SMC_GCTL_VAL); 701 bfin_write32(SMC_GCTL, CONFIG_SMC_GCTL_VAL);
704 bfin_write32(SMC_B0CTL, 0x01002011); 702 bfin_write32(SMC_B0CTL, 0x01002011);
705 bfin_write32(SMC_B0TIM, 0x08170977); 703 bfin_write32(SMC_B0TIM, 0x08170977);
@@ -709,7 +707,6 @@ int bf609_nor_flash_init(struct platform_device *pdev)
709 707
710void bf609_nor_flash_exit(struct platform_device *pdev) 708void bf609_nor_flash_exit(struct platform_device *pdev)
711{ 709{
712 devm_pinctrl_put(pdev->dev.pins->p);
713 bfin_write32(SMC_GCTL, 0); 710 bfin_write32(SMC_GCTL, 0);
714} 711}
715 712
@@ -2058,15 +2055,14 @@ static struct pinctrl_map __initdata bfin_pinmux_map[] = {
2058 PIN_MAP_MUX_GROUP_DEFAULT("bfin-rotary", "pinctrl-adi2.0", NULL, "rotary"), 2055 PIN_MAP_MUX_GROUP_DEFAULT("bfin-rotary", "pinctrl-adi2.0", NULL, "rotary"),
2059 PIN_MAP_MUX_GROUP_DEFAULT("bfin_can.0", "pinctrl-adi2.0", NULL, "can0"), 2056 PIN_MAP_MUX_GROUP_DEFAULT("bfin_can.0", "pinctrl-adi2.0", NULL, "can0"),
2060 PIN_MAP_MUX_GROUP_DEFAULT("physmap-flash.0", "pinctrl-adi2.0", NULL, "smc0"), 2057 PIN_MAP_MUX_GROUP_DEFAULT("physmap-flash.0", "pinctrl-adi2.0", NULL, "smc0"),
2061 PIN_MAP_MUX_GROUP_DEFAULT("bf609_nl8048.2", "pinctrl-adi2.0", NULL, "ppi2_16b"), 2058 PIN_MAP_MUX_GROUP_DEFAULT("bf609_nl8048.2", "pinctrl-adi2.0", "ppi2_16bgrp", "ppi2"),
2062 PIN_MAP_MUX_GROUP_DEFAULT("bfin_display.0", "pinctrl-adi2.0", NULL, "ppi0_16b"), 2059 PIN_MAP_MUX_GROUP("bfin_display.0", "8bit", "pinctrl-adi2.0", "ppi2_8bgrp", "ppi2"),
2063#if IS_ENABLED(CONFIG_VIDEO_MT9M114) 2060 PIN_MAP_MUX_GROUP_DEFAULT("bfin_display.0", "pinctrl-adi2.0", "ppi2_16bgrp", "ppi2"),
2064 PIN_MAP_MUX_GROUP_DEFAULT("bfin_capture.0", "pinctrl-adi2.0", NULL, "ppi0_8b"), 2061 PIN_MAP_MUX_GROUP("bfin_display.0", "16bit", "pinctrl-adi2.0", "ppi2_16bgrp", "ppi2"),
2065#elif IS_ENABLED(CONFIG_VIDEO_VS6624) 2062 PIN_MAP_MUX_GROUP("bfin_capture.0", "8bit", "pinctrl-adi2.0", "ppi0_8bgrp", "ppi0"),
2066 PIN_MAP_MUX_GROUP_DEFAULT("bfin_capture.0", "pinctrl-adi2.0", NULL, "ppi0_16b"), 2063 PIN_MAP_MUX_GROUP_DEFAULT("bfin_capture.0", "pinctrl-adi2.0", "ppi0_16bgrp", "ppi0"),
2067#else 2064 PIN_MAP_MUX_GROUP("bfin_capture.0", "16bit", "pinctrl-adi2.0", "ppi0_16bgrp", "ppi0"),
2068 PIN_MAP_MUX_GROUP_DEFAULT("bfin_capture.0", "pinctrl-adi2.0", NULL, "ppi0_24b"), 2065 PIN_MAP_MUX_GROUP("bfin_capture.0", "24bit", "pinctrl-adi2.0", "ppi0_24bgrp", "ppi0"),
2069#endif
2070 PIN_MAP_MUX_GROUP_DEFAULT("bfin-i2s.0", "pinctrl-adi2.0", NULL, "sport0"), 2066 PIN_MAP_MUX_GROUP_DEFAULT("bfin-i2s.0", "pinctrl-adi2.0", NULL, "sport0"),
2071 PIN_MAP_MUX_GROUP_DEFAULT("bfin-tdm.0", "pinctrl-adi2.0", NULL, "sport0"), 2067 PIN_MAP_MUX_GROUP_DEFAULT("bfin-tdm.0", "pinctrl-adi2.0", NULL, "sport0"),
2072 PIN_MAP_MUX_GROUP_DEFAULT("bfin-i2s.1", "pinctrl-adi2.0", NULL, "sport1"), 2068 PIN_MAP_MUX_GROUP_DEFAULT("bfin-i2s.1", "pinctrl-adi2.0", NULL, "sport1"),
diff --git a/arch/blackfin/mach-bf609/include/mach/pm.h b/arch/blackfin/mach-bf609/include/mach/pm.h
index 3ca0fb965636..a1efd936dd30 100644
--- a/arch/blackfin/mach-bf609/include/mach/pm.h
+++ b/arch/blackfin/mach-bf609/include/mach/pm.h
@@ -10,6 +10,7 @@
10#define __MACH_BF609_PM_H__ 10#define __MACH_BF609_PM_H__
11 11
12#include <linux/suspend.h> 12#include <linux/suspend.h>
13#include <linux/platform_device.h>
13 14
14extern int bfin609_pm_enter(suspend_state_t state); 15extern int bfin609_pm_enter(suspend_state_t state);
15extern int bf609_pm_prepare(void); 16extern int bf609_pm_prepare(void);
@@ -19,6 +20,6 @@ void bf609_hibernate(void);
19void bfin_sec_raise_irq(unsigned int sid); 20void bfin_sec_raise_irq(unsigned int sid);
20void coreb_enable(void); 21void coreb_enable(void);
21 22
22int bf609_nor_flash_init(void); 23int bf609_nor_flash_init(struct platform_device *pdev);
23void bf609_nor_flash_exit(void); 24void bf609_nor_flash_exit(struct platform_device *pdev);
24#endif 25#endif
diff --git a/arch/blackfin/mach-bf609/pm.c b/arch/blackfin/mach-bf609/pm.c
index 0cdd6955c7be..b1bfcf434d16 100644
--- a/arch/blackfin/mach-bf609/pm.c
+++ b/arch/blackfin/mach-bf609/pm.c
@@ -291,13 +291,13 @@ static struct bfin_cpu_pm_fns bf609_cpu_pm = {
291#if defined(CONFIG_MTD_PHYSMAP) || defined(CONFIG_MTD_PHYSMAP_MODULE) 291#if defined(CONFIG_MTD_PHYSMAP) || defined(CONFIG_MTD_PHYSMAP_MODULE)
292static int smc_pm_syscore_suspend(void) 292static int smc_pm_syscore_suspend(void)
293{ 293{
294 bf609_nor_flash_exit(); 294 bf609_nor_flash_exit(NULL);
295 return 0; 295 return 0;
296} 296}
297 297
298static void smc_pm_syscore_resume(void) 298static void smc_pm_syscore_resume(void)
299{ 299{
300 bf609_nor_flash_init(); 300 bf609_nor_flash_init(NULL);
301} 301}
302 302
303static struct syscore_ops smc_pm_syscore_ops = { 303static struct syscore_ops smc_pm_syscore_ops = {
diff --git a/arch/blackfin/mach-common/ints-priority.c b/arch/blackfin/mach-common/ints-priority.c
index 867b7cef204c..1f94784eab6d 100644
--- a/arch/blackfin/mach-common/ints-priority.c
+++ b/arch/blackfin/mach-common/ints-priority.c
@@ -1208,8 +1208,6 @@ int __init init_arch_irq(void)
1208 1208
1209 bfin_sec_set_priority(CONFIG_SEC_IRQ_PRIORITY_LEVELS, sec_int_priority); 1209 bfin_sec_set_priority(CONFIG_SEC_IRQ_PRIORITY_LEVELS, sec_int_priority);
1210 1210
1211 bfin_sec_set_priority(CONFIG_SEC_IRQ_PRIORITY_LEVELS, sec_int_priority);
1212
1213 /* Enable interrupts IVG7-15 */ 1211 /* Enable interrupts IVG7-15 */
1214 bfin_irq_flags |= IMASK_IVG15 | 1212 bfin_irq_flags |= IMASK_IVG15 |
1215 IMASK_IVG14 | IMASK_IVG13 | IMASK_IVG12 | IMASK_IVG11 | 1213 IMASK_IVG14 | IMASK_IVG13 | IMASK_IVG12 | IMASK_IVG11 |
diff --git a/arch/parisc/include/uapi/asm/signal.h b/arch/parisc/include/uapi/asm/signal.h
index a2fa297196bc..f5645d6a89f2 100644
--- a/arch/parisc/include/uapi/asm/signal.h
+++ b/arch/parisc/include/uapi/asm/signal.h
@@ -69,8 +69,6 @@
69#define SA_NOMASK SA_NODEFER 69#define SA_NOMASK SA_NODEFER
70#define SA_ONESHOT SA_RESETHAND 70#define SA_ONESHOT SA_RESETHAND
71 71
72#define SA_RESTORER 0x04000000 /* obsolete -- ignored */
73
74#define MINSIGSTKSZ 2048 72#define MINSIGSTKSZ 2048
75#define SIGSTKSZ 8192 73#define SIGSTKSZ 8192
76 74
diff --git a/arch/parisc/mm/init.c b/arch/parisc/mm/init.c
index ae085ad0fba0..0bef864264c0 100644
--- a/arch/parisc/mm/init.c
+++ b/arch/parisc/mm/init.c
@@ -728,7 +728,6 @@ static void __init pagetable_init(void)
728#endif 728#endif
729 729
730 empty_zero_page = alloc_bootmem_pages(PAGE_SIZE); 730 empty_zero_page = alloc_bootmem_pages(PAGE_SIZE);
731 memset(empty_zero_page, 0, PAGE_SIZE);
732} 731}
733 732
734static void __init gateway_init(void) 733static void __init gateway_init(void)
diff --git a/arch/powerpc/include/asm/cputable.h b/arch/powerpc/include/asm/cputable.h
index bc2347774f0a..0fdd7eece6d9 100644
--- a/arch/powerpc/include/asm/cputable.h
+++ b/arch/powerpc/include/asm/cputable.h
@@ -447,6 +447,7 @@ extern const char *powerpc_base_platform;
447 CPU_FTR_DBELL | CPU_FTR_HAS_PPR | CPU_FTR_DAWR | \ 447 CPU_FTR_DBELL | CPU_FTR_HAS_PPR | CPU_FTR_DAWR | \
448 CPU_FTR_ARCH_207S | CPU_FTR_TM_COMP) 448 CPU_FTR_ARCH_207S | CPU_FTR_TM_COMP)
449#define CPU_FTRS_POWER8E (CPU_FTRS_POWER8 | CPU_FTR_PMAO_BUG) 449#define CPU_FTRS_POWER8E (CPU_FTRS_POWER8 | CPU_FTR_PMAO_BUG)
450#define CPU_FTRS_POWER8_DD1 (CPU_FTRS_POWER8 & ~CPU_FTR_DBELL)
450#define CPU_FTRS_CELL (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \ 451#define CPU_FTRS_CELL (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \
451 CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \ 452 CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \
452 CPU_FTR_ALTIVEC_COMP | CPU_FTR_MMCRA | CPU_FTR_SMT | \ 453 CPU_FTR_ALTIVEC_COMP | CPU_FTR_MMCRA | CPU_FTR_SMT | \
diff --git a/arch/powerpc/include/asm/kvm_book3s_64.h b/arch/powerpc/include/asm/kvm_book3s_64.h
index fddb72b48ce9..d645428a65a4 100644
--- a/arch/powerpc/include/asm/kvm_book3s_64.h
+++ b/arch/powerpc/include/asm/kvm_book3s_64.h
@@ -198,8 +198,10 @@ static inline unsigned long compute_tlbie_rb(unsigned long v, unsigned long r,
198 return rb; 198 return rb;
199} 199}
200 200
201static inline unsigned long hpte_page_size(unsigned long h, unsigned long l) 201static inline unsigned long __hpte_page_size(unsigned long h, unsigned long l,
202 bool is_base_size)
202{ 203{
204
203 int size, a_psize; 205 int size, a_psize;
204 /* Look at the 8 bit LP value */ 206 /* Look at the 8 bit LP value */
205 unsigned int lp = (l >> LP_SHIFT) & ((1 << LP_BITS) - 1); 207 unsigned int lp = (l >> LP_SHIFT) & ((1 << LP_BITS) - 1);
@@ -214,14 +216,27 @@ static inline unsigned long hpte_page_size(unsigned long h, unsigned long l)
214 continue; 216 continue;
215 217
216 a_psize = __hpte_actual_psize(lp, size); 218 a_psize = __hpte_actual_psize(lp, size);
217 if (a_psize != -1) 219 if (a_psize != -1) {
220 if (is_base_size)
221 return 1ul << mmu_psize_defs[size].shift;
218 return 1ul << mmu_psize_defs[a_psize].shift; 222 return 1ul << mmu_psize_defs[a_psize].shift;
223 }
219 } 224 }
220 225
221 } 226 }
222 return 0; 227 return 0;
223} 228}
224 229
230static inline unsigned long hpte_page_size(unsigned long h, unsigned long l)
231{
232 return __hpte_page_size(h, l, 0);
233}
234
235static inline unsigned long hpte_base_page_size(unsigned long h, unsigned long l)
236{
237 return __hpte_page_size(h, l, 1);
238}
239
225static inline unsigned long hpte_rpn(unsigned long ptel, unsigned long psize) 240static inline unsigned long hpte_rpn(unsigned long ptel, unsigned long psize)
226{ 241{
227 return ((ptel & HPTE_R_RPN) & ~(psize - 1)) >> PAGE_SHIFT; 242 return ((ptel & HPTE_R_RPN) & ~(psize - 1)) >> PAGE_SHIFT;
diff --git a/arch/powerpc/include/asm/mmu-hash64.h b/arch/powerpc/include/asm/mmu-hash64.h
index 807014dde821..c2b4dcf23d03 100644
--- a/arch/powerpc/include/asm/mmu-hash64.h
+++ b/arch/powerpc/include/asm/mmu-hash64.h
@@ -22,6 +22,7 @@
22 */ 22 */
23#include <asm/pgtable-ppc64.h> 23#include <asm/pgtable-ppc64.h>
24#include <asm/bug.h> 24#include <asm/bug.h>
25#include <asm/processor.h>
25 26
26/* 27/*
27 * Segment table 28 * Segment table
@@ -496,7 +497,7 @@ extern void slb_set_size(u16 size);
496 */ 497 */
497struct subpage_prot_table { 498struct subpage_prot_table {
498 unsigned long maxaddr; /* only addresses < this are protected */ 499 unsigned long maxaddr; /* only addresses < this are protected */
499 unsigned int **protptrs[2]; 500 unsigned int **protptrs[(TASK_SIZE_USER64 >> 43)];
500 unsigned int *low_prot[4]; 501 unsigned int *low_prot[4];
501}; 502};
502 503
diff --git a/arch/powerpc/include/asm/ppc_asm.h b/arch/powerpc/include/asm/ppc_asm.h
index 9ea266eae33e..7e4612528546 100644
--- a/arch/powerpc/include/asm/ppc_asm.h
+++ b/arch/powerpc/include/asm/ppc_asm.h
@@ -277,6 +277,8 @@ n:
277 .globl n; \ 277 .globl n; \
278n: 278n:
279 279
280#define _GLOBAL_TOC(name) _GLOBAL(name)
281
280#define _KPROBE(n) \ 282#define _KPROBE(n) \
281 .section ".kprobes.text","a"; \ 283 .section ".kprobes.text","a"; \
282 .globl n; \ 284 .globl n; \
diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c
index 965291b4c2fa..0c157642c2a1 100644
--- a/arch/powerpc/kernel/cputable.c
+++ b/arch/powerpc/kernel/cputable.c
@@ -527,6 +527,26 @@ static struct cpu_spec __initdata cpu_specs[] = {
527 .machine_check_early = __machine_check_early_realmode_p8, 527 .machine_check_early = __machine_check_early_realmode_p8,
528 .platform = "power8", 528 .platform = "power8",
529 }, 529 },
530 { /* Power8 DD1: Does not support doorbell IPIs */
531 .pvr_mask = 0xffffff00,
532 .pvr_value = 0x004d0100,
533 .cpu_name = "POWER8 (raw)",
534 .cpu_features = CPU_FTRS_POWER8_DD1,
535 .cpu_user_features = COMMON_USER_POWER8,
536 .cpu_user_features2 = COMMON_USER2_POWER8,
537 .mmu_features = MMU_FTRS_POWER8,
538 .icache_bsize = 128,
539 .dcache_bsize = 128,
540 .num_pmcs = 6,
541 .pmc_type = PPC_PMC_IBM,
542 .oprofile_cpu_type = "ppc64/power8",
543 .oprofile_type = PPC_OPROFILE_INVALID,
544 .cpu_setup = __setup_cpu_power8,
545 .cpu_restore = __restore_cpu_power8,
546 .flush_tlb = __flush_tlb_power8,
547 .machine_check_early = __machine_check_early_realmode_p8,
548 .platform = "power8",
549 },
530 { /* Power8 */ 550 { /* Power8 */
531 .pvr_mask = 0xffff0000, 551 .pvr_mask = 0xffff0000,
532 .pvr_value = 0x004d0000, 552 .pvr_value = 0x004d0000,
diff --git a/arch/powerpc/kernel/rtas_flash.c b/arch/powerpc/kernel/rtas_flash.c
index 658e89d2025b..db2b482af658 100644
--- a/arch/powerpc/kernel/rtas_flash.c
+++ b/arch/powerpc/kernel/rtas_flash.c
@@ -611,17 +611,19 @@ static void rtas_flash_firmware(int reboot_type)
611 for (f = flist; f; f = next) { 611 for (f = flist; f; f = next) {
612 /* Translate data addrs to absolute */ 612 /* Translate data addrs to absolute */
613 for (i = 0; i < f->num_blocks; i++) { 613 for (i = 0; i < f->num_blocks; i++) {
614 f->blocks[i].data = (char *)__pa(f->blocks[i].data); 614 f->blocks[i].data = (char *)cpu_to_be64(__pa(f->blocks[i].data));
615 image_size += f->blocks[i].length; 615 image_size += f->blocks[i].length;
616 f->blocks[i].length = cpu_to_be64(f->blocks[i].length);
616 } 617 }
617 next = f->next; 618 next = f->next;
618 /* Don't translate NULL pointer for last entry */ 619 /* Don't translate NULL pointer for last entry */
619 if (f->next) 620 if (f->next)
620 f->next = (struct flash_block_list *)__pa(f->next); 621 f->next = (struct flash_block_list *)cpu_to_be64(__pa(f->next));
621 else 622 else
622 f->next = NULL; 623 f->next = NULL;
623 /* make num_blocks into the version/length field */ 624 /* make num_blocks into the version/length field */
624 f->num_blocks = (FLASH_BLOCK_LIST_VERSION << 56) | ((f->num_blocks+1)*16); 625 f->num_blocks = (FLASH_BLOCK_LIST_VERSION << 56) | ((f->num_blocks+1)*16);
626 f->num_blocks = cpu_to_be64(f->num_blocks);
625 } 627 }
626 628
627 printk(KERN_ALERT "FLASH: flash image is %ld bytes\n", image_size); 629 printk(KERN_ALERT "FLASH: flash image is %ld bytes\n", image_size);
diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c
index 80561074078d..68468d695f12 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_hv.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c
@@ -1562,7 +1562,7 @@ static ssize_t kvm_htab_write(struct file *file, const char __user *buf,
1562 goto out; 1562 goto out;
1563 } 1563 }
1564 if (!rma_setup && is_vrma_hpte(v)) { 1564 if (!rma_setup && is_vrma_hpte(v)) {
1565 unsigned long psize = hpte_page_size(v, r); 1565 unsigned long psize = hpte_base_page_size(v, r);
1566 unsigned long senc = slb_pgsize_encoding(psize); 1566 unsigned long senc = slb_pgsize_encoding(psize);
1567 unsigned long lpcr; 1567 unsigned long lpcr;
1568 1568
diff --git a/arch/powerpc/kvm/book3s_hv_rm_mmu.c b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
index 6e6224318c36..5a24d3c2b6b8 100644
--- a/arch/powerpc/kvm/book3s_hv_rm_mmu.c
+++ b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
@@ -814,13 +814,10 @@ long kvmppc_hv_find_lock_hpte(struct kvm *kvm, gva_t eaddr, unsigned long slb_v,
814 r = hpte[i+1]; 814 r = hpte[i+1];
815 815
816 /* 816 /*
817 * Check the HPTE again, including large page size 817 * Check the HPTE again, including base page size
818 * Since we don't currently allow any MPSS (mixed
819 * page-size segment) page sizes, it is sufficient
820 * to check against the actual page size.
821 */ 818 */
822 if ((v & valid) && (v & mask) == val && 819 if ((v & valid) && (v & mask) == val &&
823 hpte_page_size(v, r) == (1ul << pshift)) 820 hpte_base_page_size(v, r) == (1ul << pshift))
824 /* Return with the HPTE still locked */ 821 /* Return with the HPTE still locked */
825 return (hash << 3) + (i >> 1); 822 return (hash << 3) + (i >> 1);
826 823
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
index 868347ef09fd..558a67df8126 100644
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -48,7 +48,7 @@
48 * 48 *
49 * LR = return address to continue at after eventually re-enabling MMU 49 * LR = return address to continue at after eventually re-enabling MMU
50 */ 50 */
51_GLOBAL(kvmppc_hv_entry_trampoline) 51_GLOBAL_TOC(kvmppc_hv_entry_trampoline)
52 mflr r0 52 mflr r0
53 std r0, PPC_LR_STKOFF(r1) 53 std r0, PPC_LR_STKOFF(r1)
54 stdu r1, -112(r1) 54 stdu r1, -112(r1)
diff --git a/arch/powerpc/kvm/book3s_interrupts.S b/arch/powerpc/kvm/book3s_interrupts.S
index e2c29e381dc7..d044b8b7c69d 100644
--- a/arch/powerpc/kvm/book3s_interrupts.S
+++ b/arch/powerpc/kvm/book3s_interrupts.S
@@ -25,7 +25,11 @@
25#include <asm/exception-64s.h> 25#include <asm/exception-64s.h>
26 26
27#if defined(CONFIG_PPC_BOOK3S_64) 27#if defined(CONFIG_PPC_BOOK3S_64)
28#if defined(_CALL_ELF) && _CALL_ELF == 2
29#define FUNC(name) name
30#else
28#define FUNC(name) GLUE(.,name) 31#define FUNC(name) GLUE(.,name)
32#endif
29#define GET_SHADOW_VCPU(reg) addi reg, r13, PACA_SVCPU 33#define GET_SHADOW_VCPU(reg) addi reg, r13, PACA_SVCPU
30 34
31#elif defined(CONFIG_PPC_BOOK3S_32) 35#elif defined(CONFIG_PPC_BOOK3S_32)
diff --git a/arch/powerpc/kvm/book3s_rmhandlers.S b/arch/powerpc/kvm/book3s_rmhandlers.S
index 9eec675220e6..16c4d88ba27d 100644
--- a/arch/powerpc/kvm/book3s_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_rmhandlers.S
@@ -36,7 +36,11 @@
36 36
37#if defined(CONFIG_PPC_BOOK3S_64) 37#if defined(CONFIG_PPC_BOOK3S_64)
38 38
39#if defined(_CALL_ELF) && _CALL_ELF == 2
40#define FUNC(name) name
41#else
39#define FUNC(name) GLUE(.,name) 42#define FUNC(name) GLUE(.,name)
43#endif
40 44
41#elif defined(CONFIG_PPC_BOOK3S_32) 45#elif defined(CONFIG_PPC_BOOK3S_32)
42 46
@@ -146,7 +150,7 @@ kvmppc_handler_skip_ins:
146 * On entry, r4 contains the guest shadow MSR 150 * On entry, r4 contains the guest shadow MSR
147 * MSR.EE has to be 0 when calling this function 151 * MSR.EE has to be 0 when calling this function
148 */ 152 */
149_GLOBAL(kvmppc_entry_trampoline) 153_GLOBAL_TOC(kvmppc_entry_trampoline)
150 mfmsr r5 154 mfmsr r5
151 LOAD_REG_ADDR(r7, kvmppc_handler_trampoline_enter) 155 LOAD_REG_ADDR(r7, kvmppc_handler_trampoline_enter)
152 toreal(r7) 156 toreal(r7)
diff --git a/arch/powerpc/kvm/book3s_rtas.c b/arch/powerpc/kvm/book3s_rtas.c
index edb14ba992b3..ef27fbd5d9c5 100644
--- a/arch/powerpc/kvm/book3s_rtas.c
+++ b/arch/powerpc/kvm/book3s_rtas.c
@@ -23,20 +23,20 @@ static void kvm_rtas_set_xive(struct kvm_vcpu *vcpu, struct rtas_args *args)
23 u32 irq, server, priority; 23 u32 irq, server, priority;
24 int rc; 24 int rc;
25 25
26 if (args->nargs != 3 || args->nret != 1) { 26 if (be32_to_cpu(args->nargs) != 3 || be32_to_cpu(args->nret) != 1) {
27 rc = -3; 27 rc = -3;
28 goto out; 28 goto out;
29 } 29 }
30 30
31 irq = args->args[0]; 31 irq = be32_to_cpu(args->args[0]);
32 server = args->args[1]; 32 server = be32_to_cpu(args->args[1]);
33 priority = args->args[2]; 33 priority = be32_to_cpu(args->args[2]);
34 34
35 rc = kvmppc_xics_set_xive(vcpu->kvm, irq, server, priority); 35 rc = kvmppc_xics_set_xive(vcpu->kvm, irq, server, priority);
36 if (rc) 36 if (rc)
37 rc = -3; 37 rc = -3;
38out: 38out:
39 args->rets[0] = rc; 39 args->rets[0] = cpu_to_be32(rc);
40} 40}
41 41
42static void kvm_rtas_get_xive(struct kvm_vcpu *vcpu, struct rtas_args *args) 42static void kvm_rtas_get_xive(struct kvm_vcpu *vcpu, struct rtas_args *args)
@@ -44,12 +44,12 @@ static void kvm_rtas_get_xive(struct kvm_vcpu *vcpu, struct rtas_args *args)
44 u32 irq, server, priority; 44 u32 irq, server, priority;
45 int rc; 45 int rc;
46 46
47 if (args->nargs != 1 || args->nret != 3) { 47 if (be32_to_cpu(args->nargs) != 1 || be32_to_cpu(args->nret) != 3) {
48 rc = -3; 48 rc = -3;
49 goto out; 49 goto out;
50 } 50 }
51 51
52 irq = args->args[0]; 52 irq = be32_to_cpu(args->args[0]);
53 53
54 server = priority = 0; 54 server = priority = 0;
55 rc = kvmppc_xics_get_xive(vcpu->kvm, irq, &server, &priority); 55 rc = kvmppc_xics_get_xive(vcpu->kvm, irq, &server, &priority);
@@ -58,10 +58,10 @@ static void kvm_rtas_get_xive(struct kvm_vcpu *vcpu, struct rtas_args *args)
58 goto out; 58 goto out;
59 } 59 }
60 60
61 args->rets[1] = server; 61 args->rets[1] = cpu_to_be32(server);
62 args->rets[2] = priority; 62 args->rets[2] = cpu_to_be32(priority);
63out: 63out:
64 args->rets[0] = rc; 64 args->rets[0] = cpu_to_be32(rc);
65} 65}
66 66
67static void kvm_rtas_int_off(struct kvm_vcpu *vcpu, struct rtas_args *args) 67static void kvm_rtas_int_off(struct kvm_vcpu *vcpu, struct rtas_args *args)
@@ -69,18 +69,18 @@ static void kvm_rtas_int_off(struct kvm_vcpu *vcpu, struct rtas_args *args)
69 u32 irq; 69 u32 irq;
70 int rc; 70 int rc;
71 71
72 if (args->nargs != 1 || args->nret != 1) { 72 if (be32_to_cpu(args->nargs) != 1 || be32_to_cpu(args->nret) != 1) {
73 rc = -3; 73 rc = -3;
74 goto out; 74 goto out;
75 } 75 }
76 76
77 irq = args->args[0]; 77 irq = be32_to_cpu(args->args[0]);
78 78
79 rc = kvmppc_xics_int_off(vcpu->kvm, irq); 79 rc = kvmppc_xics_int_off(vcpu->kvm, irq);
80 if (rc) 80 if (rc)
81 rc = -3; 81 rc = -3;
82out: 82out:
83 args->rets[0] = rc; 83 args->rets[0] = cpu_to_be32(rc);
84} 84}
85 85
86static void kvm_rtas_int_on(struct kvm_vcpu *vcpu, struct rtas_args *args) 86static void kvm_rtas_int_on(struct kvm_vcpu *vcpu, struct rtas_args *args)
@@ -88,18 +88,18 @@ static void kvm_rtas_int_on(struct kvm_vcpu *vcpu, struct rtas_args *args)
88 u32 irq; 88 u32 irq;
89 int rc; 89 int rc;
90 90
91 if (args->nargs != 1 || args->nret != 1) { 91 if (be32_to_cpu(args->nargs) != 1 || be32_to_cpu(args->nret) != 1) {
92 rc = -3; 92 rc = -3;
93 goto out; 93 goto out;
94 } 94 }
95 95
96 irq = args->args[0]; 96 irq = be32_to_cpu(args->args[0]);
97 97
98 rc = kvmppc_xics_int_on(vcpu->kvm, irq); 98 rc = kvmppc_xics_int_on(vcpu->kvm, irq);
99 if (rc) 99 if (rc)
100 rc = -3; 100 rc = -3;
101out: 101out:
102 args->rets[0] = rc; 102 args->rets[0] = cpu_to_be32(rc);
103} 103}
104#endif /* CONFIG_KVM_XICS */ 104#endif /* CONFIG_KVM_XICS */
105 105
@@ -205,32 +205,6 @@ int kvm_vm_ioctl_rtas_define_token(struct kvm *kvm, void __user *argp)
205 return rc; 205 return rc;
206} 206}
207 207
208static void kvmppc_rtas_swap_endian_in(struct rtas_args *args)
209{
210#ifdef __LITTLE_ENDIAN__
211 int i;
212
213 args->token = be32_to_cpu(args->token);
214 args->nargs = be32_to_cpu(args->nargs);
215 args->nret = be32_to_cpu(args->nret);
216 for (i = 0; i < args->nargs; i++)
217 args->args[i] = be32_to_cpu(args->args[i]);
218#endif
219}
220
221static void kvmppc_rtas_swap_endian_out(struct rtas_args *args)
222{
223#ifdef __LITTLE_ENDIAN__
224 int i;
225
226 for (i = 0; i < args->nret; i++)
227 args->args[i] = cpu_to_be32(args->args[i]);
228 args->token = cpu_to_be32(args->token);
229 args->nargs = cpu_to_be32(args->nargs);
230 args->nret = cpu_to_be32(args->nret);
231#endif
232}
233
234int kvmppc_rtas_hcall(struct kvm_vcpu *vcpu) 208int kvmppc_rtas_hcall(struct kvm_vcpu *vcpu)
235{ 209{
236 struct rtas_token_definition *d; 210 struct rtas_token_definition *d;
@@ -249,8 +223,6 @@ int kvmppc_rtas_hcall(struct kvm_vcpu *vcpu)
249 if (rc) 223 if (rc)
250 goto fail; 224 goto fail;
251 225
252 kvmppc_rtas_swap_endian_in(&args);
253
254 /* 226 /*
255 * args->rets is a pointer into args->args. Now that we've 227 * args->rets is a pointer into args->args. Now that we've
256 * copied args we need to fix it up to point into our copy, 228 * copied args we need to fix it up to point into our copy,
@@ -258,13 +230,13 @@ int kvmppc_rtas_hcall(struct kvm_vcpu *vcpu)
258 * value so we can restore it on the way out. 230 * value so we can restore it on the way out.
259 */ 231 */
260 orig_rets = args.rets; 232 orig_rets = args.rets;
261 args.rets = &args.args[args.nargs]; 233 args.rets = &args.args[be32_to_cpu(args.nargs)];
262 234
263 mutex_lock(&vcpu->kvm->lock); 235 mutex_lock(&vcpu->kvm->lock);
264 236
265 rc = -ENOENT; 237 rc = -ENOENT;
266 list_for_each_entry(d, &vcpu->kvm->arch.rtas_tokens, list) { 238 list_for_each_entry(d, &vcpu->kvm->arch.rtas_tokens, list) {
267 if (d->token == args.token) { 239 if (d->token == be32_to_cpu(args.token)) {
268 d->handler->handler(vcpu, &args); 240 d->handler->handler(vcpu, &args);
269 rc = 0; 241 rc = 0;
270 break; 242 break;
@@ -275,7 +247,6 @@ int kvmppc_rtas_hcall(struct kvm_vcpu *vcpu)
275 247
276 if (rc == 0) { 248 if (rc == 0) {
277 args.rets = orig_rets; 249 args.rets = orig_rets;
278 kvmppc_rtas_swap_endian_out(&args);
279 rc = kvm_write_guest(vcpu->kvm, args_phys, &args, sizeof(args)); 250 rc = kvm_write_guest(vcpu->kvm, args_phys, &args, sizeof(args));
280 if (rc) 251 if (rc)
281 goto fail; 252 goto fail;
diff --git a/arch/powerpc/kvm/e500_mmu_host.c b/arch/powerpc/kvm/e500_mmu_host.c
index dd2cc03f406f..86903d3f5a03 100644
--- a/arch/powerpc/kvm/e500_mmu_host.c
+++ b/arch/powerpc/kvm/e500_mmu_host.c
@@ -473,7 +473,8 @@ static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
473 if (printk_ratelimit()) 473 if (printk_ratelimit())
474 pr_err("%s: pte not present: gfn %lx, pfn %lx\n", 474 pr_err("%s: pte not present: gfn %lx, pfn %lx\n",
475 __func__, (long)gfn, pfn); 475 __func__, (long)gfn, pfn);
476 return -EINVAL; 476 ret = -EINVAL;
477 goto out;
477 } 478 }
478 kvmppc_e500_ref_setup(ref, gtlbe, pfn, wimg); 479 kvmppc_e500_ref_setup(ref, gtlbe, pfn, wimg);
479 480
diff --git a/arch/powerpc/lib/mem_64.S b/arch/powerpc/lib/mem_64.S
index 0738f96befbf..43435c6892fb 100644
--- a/arch/powerpc/lib/mem_64.S
+++ b/arch/powerpc/lib/mem_64.S
@@ -77,7 +77,7 @@ _GLOBAL(memset)
77 stb r4,0(r6) 77 stb r4,0(r6)
78 blr 78 blr
79 79
80_GLOBAL(memmove) 80_GLOBAL_TOC(memmove)
81 cmplw 0,r3,r4 81 cmplw 0,r3,r4
82 bgt backwards_memcpy 82 bgt backwards_memcpy
83 b memcpy 83 b memcpy
diff --git a/arch/powerpc/lib/sstep.c b/arch/powerpc/lib/sstep.c
index 412dd46dd0b7..5c09f365c842 100644
--- a/arch/powerpc/lib/sstep.c
+++ b/arch/powerpc/lib/sstep.c
@@ -1198,7 +1198,7 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr)
1198 sh = regs->gpr[rb] & 0x3f; 1198 sh = regs->gpr[rb] & 0x3f;
1199 ival = (signed int) regs->gpr[rd]; 1199 ival = (signed int) regs->gpr[rd];
1200 regs->gpr[ra] = ival >> (sh < 32 ? sh : 31); 1200 regs->gpr[ra] = ival >> (sh < 32 ? sh : 31);
1201 if (ival < 0 && (sh >= 32 || (ival & ((1 << sh) - 1)) != 0)) 1201 if (ival < 0 && (sh >= 32 || (ival & ((1ul << sh) - 1)) != 0))
1202 regs->xer |= XER_CA; 1202 regs->xer |= XER_CA;
1203 else 1203 else
1204 regs->xer &= ~XER_CA; 1204 regs->xer &= ~XER_CA;
@@ -1208,7 +1208,7 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr)
1208 sh = rb; 1208 sh = rb;
1209 ival = (signed int) regs->gpr[rd]; 1209 ival = (signed int) regs->gpr[rd];
1210 regs->gpr[ra] = ival >> sh; 1210 regs->gpr[ra] = ival >> sh;
1211 if (ival < 0 && (ival & ((1 << sh) - 1)) != 0) 1211 if (ival < 0 && (ival & ((1ul << sh) - 1)) != 0)
1212 regs->xer |= XER_CA; 1212 regs->xer |= XER_CA;
1213 else 1213 else
1214 regs->xer &= ~XER_CA; 1214 regs->xer &= ~XER_CA;
@@ -1216,7 +1216,7 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr)
1216 1216
1217#ifdef __powerpc64__ 1217#ifdef __powerpc64__
1218 case 27: /* sld */ 1218 case 27: /* sld */
1219 sh = regs->gpr[rd] & 0x7f; 1219 sh = regs->gpr[rb] & 0x7f;
1220 if (sh < 64) 1220 if (sh < 64)
1221 regs->gpr[ra] = regs->gpr[rd] << sh; 1221 regs->gpr[ra] = regs->gpr[rd] << sh;
1222 else 1222 else
@@ -1235,7 +1235,7 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr)
1235 sh = regs->gpr[rb] & 0x7f; 1235 sh = regs->gpr[rb] & 0x7f;
1236 ival = (signed long int) regs->gpr[rd]; 1236 ival = (signed long int) regs->gpr[rd];
1237 regs->gpr[ra] = ival >> (sh < 64 ? sh : 63); 1237 regs->gpr[ra] = ival >> (sh < 64 ? sh : 63);
1238 if (ival < 0 && (sh >= 64 || (ival & ((1 << sh) - 1)) != 0)) 1238 if (ival < 0 && (sh >= 64 || (ival & ((1ul << sh) - 1)) != 0))
1239 regs->xer |= XER_CA; 1239 regs->xer |= XER_CA;
1240 else 1240 else
1241 regs->xer &= ~XER_CA; 1241 regs->xer &= ~XER_CA;
@@ -1246,7 +1246,7 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr)
1246 sh = rb | ((instr & 2) << 4); 1246 sh = rb | ((instr & 2) << 4);
1247 ival = (signed long int) regs->gpr[rd]; 1247 ival = (signed long int) regs->gpr[rd];
1248 regs->gpr[ra] = ival >> sh; 1248 regs->gpr[ra] = ival >> sh;
1249 if (ival < 0 && (ival & ((1 << sh) - 1)) != 0) 1249 if (ival < 0 && (ival & ((1ul << sh) - 1)) != 0)
1250 regs->xer |= XER_CA; 1250 regs->xer |= XER_CA;
1251 else 1251 else
1252 regs->xer &= ~XER_CA; 1252 regs->xer &= ~XER_CA;
diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c
index 6b0641c3f03f..fe52db2eea6a 100644
--- a/arch/powerpc/perf/core-book3s.c
+++ b/arch/powerpc/perf/core-book3s.c
@@ -1307,6 +1307,9 @@ static void power_pmu_enable(struct pmu *pmu)
1307 out_enable: 1307 out_enable:
1308 pmao_restore_workaround(ebb); 1308 pmao_restore_workaround(ebb);
1309 1309
1310 if (ppmu->flags & PPMU_ARCH_207S)
1311 mtspr(SPRN_MMCR2, 0);
1312
1310 mmcr0 = ebb_switch_in(ebb, cpuhw->mmcr[0]); 1313 mmcr0 = ebb_switch_in(ebb, cpuhw->mmcr[0]);
1311 1314
1312 mb(); 1315 mb();
@@ -1315,9 +1318,6 @@ static void power_pmu_enable(struct pmu *pmu)
1315 1318
1316 write_mmcr0(cpuhw, mmcr0); 1319 write_mmcr0(cpuhw, mmcr0);
1317 1320
1318 if (ppmu->flags & PPMU_ARCH_207S)
1319 mtspr(SPRN_MMCR2, 0);
1320
1321 /* 1321 /*
1322 * Enable instruction sampling if necessary 1322 * Enable instruction sampling if necessary
1323 */ 1323 */
diff --git a/arch/powerpc/platforms/powernv/opal-elog.c b/arch/powerpc/platforms/powernv/opal-elog.c
index 10268c41d830..0ad533b617f7 100644
--- a/arch/powerpc/platforms/powernv/opal-elog.c
+++ b/arch/powerpc/platforms/powernv/opal-elog.c
@@ -249,7 +249,7 @@ static void elog_work_fn(struct work_struct *work)
249 249
250 rc = opal_get_elog_size(&id, &size, &type); 250 rc = opal_get_elog_size(&id, &size, &type);
251 if (rc != OPAL_SUCCESS) { 251 if (rc != OPAL_SUCCESS) {
252 pr_err("ELOG: Opal log read failed\n"); 252 pr_err("ELOG: OPAL log info read failed\n");
253 return; 253 return;
254 } 254 }
255 255
@@ -257,7 +257,7 @@ static void elog_work_fn(struct work_struct *work)
257 log_id = be64_to_cpu(id); 257 log_id = be64_to_cpu(id);
258 elog_type = be64_to_cpu(type); 258 elog_type = be64_to_cpu(type);
259 259
260 BUG_ON(elog_size > OPAL_MAX_ERRLOG_SIZE); 260 WARN_ON(elog_size > OPAL_MAX_ERRLOG_SIZE);
261 261
262 if (elog_size >= OPAL_MAX_ERRLOG_SIZE) 262 if (elog_size >= OPAL_MAX_ERRLOG_SIZE)
263 elog_size = OPAL_MAX_ERRLOG_SIZE; 263 elog_size = OPAL_MAX_ERRLOG_SIZE;
diff --git a/arch/powerpc/platforms/pseries/dlpar.c b/arch/powerpc/platforms/pseries/dlpar.c
index 022b38e6a80b..2d0b4d68a40a 100644
--- a/arch/powerpc/platforms/pseries/dlpar.c
+++ b/arch/powerpc/platforms/pseries/dlpar.c
@@ -86,6 +86,7 @@ static struct device_node *dlpar_parse_cc_node(struct cc_workarea *ccwa,
86 } 86 }
87 87
88 of_node_set_flag(dn, OF_DYNAMIC); 88 of_node_set_flag(dn, OF_DYNAMIC);
89 of_node_init(dn);
89 90
90 return dn; 91 return dn;
91} 92}
diff --git a/arch/powerpc/platforms/pseries/reconfig.c b/arch/powerpc/platforms/pseries/reconfig.c
index 0435bb65d0aa..1c0a60d98867 100644
--- a/arch/powerpc/platforms/pseries/reconfig.c
+++ b/arch/powerpc/platforms/pseries/reconfig.c
@@ -69,6 +69,7 @@ static int pSeries_reconfig_add_node(const char *path, struct property *proplist
69 69
70 np->properties = proplist; 70 np->properties = proplist;
71 of_node_set_flag(np, OF_DYNAMIC); 71 of_node_set_flag(np, OF_DYNAMIC);
72 of_node_init(np);
72 73
73 np->parent = derive_parent(path); 74 np->parent = derive_parent(path);
74 if (IS_ERR(np->parent)) { 75 if (IS_ERR(np->parent)) {
diff --git a/arch/s390/include/asm/switch_to.h b/arch/s390/include/asm/switch_to.h
index df38c70cd59e..18ea9e3f8142 100644
--- a/arch/s390/include/asm/switch_to.h
+++ b/arch/s390/include/asm/switch_to.h
@@ -51,8 +51,8 @@ static inline int restore_fp_ctl(u32 *fpc)
51 return 0; 51 return 0;
52 52
53 asm volatile( 53 asm volatile(
54 "0: lfpc %1\n" 54 " lfpc %1\n"
55 " la %0,0\n" 55 "0: la %0,0\n"
56 "1:\n" 56 "1:\n"
57 EX_TABLE(0b,1b) 57 EX_TABLE(0b,1b)
58 : "=d" (rc) : "Q" (*fpc), "0" (-EINVAL)); 58 : "=d" (rc) : "Q" (*fpc), "0" (-EINVAL));
diff --git a/arch/s390/kernel/head.S b/arch/s390/kernel/head.S
index 7ba7d6784510..e88d35d74950 100644
--- a/arch/s390/kernel/head.S
+++ b/arch/s390/kernel/head.S
@@ -437,11 +437,11 @@ ENTRY(startup_kdump)
437 437
438#if defined(CONFIG_64BIT) 438#if defined(CONFIG_64BIT)
439#if defined(CONFIG_MARCH_ZEC12) 439#if defined(CONFIG_MARCH_ZEC12)
440 .long 3, 0xc100efea, 0xf46ce800, 0x00400000 440 .long 3, 0xc100eff2, 0xf46ce800, 0x00400000
441#elif defined(CONFIG_MARCH_Z196) 441#elif defined(CONFIG_MARCH_Z196)
442 .long 2, 0xc100efea, 0xf46c0000 442 .long 2, 0xc100eff2, 0xf46c0000
443#elif defined(CONFIG_MARCH_Z10) 443#elif defined(CONFIG_MARCH_Z10)
444 .long 2, 0xc100efea, 0xf0680000 444 .long 2, 0xc100eff2, 0xf0680000
445#elif defined(CONFIG_MARCH_Z9_109) 445#elif defined(CONFIG_MARCH_Z9_109)
446 .long 1, 0xc100efc2 446 .long 1, 0xc100efc2
447#elif defined(CONFIG_MARCH_Z990) 447#elif defined(CONFIG_MARCH_Z990)
diff --git a/arch/s390/kernel/ptrace.c b/arch/s390/kernel/ptrace.c
index 2d716734b5b1..5dc7ad9e2fbf 100644
--- a/arch/s390/kernel/ptrace.c
+++ b/arch/s390/kernel/ptrace.c
@@ -334,9 +334,14 @@ static int __poke_user(struct task_struct *child, addr_t addr, addr_t data)
334 unsigned long mask = PSW_MASK_USER; 334 unsigned long mask = PSW_MASK_USER;
335 335
336 mask |= is_ri_task(child) ? PSW_MASK_RI : 0; 336 mask |= is_ri_task(child) ? PSW_MASK_RI : 0;
337 if ((data & ~mask) != PSW_USER_BITS) 337 if ((data ^ PSW_USER_BITS) & ~mask)
338 /* Invalid psw mask. */
339 return -EINVAL;
340 if ((data & PSW_MASK_ASC) == PSW_ASC_HOME)
341 /* Invalid address-space-control bits */
338 return -EINVAL; 342 return -EINVAL;
339 if ((data & PSW_MASK_EA) && !(data & PSW_MASK_BA)) 343 if ((data & PSW_MASK_EA) && !(data & PSW_MASK_BA))
344 /* Invalid addressing mode bits */
340 return -EINVAL; 345 return -EINVAL;
341 } 346 }
342 *(addr_t *)((addr_t) &task_pt_regs(child)->psw + addr) = data; 347 *(addr_t *)((addr_t) &task_pt_regs(child)->psw + addr) = data;
@@ -672,9 +677,12 @@ static int __poke_user_compat(struct task_struct *child,
672 677
673 mask |= is_ri_task(child) ? PSW32_MASK_RI : 0; 678 mask |= is_ri_task(child) ? PSW32_MASK_RI : 0;
674 /* Build a 64 bit psw mask from 31 bit mask. */ 679 /* Build a 64 bit psw mask from 31 bit mask. */
675 if ((tmp & ~mask) != PSW32_USER_BITS) 680 if ((tmp ^ PSW32_USER_BITS) & ~mask)
676 /* Invalid psw mask. */ 681 /* Invalid psw mask. */
677 return -EINVAL; 682 return -EINVAL;
683 if ((data & PSW32_MASK_ASC) == PSW32_ASC_HOME)
684 /* Invalid address-space-control bits */
685 return -EINVAL;
678 regs->psw.mask = (regs->psw.mask & ~PSW_MASK_USER) | 686 regs->psw.mask = (regs->psw.mask & ~PSW_MASK_USER) |
679 (regs->psw.mask & PSW_MASK_BA) | 687 (regs->psw.mask & PSW_MASK_BA) |
680 (__u64)(tmp & mask) << 32; 688 (__u64)(tmp & mask) << 32;
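The rewritten PSW-mask test, (data ^ PSW_USER_BITS) & ~mask, is non-zero exactly when some bit outside the user-modifiable mask differs from PSW_USER_BITS; unlike the old masked-equality form it still behaves when PSW_USER_BITS itself contains bits that fall inside mask, and the added hunks additionally reject a home-space address-space-control setting. A small sketch of the predicate with made-up bit values (the real definitions live in the s390 PSW headers and are not reproduced here):

    #include <stdio.h>

    /* Illustrative stand-ins, not the real s390 values. */
    #define PSW_USER_BITS   0x0008000180000000ULL
    #define PSW_MASK_USER   0x0000000000076000ULL   /* bits userspace may change */
    #define PSW_MASK_RI     0x0000008000000000ULL   /* only allowed for RI tasks */

    static int psw_mask_ok(unsigned long long data, int ri_task)
    {
            unsigned long long mask = PSW_MASK_USER | (ri_task ? PSW_MASK_RI : 0);

            /* Every bit outside 'mask' must match PSW_USER_BITS exactly. */
            return ((data ^ PSW_USER_BITS) & ~mask) == 0;
    }

    int main(void)
    {
            printf("%d\n", psw_mask_ok(PSW_USER_BITS | 0x4000ULL, 0));    /* 1: only a user bit changed */
            printf("%d\n", psw_mask_ok(PSW_USER_BITS | PSW_MASK_RI, 0));  /* 0: RI set, not an RI task */
            printf("%d\n", psw_mask_ok(PSW_USER_BITS | PSW_MASK_RI, 1));  /* 1: RI allowed for RI task */
            return 0;
    }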
diff --git a/arch/s390/pci/pci.c b/arch/s390/pci/pci.c
index 9ddc51eeb8d6..30de42730b2f 100644
--- a/arch/s390/pci/pci.c
+++ b/arch/s390/pci/pci.c
@@ -48,13 +48,10 @@
48static LIST_HEAD(zpci_list); 48static LIST_HEAD(zpci_list);
49static DEFINE_SPINLOCK(zpci_list_lock); 49static DEFINE_SPINLOCK(zpci_list_lock);
50 50
51static void zpci_enable_irq(struct irq_data *data);
52static void zpci_disable_irq(struct irq_data *data);
53
54static struct irq_chip zpci_irq_chip = { 51static struct irq_chip zpci_irq_chip = {
55 .name = "zPCI", 52 .name = "zPCI",
56 .irq_unmask = zpci_enable_irq, 53 .irq_unmask = unmask_msi_irq,
57 .irq_mask = zpci_disable_irq, 54 .irq_mask = mask_msi_irq,
58}; 55};
59 56
60static DECLARE_BITMAP(zpci_domain, ZPCI_NR_DEVICES); 57static DECLARE_BITMAP(zpci_domain, ZPCI_NR_DEVICES);
@@ -244,43 +241,6 @@ static int zpci_cfg_store(struct zpci_dev *zdev, int offset, u32 val, u8 len)
244 return rc; 241 return rc;
245} 242}
246 243
247static int zpci_msi_set_mask_bits(struct msi_desc *msi, u32 mask, u32 flag)
248{
249 int offset, pos;
250 u32 mask_bits;
251
252 if (msi->msi_attrib.is_msix) {
253 offset = msi->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE +
254 PCI_MSIX_ENTRY_VECTOR_CTRL;
255 msi->masked = readl(msi->mask_base + offset);
256 writel(flag, msi->mask_base + offset);
257 } else if (msi->msi_attrib.maskbit) {
258 pos = (long) msi->mask_base;
259 pci_read_config_dword(msi->dev, pos, &mask_bits);
260 mask_bits &= ~(mask);
261 mask_bits |= flag & mask;
262 pci_write_config_dword(msi->dev, pos, mask_bits);
263 } else
264 return 0;
265
266 msi->msi_attrib.maskbit = !!flag;
267 return 1;
268}
269
270static void zpci_enable_irq(struct irq_data *data)
271{
272 struct msi_desc *msi = irq_get_msi_desc(data->irq);
273
274 zpci_msi_set_mask_bits(msi, 1, 0);
275}
276
277static void zpci_disable_irq(struct irq_data *data)
278{
279 struct msi_desc *msi = irq_get_msi_desc(data->irq);
280
281 zpci_msi_set_mask_bits(msi, 1, 1);
282}
283
284void pcibios_fixup_bus(struct pci_bus *bus) 244void pcibios_fixup_bus(struct pci_bus *bus)
285{ 245{
286} 246}
@@ -487,7 +447,10 @@ void arch_teardown_msi_irqs(struct pci_dev *pdev)
487 447
488 /* Release MSI interrupts */ 448 /* Release MSI interrupts */
489 list_for_each_entry(msi, &pdev->msi_list, list) { 449 list_for_each_entry(msi, &pdev->msi_list, list) {
490 zpci_msi_set_mask_bits(msi, 1, 1); 450 if (msi->msi_attrib.is_msix)
451 default_msix_mask_irq(msi, 1);
452 else
453 default_msi_mask_irq(msi, 1, 1);
491 irq_set_msi_desc(msi->irq, NULL); 454 irq_set_msi_desc(msi->irq, NULL);
492 irq_free_desc(msi->irq); 455 irq_free_desc(msi->irq);
493 msi->msg.address_lo = 0; 456 msi->msg.address_lo = 0;
diff --git a/arch/sh/Makefile b/arch/sh/Makefile
index d4d16e4be07c..bf5b3f5f4962 100644
--- a/arch/sh/Makefile
+++ b/arch/sh/Makefile
@@ -32,7 +32,8 @@ endif
32 32
33cflags-$(CONFIG_CPU_SH2) := $(call cc-option,-m2,) 33cflags-$(CONFIG_CPU_SH2) := $(call cc-option,-m2,)
34cflags-$(CONFIG_CPU_SH2A) += $(call cc-option,-m2a,) \ 34cflags-$(CONFIG_CPU_SH2A) += $(call cc-option,-m2a,) \
35 $(call cc-option,-m2a-nofpu,) 35 $(call cc-option,-m2a-nofpu,) \
36 $(call cc-option,-m4-nofpu,)
36cflags-$(CONFIG_CPU_SH3) := $(call cc-option,-m3,) 37cflags-$(CONFIG_CPU_SH3) := $(call cc-option,-m3,)
37cflags-$(CONFIG_CPU_SH4) := $(call cc-option,-m4,) \ 38cflags-$(CONFIG_CPU_SH4) := $(call cc-option,-m4,) \
38 $(call cc-option,-mno-implicit-fp,-m4-nofpu) 39 $(call cc-option,-mno-implicit-fp,-m4-nofpu)
diff --git a/arch/sparc/Makefile b/arch/sparc/Makefile
index 9ff423678cbc..eaee14637d93 100644
--- a/arch/sparc/Makefile
+++ b/arch/sparc/Makefile
@@ -68,6 +68,9 @@ all: zImage
68image zImage uImage tftpboot.img vmlinux.aout: vmlinux 68image zImage uImage tftpboot.img vmlinux.aout: vmlinux
69 $(Q)$(MAKE) $(build)=$(boot) $(boot)/$@ 69 $(Q)$(MAKE) $(build)=$(boot) $(boot)/$@
70 70
71install:
72 $(Q)$(MAKE) $(build)=$(boot) $@
73
71archclean: 74archclean:
72 $(Q)$(MAKE) $(clean)=$(boot) 75 $(Q)$(MAKE) $(clean)=$(boot)
73 76
diff --git a/arch/sparc/boot/Makefile b/arch/sparc/boot/Makefile
index 6e63afb128d9..6a4ceae5ec67 100644
--- a/arch/sparc/boot/Makefile
+++ b/arch/sparc/boot/Makefile
@@ -69,3 +69,7 @@ $(obj)/image: vmlinux FORCE
69$(obj)/tftpboot.img: $(obj)/image $(obj)/piggyback System.map $(ROOT_IMG) FORCE 69$(obj)/tftpboot.img: $(obj)/image $(obj)/piggyback System.map $(ROOT_IMG) FORCE
70 $(call if_changed,elftoaout) 70 $(call if_changed,elftoaout)
71 $(call if_changed,piggy) 71 $(call if_changed,piggy)
72
73install:
74 sh $(srctree)/$(src)/install.sh $(KERNELRELEASE) $(obj)/zImage \
75 System.map "$(INSTALL_PATH)"
diff --git a/arch/sparc/boot/install.sh b/arch/sparc/boot/install.sh
new file mode 100644
index 000000000000..b32851eae693
--- /dev/null
+++ b/arch/sparc/boot/install.sh
@@ -0,0 +1,50 @@
1#!/bin/sh
2#
3# This file is subject to the terms and conditions of the GNU General Public
4# License. See the file "COPYING" in the main directory of this archive
5# for more details.
6#
7# Copyright (C) 1995 by Linus Torvalds
8#
9# Adapted from code in arch/i386/boot/Makefile by H. Peter Anvin
10#
11# "make install" script for SPARC architecture
12#
13# Arguments:
14# $1 - kernel version
15# $2 - kernel image file
16# $3 - kernel map file
17# $4 - default install path (blank if root directory)
18#
19
20verify () {
21 if [ ! -f "$1" ]; then
22 echo "" 1>&2
23 echo " *** Missing file: $1" 1>&2
24 echo ' *** You need to run "make" before "make install".' 1>&2
25 echo "" 1>&2
26 exit 1
27 fi
28}
29
30# Make sure the files actually exist
31verify "$2"
32verify "$3"
33
34# User may have a custom install script
35
36if [ -x ~/bin/${INSTALLKERNEL} ]; then exec ~/bin/${INSTALLKERNEL} "$@"; fi
37if [ -x /sbin/${INSTALLKERNEL} ]; then exec /sbin/${INSTALLKERNEL} "$@"; fi
38
39# Default install - same as make zlilo
40
41if [ -f $4/vmlinuz ]; then
42 mv $4/vmlinuz $4/vmlinuz.old
43fi
44
45if [ -f $4/System.map ]; then
46 mv $4/System.map $4/System.old
47fi
48
49cat $2 > $4/vmlinuz
50cp $3 $4/System.map
diff --git a/arch/sparc/include/asm/tlbflush_64.h b/arch/sparc/include/asm/tlbflush_64.h
index 816d8202fa0a..dea1cfa2122b 100644
--- a/arch/sparc/include/asm/tlbflush_64.h
+++ b/arch/sparc/include/asm/tlbflush_64.h
@@ -34,6 +34,8 @@ static inline void flush_tlb_range(struct vm_area_struct *vma,
34{ 34{
35} 35}
36 36
37void flush_tlb_kernel_range(unsigned long start, unsigned long end);
38
37#define __HAVE_ARCH_ENTER_LAZY_MMU_MODE 39#define __HAVE_ARCH_ENTER_LAZY_MMU_MODE
38 40
39void flush_tlb_pending(void); 41void flush_tlb_pending(void);
@@ -48,11 +50,6 @@ void __flush_tlb_kernel_range(unsigned long start, unsigned long end);
48 50
49#ifndef CONFIG_SMP 51#ifndef CONFIG_SMP
50 52
51#define flush_tlb_kernel_range(start,end) \
52do { flush_tsb_kernel_range(start,end); \
53 __flush_tlb_kernel_range(start,end); \
54} while (0)
55
56static inline void global_flush_tlb_page(struct mm_struct *mm, unsigned long vaddr) 53static inline void global_flush_tlb_page(struct mm_struct *mm, unsigned long vaddr)
57{ 54{
58 __flush_tlb_page(CTX_HWBITS(mm->context), vaddr); 55 __flush_tlb_page(CTX_HWBITS(mm->context), vaddr);
@@ -63,11 +60,6 @@ static inline void global_flush_tlb_page(struct mm_struct *mm, unsigned long vad
63void smp_flush_tlb_kernel_range(unsigned long start, unsigned long end); 60void smp_flush_tlb_kernel_range(unsigned long start, unsigned long end);
64void smp_flush_tlb_page(struct mm_struct *mm, unsigned long vaddr); 61void smp_flush_tlb_page(struct mm_struct *mm, unsigned long vaddr);
65 62
66#define flush_tlb_kernel_range(start, end) \
67do { flush_tsb_kernel_range(start,end); \
68 smp_flush_tlb_kernel_range(start, end); \
69} while (0)
70
71#define global_flush_tlb_page(mm, vaddr) \ 63#define global_flush_tlb_page(mm, vaddr) \
72 smp_flush_tlb_page(mm, vaddr) 64 smp_flush_tlb_page(mm, vaddr)
73 65
diff --git a/arch/sparc/include/uapi/asm/unistd.h b/arch/sparc/include/uapi/asm/unistd.h
index b73274fb961a..42f2bca1d338 100644
--- a/arch/sparc/include/uapi/asm/unistd.h
+++ b/arch/sparc/include/uapi/asm/unistd.h
@@ -410,8 +410,9 @@
410#define __NR_finit_module 342 410#define __NR_finit_module 342
411#define __NR_sched_setattr 343 411#define __NR_sched_setattr 343
412#define __NR_sched_getattr 344 412#define __NR_sched_getattr 344
413#define __NR_renameat2 345
413 414
414#define NR_syscalls 345 415#define NR_syscalls 346
415 416
416/* Bitmask values returned from kern_features system call. */ 417/* Bitmask values returned from kern_features system call. */
417#define KERN_FEATURE_MIXED_MODE_STACK 0x00000001 418#define KERN_FEATURE_MIXED_MODE_STACK 0x00000001
diff --git a/arch/sparc/kernel/ldc.c b/arch/sparc/kernel/ldc.c
index e01d75d40329..66dacd56bb10 100644
--- a/arch/sparc/kernel/ldc.c
+++ b/arch/sparc/kernel/ldc.c
@@ -1336,7 +1336,7 @@ int ldc_connect(struct ldc_channel *lp)
1336 if (!(lp->flags & LDC_FLAG_ALLOCED_QUEUES) || 1336 if (!(lp->flags & LDC_FLAG_ALLOCED_QUEUES) ||
1337 !(lp->flags & LDC_FLAG_REGISTERED_QUEUES) || 1337 !(lp->flags & LDC_FLAG_REGISTERED_QUEUES) ||
1338 lp->hs_state != LDC_HS_OPEN) 1338 lp->hs_state != LDC_HS_OPEN)
1339 err = -EINVAL; 1339 err = ((lp->hs_state > LDC_HS_OPEN) ? 0 : -EINVAL);
1340 else 1340 else
1341 err = start_handshake(lp); 1341 err = start_handshake(lp);
1342 1342
diff --git a/arch/sparc/kernel/sys32.S b/arch/sparc/kernel/sys32.S
index d066eb18650c..f834224208ed 100644
--- a/arch/sparc/kernel/sys32.S
+++ b/arch/sparc/kernel/sys32.S
@@ -48,6 +48,7 @@ SIGN1(sys32_futex, compat_sys_futex, %o1)
48SIGN1(sys32_recvfrom, compat_sys_recvfrom, %o0) 48SIGN1(sys32_recvfrom, compat_sys_recvfrom, %o0)
49SIGN1(sys32_recvmsg, compat_sys_recvmsg, %o0) 49SIGN1(sys32_recvmsg, compat_sys_recvmsg, %o0)
50SIGN1(sys32_sendmsg, compat_sys_sendmsg, %o0) 50SIGN1(sys32_sendmsg, compat_sys_sendmsg, %o0)
51SIGN2(sys32_renameat2, sys_renameat2, %o0, %o2)
51 52
52 .globl sys32_mmap2 53 .globl sys32_mmap2
53sys32_mmap2: 54sys32_mmap2:
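sys32_renameat2 is routed through SIGN2 so that %o0 and %o2 (olddfd and newdfd) are sign-extended from 32 to 64 bits before sys_renameat2 runs; a compat task passing AT_FDCWD (-100) would otherwise arrive as a huge positive descriptor. A rough illustration of the effect, using C casts in place of the assembly stub:

    #include <stdio.h>

    #define AT_FDCWD        (-100)

    int main(void)
    {
            unsigned int reg_o0 = (unsigned int)AT_FDCWD;   /* what a 32-bit task loads */

            long long zero_extended = (long long)(unsigned long long)reg_o0; /* 4294967196: bogus fd */
            long long sign_extended = (long long)(int)reg_o0;                /* -100: AT_FDCWD again */

            printf("zero-extended: %lld\n", zero_extended);
            printf("sign-extended: %lld\n", sign_extended);
            return 0;
    }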
diff --git a/arch/sparc/kernel/systbls_32.S b/arch/sparc/kernel/systbls_32.S
index 151ace8766cc..85fe9b1087cd 100644
--- a/arch/sparc/kernel/systbls_32.S
+++ b/arch/sparc/kernel/systbls_32.S
@@ -86,3 +86,4 @@ sys_call_table:
86/*330*/ .long sys_fanotify_mark, sys_prlimit64, sys_name_to_handle_at, sys_open_by_handle_at, sys_clock_adjtime 86/*330*/ .long sys_fanotify_mark, sys_prlimit64, sys_name_to_handle_at, sys_open_by_handle_at, sys_clock_adjtime
87/*335*/ .long sys_syncfs, sys_sendmmsg, sys_setns, sys_process_vm_readv, sys_process_vm_writev 87/*335*/ .long sys_syncfs, sys_sendmmsg, sys_setns, sys_process_vm_readv, sys_process_vm_writev
88/*340*/ .long sys_ni_syscall, sys_kcmp, sys_finit_module, sys_sched_setattr, sys_sched_getattr 88/*340*/ .long sys_ni_syscall, sys_kcmp, sys_finit_module, sys_sched_setattr, sys_sched_getattr
89/*345*/ .long sys_renameat2
diff --git a/arch/sparc/kernel/systbls_64.S b/arch/sparc/kernel/systbls_64.S
index 4bd4e2bb26cf..33ecba2826ea 100644
--- a/arch/sparc/kernel/systbls_64.S
+++ b/arch/sparc/kernel/systbls_64.S
@@ -87,6 +87,7 @@ sys_call_table32:
87/*330*/ .word compat_sys_fanotify_mark, sys_prlimit64, sys_name_to_handle_at, compat_sys_open_by_handle_at, compat_sys_clock_adjtime 87/*330*/ .word compat_sys_fanotify_mark, sys_prlimit64, sys_name_to_handle_at, compat_sys_open_by_handle_at, compat_sys_clock_adjtime
88 .word sys_syncfs, compat_sys_sendmmsg, sys_setns, compat_sys_process_vm_readv, compat_sys_process_vm_writev 88 .word sys_syncfs, compat_sys_sendmmsg, sys_setns, compat_sys_process_vm_readv, compat_sys_process_vm_writev
89/*340*/ .word sys_kern_features, sys_kcmp, sys_finit_module, sys_sched_setattr, sys_sched_getattr 89/*340*/ .word sys_kern_features, sys_kcmp, sys_finit_module, sys_sched_setattr, sys_sched_getattr
90 .word sys32_renameat2
90 91
91#endif /* CONFIG_COMPAT */ 92#endif /* CONFIG_COMPAT */
92 93
@@ -165,3 +166,4 @@ sys_call_table:
165/*330*/ .word sys_fanotify_mark, sys_prlimit64, sys_name_to_handle_at, sys_open_by_handle_at, sys_clock_adjtime 166/*330*/ .word sys_fanotify_mark, sys_prlimit64, sys_name_to_handle_at, sys_open_by_handle_at, sys_clock_adjtime
166 .word sys_syncfs, sys_sendmmsg, sys_setns, sys_process_vm_readv, sys_process_vm_writev 167 .word sys_syncfs, sys_sendmmsg, sys_setns, sys_process_vm_readv, sys_process_vm_writev
167/*340*/ .word sys_kern_features, sys_kcmp, sys_finit_module, sys_sched_setattr, sys_sched_getattr 168/*340*/ .word sys_kern_features, sys_kcmp, sys_finit_module, sys_sched_setattr, sys_sched_getattr
169 .word sys_renameat2
diff --git a/arch/sparc/math-emu/math_32.c b/arch/sparc/math-emu/math_32.c
index aa4d55b0bdf0..5ce8f2f64604 100644
--- a/arch/sparc/math-emu/math_32.c
+++ b/arch/sparc/math-emu/math_32.c
@@ -499,7 +499,7 @@ static int do_one_mathemu(u32 insn, unsigned long *pfsr, unsigned long *fregs)
499 case 0: fsr = *pfsr; 499 case 0: fsr = *pfsr;
500 if (IR == -1) IR = 2; 500 if (IR == -1) IR = 2;
501 /* fcc is always fcc0 */ 501 /* fcc is always fcc0 */
502 fsr &= ~0xc00; fsr |= (IR << 10); break; 502 fsr &= ~0xc00; fsr |= (IR << 10);
503 *pfsr = fsr; 503 *pfsr = fsr;
504 break; 504 break;
505 case 1: rd->s = IR; break; 505 case 1: rd->s = IR; break;
diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
index 9f4f532e2627..cdc9f145e3c3 100644
--- a/arch/sparc/mm/init_64.c
+++ b/arch/sparc/mm/init_64.c
@@ -352,6 +352,10 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *
352 352
353 mm = vma->vm_mm; 353 mm = vma->vm_mm;
354 354
355 /* Don't insert a non-valid PTE into the TSB, we'll deadlock. */
356 if (!pte_accessible(mm, pte))
357 return;
358
355 spin_lock_irqsave(&mm->context.lock, flags); 359 spin_lock_irqsave(&mm->context.lock, flags);
356 360
357#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE) 361#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
@@ -2620,6 +2624,10 @@ void update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long addr,
2620 2624
2621 pte = pmd_val(entry); 2625 pte = pmd_val(entry);
2622 2626
2627 /* Don't insert a non-valid PMD into the TSB, we'll deadlock. */
2628 if (!(pte & _PAGE_VALID))
2629 return;
2630
2623 /* We are fabricating 8MB pages using 4MB real hw pages. */ 2631 /* We are fabricating 8MB pages using 4MB real hw pages. */
2624 pte |= (addr & (1UL << REAL_HPAGE_SHIFT)); 2632 pte |= (addr & (1UL << REAL_HPAGE_SHIFT));
2625 2633
@@ -2764,3 +2772,27 @@ static int __init report_memory(void)
2764 return 0; 2772 return 0;
2765} 2773}
2766device_initcall(report_memory); 2774device_initcall(report_memory);
2775
2776#ifdef CONFIG_SMP
2777#define do_flush_tlb_kernel_range smp_flush_tlb_kernel_range
2778#else
2779#define do_flush_tlb_kernel_range __flush_tlb_kernel_range
2780#endif
2781
2782void flush_tlb_kernel_range(unsigned long start, unsigned long end)
2783{
2784 if (start < HI_OBP_ADDRESS && end > LOW_OBP_ADDRESS) {
2785 if (start < LOW_OBP_ADDRESS) {
2786 flush_tsb_kernel_range(start, LOW_OBP_ADDRESS);
2787 do_flush_tlb_kernel_range(start, LOW_OBP_ADDRESS);
2788 }
2789 if (end > HI_OBP_ADDRESS) {
2790 flush_tsb_kernel_range(end, HI_OBP_ADDRESS);
2791 do_flush_tlb_kernel_range(end, HI_OBP_ADDRESS);
2792 }
2793 } else {
2794 flush_tsb_kernel_range(start, end);
2795 do_flush_tlb_kernel_range(start, end);
2796 }
2797}
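The flush_tlb_kernel_range added here replaces the header macros removed in the tlbflush_64.h hunk above: any flush that overlaps the OBP firmware window [LOW_OBP_ADDRESS, HI_OBP_ADDRESS) is split into the pieces below and above it so the firmware translations are left alone. The guard is the usual half-open interval overlap test; a toy version with placeholder bounds (the real values come from the sparc64 headers):

    #include <stdio.h>

    /* Placeholder bounds standing in for LOW_OBP_ADDRESS / HI_OBP_ADDRESS. */
    #define LOW_OBP 0xf0000000ULL
    #define HI_OBP  0x100000000ULL

    /* Half-open ranges [start, end) and [LOW_OBP, HI_OBP) overlap
     * exactly when start < HI_OBP && end > LOW_OBP. */
    static int overlaps_obp(unsigned long long start, unsigned long long end)
    {
            return start < HI_OBP && end > LOW_OBP;
    }

    int main(void)
    {
            printf("%d\n", overlaps_obp(0xe0000000ULL, 0xe8000000ULL));     /* 0: below the hole */
            printf("%d\n", overlaps_obp(0xe0000000ULL, 0xf8000000ULL));     /* 1: straddles LOW_OBP */
            printf("%d\n", overlaps_obp(0x100000000ULL, 0x140000000ULL));   /* 0: at or above HI_OBP */
            return 0;
    }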
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
index a80029035bf2..f9e4fdd3b877 100644
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -370,6 +370,17 @@ static void init_intel(struct cpuinfo_x86 *c)
370 */ 370 */
371 detect_extended_topology(c); 371 detect_extended_topology(c);
372 372
373 if (!cpu_has(c, X86_FEATURE_XTOPOLOGY)) {
374 /*
375 * let's use the legacy cpuid vector 0x1 and 0x4 for topology
376 * detection.
377 */
378 c->x86_max_cores = intel_num_cpu_cores(c);
379#ifdef CONFIG_X86_32
380 detect_ht(c);
381#endif
382 }
383
373 l2 = init_intel_cacheinfo(c); 384 l2 = init_intel_cacheinfo(c);
374 if (c->cpuid_level > 9) { 385 if (c->cpuid_level > 9) {
375 unsigned eax = cpuid_eax(10); 386 unsigned eax = cpuid_eax(10);
@@ -438,17 +449,6 @@ static void init_intel(struct cpuinfo_x86 *c)
438 set_cpu_cap(c, X86_FEATURE_P3); 449 set_cpu_cap(c, X86_FEATURE_P3);
439#endif 450#endif
440 451
441 if (!cpu_has(c, X86_FEATURE_XTOPOLOGY)) {
442 /*
443 * let's use the legacy cpuid vector 0x1 and 0x4 for topology
444 * detection.
445 */
446 c->x86_max_cores = intel_num_cpu_cores(c);
447#ifdef CONFIG_X86_32
448 detect_ht(c);
449#endif
450 }
451
452 /* Work around errata */ 452 /* Work around errata */
453 srat_detect_node(c); 453 srat_detect_node(c);
454 454
diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
index a952e9c85b6f..9c8f7394c612 100644
--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
+++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
@@ -730,6 +730,18 @@ unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c)
730#endif 730#endif
731 } 731 }
732 732
733#ifdef CONFIG_X86_HT
734 /*
735 * If cpu_llc_id is not yet set, this means cpuid_level < 4 which in
736 * turns means that the only possibility is SMT (as indicated in
737 * cpuid1). Since cpuid2 doesn't specify shared caches, and we know
738 * that SMT shares all caches, we can unconditionally set cpu_llc_id to
739 * c->phys_proc_id.
740 */
741 if (per_cpu(cpu_llc_id, cpu) == BAD_APICID)
742 per_cpu(cpu_llc_id, cpu) = c->phys_proc_id;
743#endif
744
733 c->x86_cache_size = l3 ? l3 : (l2 ? l2 : (l1i+l1d)); 745 c->x86_cache_size = l3 ? l3 : (l2 ? l2 : (l1i+l1d));
734 746
735 return l2; 747 return l2;
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
index bb92f38153b2..9a79c8dbd8e8 100644
--- a/arch/x86/kernel/cpu/mcheck/mce.c
+++ b/arch/x86/kernel/cpu/mcheck/mce.c
@@ -2451,6 +2451,12 @@ static __init int mcheck_init_device(void)
2451 for_each_online_cpu(i) { 2451 for_each_online_cpu(i) {
2452 err = mce_device_create(i); 2452 err = mce_device_create(i);
2453 if (err) { 2453 if (err) {
2454 /*
2455 * Register notifier anyway (and do not unreg it) so
2456 * that we don't leave undeleted timers, see notifier
2457 * callback above.
2458 */
2459 __register_hotcpu_notifier(&mce_cpu_notifier);
2454 cpu_notifier_register_done(); 2460 cpu_notifier_register_done();
2455 goto err_device_create; 2461 goto err_device_create;
2456 } 2462 }
@@ -2471,10 +2477,6 @@ static __init int mcheck_init_device(void)
2471err_register: 2477err_register:
2472 unregister_syscore_ops(&mce_syscore_ops); 2478 unregister_syscore_ops(&mce_syscore_ops);
2473 2479
2474 cpu_notifier_register_begin();
2475 __unregister_hotcpu_notifier(&mce_cpu_notifier);
2476 cpu_notifier_register_done();
2477
2478err_device_create: 2480err_device_create:
2479 /* 2481 /*
2480 * We didn't keep track of which devices were created above, but 2482 * We didn't keep track of which devices were created above, but
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index 2bdfbff8a4f6..2879ecdaac43 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -118,6 +118,9 @@ static int x86_pmu_extra_regs(u64 config, struct perf_event *event)
118 continue; 118 continue;
119 if (event->attr.config1 & ~er->valid_mask) 119 if (event->attr.config1 & ~er->valid_mask)
120 return -EINVAL; 120 return -EINVAL;
121 /* Check if the extra msrs can be safely accessed*/
122 if (!er->extra_msr_access)
123 return -ENXIO;
121 124
122 reg->idx = er->idx; 125 reg->idx = er->idx;
123 reg->config = event->attr.config1; 126 reg->config = event->attr.config1;
diff --git a/arch/x86/kernel/cpu/perf_event.h b/arch/x86/kernel/cpu/perf_event.h
index 3b2f9bdd974b..8ade93111e03 100644
--- a/arch/x86/kernel/cpu/perf_event.h
+++ b/arch/x86/kernel/cpu/perf_event.h
@@ -295,14 +295,16 @@ struct extra_reg {
295 u64 config_mask; 295 u64 config_mask;
296 u64 valid_mask; 296 u64 valid_mask;
297 int idx; /* per_xxx->regs[] reg index */ 297 int idx; /* per_xxx->regs[] reg index */
298 bool extra_msr_access;
298}; 299};
299 300
300#define EVENT_EXTRA_REG(e, ms, m, vm, i) { \ 301#define EVENT_EXTRA_REG(e, ms, m, vm, i) { \
301 .event = (e), \ 302 .event = (e), \
302 .msr = (ms), \ 303 .msr = (ms), \
303 .config_mask = (m), \ 304 .config_mask = (m), \
304 .valid_mask = (vm), \ 305 .valid_mask = (vm), \
305 .idx = EXTRA_REG_##i, \ 306 .idx = EXTRA_REG_##i, \
307 .extra_msr_access = true, \
306 } 308 }
307 309
308#define INTEL_EVENT_EXTRA_REG(event, msr, vm, idx) \ 310#define INTEL_EVENT_EXTRA_REG(event, msr, vm, idx) \
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
index 07846d738bdb..2502d0d9d246 100644
--- a/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/arch/x86/kernel/cpu/perf_event_intel.c
@@ -2182,6 +2182,41 @@ static void intel_snb_check_microcode(void)
2182 } 2182 }
2183} 2183}
2184 2184
2185/*
2186 * Under certain circumstances, access certain MSR may cause #GP.
2187 * The function tests if the input MSR can be safely accessed.
2188 */
2189static bool check_msr(unsigned long msr, u64 mask)
2190{
2191 u64 val_old, val_new, val_tmp;
2192
2193 /*
2194 * Read the current value, change it and read it back to see if it
2195 * matches, this is needed to detect certain hardware emulators
2196 * (qemu/kvm) that don't trap on the MSR access and always return 0s.
2197 */
2198 if (rdmsrl_safe(msr, &val_old))
2199 return false;
2200
2201 /*
2202 * Only change the bits which can be updated by wrmsrl.
2203 */
2204 val_tmp = val_old ^ mask;
2205 if (wrmsrl_safe(msr, val_tmp) ||
2206 rdmsrl_safe(msr, &val_new))
2207 return false;
2208
2209 if (val_new != val_tmp)
2210 return false;
2211
2212 /* Here it's sure that the MSR can be safely accessed.
2213 * Restore the old value and return.
2214 */
2215 wrmsrl(msr, val_old);
2216
2217 return true;
2218}
2219
2185static __init void intel_sandybridge_quirk(void) 2220static __init void intel_sandybridge_quirk(void)
2186{ 2221{
2187 x86_pmu.check_microcode = intel_snb_check_microcode; 2222 x86_pmu.check_microcode = intel_snb_check_microcode;
@@ -2271,7 +2306,8 @@ __init int intel_pmu_init(void)
2271 union cpuid10_ebx ebx; 2306 union cpuid10_ebx ebx;
2272 struct event_constraint *c; 2307 struct event_constraint *c;
2273 unsigned int unused; 2308 unsigned int unused;
2274 int version; 2309 struct extra_reg *er;
2310 int version, i;
2275 2311
2276 if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) { 2312 if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) {
2277 switch (boot_cpu_data.x86) { 2313 switch (boot_cpu_data.x86) {
@@ -2474,6 +2510,9 @@ __init int intel_pmu_init(void)
2474 case 62: /* IvyBridge EP */ 2510 case 62: /* IvyBridge EP */
2475 memcpy(hw_cache_event_ids, snb_hw_cache_event_ids, 2511 memcpy(hw_cache_event_ids, snb_hw_cache_event_ids,
2476 sizeof(hw_cache_event_ids)); 2512 sizeof(hw_cache_event_ids));
2513 /* dTLB-load-misses on IVB is different than SNB */
2514 hw_cache_event_ids[C(DTLB)][C(OP_READ)][C(RESULT_MISS)] = 0x8108; /* DTLB_LOAD_MISSES.DEMAND_LD_MISS_CAUSES_A_WALK */
2515
2477 memcpy(hw_cache_extra_regs, snb_hw_cache_extra_regs, 2516 memcpy(hw_cache_extra_regs, snb_hw_cache_extra_regs,
2478 sizeof(hw_cache_extra_regs)); 2517 sizeof(hw_cache_extra_regs));
2479 2518
@@ -2574,6 +2613,34 @@ __init int intel_pmu_init(void)
2574 } 2613 }
2575 } 2614 }
2576 2615
2616 /*
2617 * Access LBR MSR may cause #GP under certain circumstances.
2618 * E.g. KVM doesn't support LBR MSR
2619 * Check all LBT MSR here.
2620 * Disable LBR access if any LBR MSRs can not be accessed.
2621 */
2622 if (x86_pmu.lbr_nr && !check_msr(x86_pmu.lbr_tos, 0x3UL))
2623 x86_pmu.lbr_nr = 0;
2624 for (i = 0; i < x86_pmu.lbr_nr; i++) {
2625 if (!(check_msr(x86_pmu.lbr_from + i, 0xffffUL) &&
2626 check_msr(x86_pmu.lbr_to + i, 0xffffUL)))
2627 x86_pmu.lbr_nr = 0;
2628 }
2629
2630 /*
2631 * Access extra MSR may cause #GP under certain circumstances.
2632 * E.g. KVM doesn't support offcore event
2633 * Check all extra_regs here.
2634 */
2635 if (x86_pmu.extra_regs) {
2636 for (er = x86_pmu.extra_regs; er->msr; er++) {
2637 er->extra_msr_access = check_msr(er->msr, 0x1ffUL);
2638 /* Disable LBR select mapping */
2639 if ((er->idx == EXTRA_REG_LBR) && !er->extra_msr_access)
2640 x86_pmu.lbr_sel_map = NULL;
2641 }
2642 }
2643
2577 /* Support full width counters using alternative MSR range */ 2644 /* Support full width counters using alternative MSR range */
2578 if (x86_pmu.intel_cap.full_width_write) { 2645 if (x86_pmu.intel_cap.full_width_write) {
2579 x86_pmu.max_period = x86_pmu.cntval_mask; 2646 x86_pmu.max_period = x86_pmu.cntval_mask;
diff --git a/arch/x86/kernel/cpu/perf_event_intel_ds.c b/arch/x86/kernel/cpu/perf_event_intel_ds.c
index 980970cb744d..696ade311ded 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_ds.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_ds.c
@@ -311,9 +311,11 @@ static int alloc_bts_buffer(int cpu)
311 if (!x86_pmu.bts) 311 if (!x86_pmu.bts)
312 return 0; 312 return 0;
313 313
314 buffer = kzalloc_node(BTS_BUFFER_SIZE, GFP_KERNEL, node); 314 buffer = kzalloc_node(BTS_BUFFER_SIZE, GFP_KERNEL | __GFP_NOWARN, node);
315 if (unlikely(!buffer)) 315 if (unlikely(!buffer)) {
316 WARN_ONCE(1, "%s: BTS buffer allocation failure\n", __func__);
316 return -ENOMEM; 317 return -ENOMEM;
318 }
317 319
318 max = BTS_BUFFER_SIZE / BTS_RECORD_SIZE; 320 max = BTS_BUFFER_SIZE / BTS_RECORD_SIZE;
319 thresh = max / 16; 321 thresh = max / 16;
diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.c b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
index 65bbbea38b9c..ae6552a0701f 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_uncore.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
@@ -550,16 +550,16 @@ static struct extra_reg snbep_uncore_cbox_extra_regs[] = {
550 SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0x6), 550 SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0x6),
551 SNBEP_CBO_EVENT_EXTRA_REG(0x0135, 0xffff, 0x8), 551 SNBEP_CBO_EVENT_EXTRA_REG(0x0135, 0xffff, 0x8),
552 SNBEP_CBO_EVENT_EXTRA_REG(0x0335, 0xffff, 0x8), 552 SNBEP_CBO_EVENT_EXTRA_REG(0x0335, 0xffff, 0x8),
553 SNBEP_CBO_EVENT_EXTRA_REG(0x4135, 0xffff, 0xc), 553 SNBEP_CBO_EVENT_EXTRA_REG(0x4135, 0xffff, 0xa),
554 SNBEP_CBO_EVENT_EXTRA_REG(0x4335, 0xffff, 0xc), 554 SNBEP_CBO_EVENT_EXTRA_REG(0x4335, 0xffff, 0xa),
555 SNBEP_CBO_EVENT_EXTRA_REG(0x4435, 0xffff, 0x2), 555 SNBEP_CBO_EVENT_EXTRA_REG(0x4435, 0xffff, 0x2),
556 SNBEP_CBO_EVENT_EXTRA_REG(0x4835, 0xffff, 0x2), 556 SNBEP_CBO_EVENT_EXTRA_REG(0x4835, 0xffff, 0x2),
557 SNBEP_CBO_EVENT_EXTRA_REG(0x4a35, 0xffff, 0x2), 557 SNBEP_CBO_EVENT_EXTRA_REG(0x4a35, 0xffff, 0x2),
558 SNBEP_CBO_EVENT_EXTRA_REG(0x5035, 0xffff, 0x2), 558 SNBEP_CBO_EVENT_EXTRA_REG(0x5035, 0xffff, 0x2),
559 SNBEP_CBO_EVENT_EXTRA_REG(0x0136, 0xffff, 0x8), 559 SNBEP_CBO_EVENT_EXTRA_REG(0x0136, 0xffff, 0x8),
560 SNBEP_CBO_EVENT_EXTRA_REG(0x0336, 0xffff, 0x8), 560 SNBEP_CBO_EVENT_EXTRA_REG(0x0336, 0xffff, 0x8),
561 SNBEP_CBO_EVENT_EXTRA_REG(0x4136, 0xffff, 0xc), 561 SNBEP_CBO_EVENT_EXTRA_REG(0x4136, 0xffff, 0xa),
562 SNBEP_CBO_EVENT_EXTRA_REG(0x4336, 0xffff, 0xc), 562 SNBEP_CBO_EVENT_EXTRA_REG(0x4336, 0xffff, 0xa),
563 SNBEP_CBO_EVENT_EXTRA_REG(0x4436, 0xffff, 0x2), 563 SNBEP_CBO_EVENT_EXTRA_REG(0x4436, 0xffff, 0x2),
564 SNBEP_CBO_EVENT_EXTRA_REG(0x4836, 0xffff, 0x2), 564 SNBEP_CBO_EVENT_EXTRA_REG(0x4836, 0xffff, 0x2),
565 SNBEP_CBO_EVENT_EXTRA_REG(0x4a36, 0xffff, 0x2), 565 SNBEP_CBO_EVENT_EXTRA_REG(0x4a36, 0xffff, 0x2),
@@ -1222,6 +1222,7 @@ static struct extra_reg ivt_uncore_cbox_extra_regs[] = {
1222 SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN, 1222 SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN,
1223 SNBEP_CBO_PMON_CTL_TID_EN, 0x1), 1223 SNBEP_CBO_PMON_CTL_TID_EN, 0x1),
1224 SNBEP_CBO_EVENT_EXTRA_REG(0x1031, 0x10ff, 0x2), 1224 SNBEP_CBO_EVENT_EXTRA_REG(0x1031, 0x10ff, 0x2),
1225
1225 SNBEP_CBO_EVENT_EXTRA_REG(0x1134, 0xffff, 0x4), 1226 SNBEP_CBO_EVENT_EXTRA_REG(0x1134, 0xffff, 0x4),
1226 SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0xc), 1227 SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0xc),
1227 SNBEP_CBO_EVENT_EXTRA_REG(0x5134, 0xffff, 0xc), 1228 SNBEP_CBO_EVENT_EXTRA_REG(0x5134, 0xffff, 0xc),
@@ -1245,7 +1246,7 @@ static struct extra_reg ivt_uncore_cbox_extra_regs[] = {
1245 SNBEP_CBO_EVENT_EXTRA_REG(0x8335, 0xffff, 0x10), 1246 SNBEP_CBO_EVENT_EXTRA_REG(0x8335, 0xffff, 0x10),
1246 SNBEP_CBO_EVENT_EXTRA_REG(0x0136, 0xffff, 0x10), 1247 SNBEP_CBO_EVENT_EXTRA_REG(0x0136, 0xffff, 0x10),
1247 SNBEP_CBO_EVENT_EXTRA_REG(0x0336, 0xffff, 0x10), 1248 SNBEP_CBO_EVENT_EXTRA_REG(0x0336, 0xffff, 0x10),
1248 SNBEP_CBO_EVENT_EXTRA_REG(0x2336, 0xffff, 0x10), 1249 SNBEP_CBO_EVENT_EXTRA_REG(0x2136, 0xffff, 0x10),
1249 SNBEP_CBO_EVENT_EXTRA_REG(0x2336, 0xffff, 0x10), 1250 SNBEP_CBO_EVENT_EXTRA_REG(0x2336, 0xffff, 0x10),
1250 SNBEP_CBO_EVENT_EXTRA_REG(0x4136, 0xffff, 0x18), 1251 SNBEP_CBO_EVENT_EXTRA_REG(0x4136, 0xffff, 0x18),
1251 SNBEP_CBO_EVENT_EXTRA_REG(0x4336, 0xffff, 0x18), 1252 SNBEP_CBO_EVENT_EXTRA_REG(0x4336, 0xffff, 0x18),
diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
index dbaa23e78b36..0d0c9d4ab6d5 100644
--- a/arch/x86/kernel/entry_32.S
+++ b/arch/x86/kernel/entry_32.S
@@ -425,8 +425,8 @@ sysenter_do_call:
425 cmpl $(NR_syscalls), %eax 425 cmpl $(NR_syscalls), %eax
426 jae sysenter_badsys 426 jae sysenter_badsys
427 call *sys_call_table(,%eax,4) 427 call *sys_call_table(,%eax,4)
428 movl %eax,PT_EAX(%esp)
429sysenter_after_call: 428sysenter_after_call:
429 movl %eax,PT_EAX(%esp)
430 LOCKDEP_SYS_EXIT 430 LOCKDEP_SYS_EXIT
431 DISABLE_INTERRUPTS(CLBR_ANY) 431 DISABLE_INTERRUPTS(CLBR_ANY)
432 TRACE_IRQS_OFF 432 TRACE_IRQS_OFF
@@ -502,6 +502,7 @@ ENTRY(system_call)
502 jae syscall_badsys 502 jae syscall_badsys
503syscall_call: 503syscall_call:
504 call *sys_call_table(,%eax,4) 504 call *sys_call_table(,%eax,4)
505syscall_after_call:
505 movl %eax,PT_EAX(%esp) # store the return value 506 movl %eax,PT_EAX(%esp) # store the return value
506syscall_exit: 507syscall_exit:
507 LOCKDEP_SYS_EXIT 508 LOCKDEP_SYS_EXIT
@@ -675,12 +676,12 @@ syscall_fault:
675END(syscall_fault) 676END(syscall_fault)
676 677
677syscall_badsys: 678syscall_badsys:
678 movl $-ENOSYS,PT_EAX(%esp) 679 movl $-ENOSYS,%eax
679 jmp syscall_exit 680 jmp syscall_after_call
680END(syscall_badsys) 681END(syscall_badsys)
681 682
682sysenter_badsys: 683sysenter_badsys:
683 movl $-ENOSYS,PT_EAX(%esp) 684 movl $-ENOSYS,%eax
684 jmp sysenter_after_call 685 jmp sysenter_after_call
685END(syscall_badsys) 686END(syscall_badsys)
686 CFI_ENDPROC 687 CFI_ENDPROC
diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
index 7596df664901..67e6d19ef1be 100644
--- a/arch/x86/kernel/kprobes/core.c
+++ b/arch/x86/kernel/kprobes/core.c
@@ -574,6 +574,9 @@ int kprobe_int3_handler(struct pt_regs *regs)
574 struct kprobe *p; 574 struct kprobe *p;
575 struct kprobe_ctlblk *kcb; 575 struct kprobe_ctlblk *kcb;
576 576
577 if (user_mode_vm(regs))
578 return 0;
579
577 addr = (kprobe_opcode_t *)(regs->ip - sizeof(kprobe_opcode_t)); 580 addr = (kprobe_opcode_t *)(regs->ip - sizeof(kprobe_opcode_t));
578 /* 581 /*
579 * We don't want to be preempted for the entire 582 * We don't want to be preempted for the entire
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index f6449334ec45..ef432f891d30 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -5887,6 +5887,18 @@ static int inject_pending_event(struct kvm_vcpu *vcpu, bool req_int_win)
5887 kvm_x86_ops->set_nmi(vcpu); 5887 kvm_x86_ops->set_nmi(vcpu);
5888 } 5888 }
5889 } else if (kvm_cpu_has_injectable_intr(vcpu)) { 5889 } else if (kvm_cpu_has_injectable_intr(vcpu)) {
5890 /*
5891 * Because interrupts can be injected asynchronously, we are
5892 * calling check_nested_events again here to avoid a race condition.
5893 * See https://lkml.org/lkml/2014/7/2/60 for discussion about this
5894 * proposal and current concerns. Perhaps we should be setting
5895 * KVM_REQ_EVENT only on certain events and not unconditionally?
5896 */
5897 if (is_guest_mode(vcpu) && kvm_x86_ops->check_nested_events) {
5898 r = kvm_x86_ops->check_nested_events(vcpu, req_int_win);
5899 if (r != 0)
5900 return r;
5901 }
5890 if (kvm_x86_ops->interrupt_allowed(vcpu)) { 5902 if (kvm_x86_ops->interrupt_allowed(vcpu)) {
5891 kvm_queue_interrupt(vcpu, kvm_cpu_get_interrupt(vcpu), 5903 kvm_queue_interrupt(vcpu, kvm_cpu_get_interrupt(vcpu),
5892 false); 5904 false);
diff --git a/arch/xtensa/kernel/vectors.S b/arch/xtensa/kernel/vectors.S
index f9e1ec346e35..8453e6e39895 100644
--- a/arch/xtensa/kernel/vectors.S
+++ b/arch/xtensa/kernel/vectors.S
@@ -376,38 +376,42 @@ _DoubleExceptionVector_WindowOverflow:
376 beqz a2, 1f # if at start of vector, don't restore 376 beqz a2, 1f # if at start of vector, don't restore
377 377
378 addi a0, a0, -128 378 addi a0, a0, -128
379 bbsi a0, 8, 1f # don't restore except for overflow 8 and 12 379 bbsi.l a0, 8, 1f # don't restore except for overflow 8 and 12
380 bbsi a0, 7, 2f 380
381 /*
382 * This fixup handler is for the extremely unlikely case where the
383 * overflow handler's reference thru a0 gets a hardware TLB refill
384 * that bumps out the (distinct, aliasing) TLB entry that mapped its
385 * prior references thru a9/a13, and where our reference now thru
386 * a9/a13 gets a 2nd-level miss exception (not hardware TLB refill).
387 */
388 movi a2, window_overflow_restore_a0_fixup
389 s32i a2, a3, EXC_TABLE_FIXUP
390 l32i a2, a3, EXC_TABLE_DOUBLE_SAVE
391 xsr a3, excsave1
392
393 bbsi.l a0, 7, 2f
381 394
382 /* 395 /*
383 * Restore a0 as saved by _WindowOverflow8(). 396 * Restore a0 as saved by _WindowOverflow8().
384 *
385 * FIXME: we really need a fixup handler for this L32E,
386 * for the extremely unlikely case where the overflow handler's
387 * reference thru a0 gets a hardware TLB refill that bumps out
388 * the (distinct, aliasing) TLB entry that mapped its prior
389 * references thru a9, and where our reference now thru a9
390 * gets a 2nd-level miss exception (not hardware TLB refill).
391 */ 397 */
392 398
393 l32e a2, a9, -16 399 l32e a0, a9, -16
394 wsr a2, depc # replace the saved a0 400 wsr a0, depc # replace the saved a0
395 j 1f 401 j 3f
396 402
3972: 4032:
398 /* 404 /*
399 * Restore a0 as saved by _WindowOverflow12(). 405 * Restore a0 as saved by _WindowOverflow12().
400 *
401 * FIXME: we really need a fixup handler for this L32E,
402 * for the extremely unlikely case where the overflow handler's
403 * reference thru a0 gets a hardware TLB refill that bumps out
404 * the (distinct, aliasing) TLB entry that mapped its prior
405 * references thru a13, and where our reference now thru a13
406 * gets a 2nd-level miss exception (not hardware TLB refill).
407 */ 406 */
408 407
409 l32e a2, a13, -16 408 l32e a0, a13, -16
410 wsr a2, depc # replace the saved a0 409 wsr a0, depc # replace the saved a0
4103:
411 xsr a3, excsave1
412 movi a0, 0
413 s32i a0, a3, EXC_TABLE_FIXUP
414 s32i a2, a3, EXC_TABLE_DOUBLE_SAVE
4111: 4151:
412 /* 416 /*
413 * Restore WindowBase while leaving all address registers restored. 417 * Restore WindowBase while leaving all address registers restored.
@@ -449,6 +453,7 @@ _DoubleExceptionVector_WindowOverflow:
449 453
450 s32i a0, a2, PT_DEPC 454 s32i a0, a2, PT_DEPC
451 455
456_DoubleExceptionVector_handle_exception:
452 addx4 a0, a0, a3 457 addx4 a0, a0, a3
453 l32i a0, a0, EXC_TABLE_FAST_USER 458 l32i a0, a0, EXC_TABLE_FAST_USER
454 xsr a3, excsave1 459 xsr a3, excsave1
@@ -464,11 +469,120 @@ _DoubleExceptionVector_WindowOverflow:
464 rotw -3 469 rotw -3
465 j 1b 470 j 1b
466 471
467 .end literal_prefix
468 472
469ENDPROC(_DoubleExceptionVector) 473ENDPROC(_DoubleExceptionVector)
470 474
471/* 475/*
476 * Fixup handler for TLB miss in double exception handler for window owerflow.
477 * We get here with windowbase set to the window that was being spilled and
478 * a0 trashed. a0 bit 7 determines if this is a call8 (bit clear) or call12
479 * (bit set) window.
480 *
481 * We do the following here:
482 * - go to the original window retaining a0 value;
483 * - set up exception stack to return back to appropriate a0 restore code
484 * (we'll need to rotate window back and there's no place to save this
485 * information, use different return address for that);
486 * - handle the exception;
487 * - go to the window that was being spilled;
488 * - set up window_overflow_restore_a0_fixup as a fixup routine;
489 * - reload a0;
490 * - restore the original window;
491 * - reset the default fixup routine;
492 * - return to user. By the time we get to this fixup handler all information
493 * about the conditions of the original double exception that happened in
494 * the window overflow handler is lost, so we just return to userspace to
495 * retry overflow from start.
496 *
497 * a0: value of depc, original value in depc
498 * a2: trashed, original value in EXC_TABLE_DOUBLE_SAVE
499 * a3: exctable, original value in excsave1
500 */
501
502ENTRY(window_overflow_restore_a0_fixup)
503
504 rsr a0, ps
505 extui a0, a0, PS_OWB_SHIFT, PS_OWB_WIDTH
506 rsr a2, windowbase
507 sub a0, a2, a0
508 extui a0, a0, 0, 3
509 l32i a2, a3, EXC_TABLE_DOUBLE_SAVE
510 xsr a3, excsave1
511
512 _beqi a0, 1, .Lhandle_1
513 _beqi a0, 3, .Lhandle_3
514
515 .macro overflow_fixup_handle_exception_pane n
516
517 rsr a0, depc
518 rotw -\n
519
520 xsr a3, excsave1
521 wsr a2, depc
522 l32i a2, a3, EXC_TABLE_KSTK
523 s32i a0, a2, PT_AREG0
524
525 movi a0, .Lrestore_\n
526 s32i a0, a2, PT_DEPC
527 rsr a0, exccause
528 j _DoubleExceptionVector_handle_exception
529
530 .endm
531
532 overflow_fixup_handle_exception_pane 2
533.Lhandle_1:
534 overflow_fixup_handle_exception_pane 1
535.Lhandle_3:
536 overflow_fixup_handle_exception_pane 3
537
538 .macro overflow_fixup_restore_a0_pane n
539
540 rotw \n
541 /* Need to preserve a0 value here to be able to handle exception
542 * that may occur on a0 reload from stack. It may occur because
543 * TLB miss handler may not be atomic and pointer to page table
544 * may be lost before we get here. There are no free registers,
545 * so we need to use EXC_TABLE_DOUBLE_SAVE area.
546 */
547 xsr a3, excsave1
548 s32i a2, a3, EXC_TABLE_DOUBLE_SAVE
549 movi a2, window_overflow_restore_a0_fixup
550 s32i a2, a3, EXC_TABLE_FIXUP
551 l32i a2, a3, EXC_TABLE_DOUBLE_SAVE
552 xsr a3, excsave1
553 bbsi.l a0, 7, 1f
554 l32e a0, a9, -16
555 j 2f
5561:
557 l32e a0, a13, -16
5582:
559 rotw -\n
560
561 .endm
562
563.Lrestore_2:
564 overflow_fixup_restore_a0_pane 2
565
566.Lset_default_fixup:
567 xsr a3, excsave1
568 s32i a2, a3, EXC_TABLE_DOUBLE_SAVE
569 movi a2, 0
570 s32i a2, a3, EXC_TABLE_FIXUP
571 l32i a2, a3, EXC_TABLE_DOUBLE_SAVE
572 xsr a3, excsave1
573 rfe
574
575.Lrestore_1:
576 overflow_fixup_restore_a0_pane 1
577 j .Lset_default_fixup
578.Lrestore_3:
579 overflow_fixup_restore_a0_pane 3
580 j .Lset_default_fixup
581
582ENDPROC(window_overflow_restore_a0_fixup)
583
584 .end literal_prefix
585/*
472 * Debug interrupt vector 586 * Debug interrupt vector
473 * 587 *
474 * There is not much space here, so simply jump to another handler. 588 * There is not much space here, so simply jump to another handler.
diff --git a/arch/xtensa/kernel/vmlinux.lds.S b/arch/xtensa/kernel/vmlinux.lds.S
index ee32c0085dff..d16db6df86f8 100644
--- a/arch/xtensa/kernel/vmlinux.lds.S
+++ b/arch/xtensa/kernel/vmlinux.lds.S
@@ -269,13 +269,13 @@ SECTIONS
269 .UserExceptionVector.literal) 269 .UserExceptionVector.literal)
270 SECTION_VECTOR (_DoubleExceptionVector_literal, 270 SECTION_VECTOR (_DoubleExceptionVector_literal,
271 .DoubleExceptionVector.literal, 271 .DoubleExceptionVector.literal,
272 DOUBLEEXC_VECTOR_VADDR - 16, 272 DOUBLEEXC_VECTOR_VADDR - 40,
273 SIZEOF(.UserExceptionVector.text), 273 SIZEOF(.UserExceptionVector.text),
274 .UserExceptionVector.text) 274 .UserExceptionVector.text)
275 SECTION_VECTOR (_DoubleExceptionVector_text, 275 SECTION_VECTOR (_DoubleExceptionVector_text,
276 .DoubleExceptionVector.text, 276 .DoubleExceptionVector.text,
277 DOUBLEEXC_VECTOR_VADDR, 277 DOUBLEEXC_VECTOR_VADDR,
278 32, 278 40,
279 .DoubleExceptionVector.literal) 279 .DoubleExceptionVector.literal)
280 280
281 . = (LOADADDR( .DoubleExceptionVector.text ) + SIZEOF( .DoubleExceptionVector.text ) + 3) & ~ 3; 281 . = (LOADADDR( .DoubleExceptionVector.text ) + SIZEOF( .DoubleExceptionVector.text ) + 3) & ~ 3;
diff --git a/arch/xtensa/mm/init.c b/arch/xtensa/mm/init.c
index 4224256bb215..77ed20209ca5 100644
--- a/arch/xtensa/mm/init.c
+++ b/arch/xtensa/mm/init.c
@@ -191,7 +191,7 @@ int __init mem_reserve(unsigned long start, unsigned long end, int must_exist)
191 return -EINVAL; 191 return -EINVAL;
192 } 192 }
193 193
194 if (it && start - it->start < bank_sz) { 194 if (it && start - it->start <= bank_sz) {
195 if (start == it->start) { 195 if (start == it->start) {
196 if (end - it->start < bank_sz) { 196 if (end - it->start < bank_sz) {
197 it->start = end; 197 it->start = end;
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index b9f4cc494ece..28d227c5ca77 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -872,6 +872,13 @@ void blkcg_drain_queue(struct request_queue *q)
872{ 872{
873 lockdep_assert_held(q->queue_lock); 873 lockdep_assert_held(q->queue_lock);
874 874
875 /*
876 * @q could be exiting and already have destroyed all blkgs as
877 * indicated by NULL root_blkg. If so, don't confuse policies.
878 */
879 if (!q->root_blkg)
880 return;
881
875 blk_throtl_drain(q); 882 blk_throtl_drain(q);
876} 883}
877 884
diff --git a/block/blk-tag.c b/block/blk-tag.c
index 3f33d8672268..a185b86741e5 100644
--- a/block/blk-tag.c
+++ b/block/blk-tag.c
@@ -27,18 +27,15 @@ struct request *blk_queue_find_tag(struct request_queue *q, int tag)
27EXPORT_SYMBOL(blk_queue_find_tag); 27EXPORT_SYMBOL(blk_queue_find_tag);
28 28
29/** 29/**
30 * __blk_free_tags - release a given set of tag maintenance info 30 * blk_free_tags - release a given set of tag maintenance info
31 * @bqt: the tag map to free 31 * @bqt: the tag map to free
32 * 32 *
33 * Tries to free the specified @bqt. Returns true if it was 33 * Drop the reference count on @bqt and frees it when the last reference
34 * actually freed and false if there are still references using it 34 * is dropped.
35 */ 35 */
36static int __blk_free_tags(struct blk_queue_tag *bqt) 36void blk_free_tags(struct blk_queue_tag *bqt)
37{ 37{
38 int retval; 38 if (atomic_dec_and_test(&bqt->refcnt)) {
39
40 retval = atomic_dec_and_test(&bqt->refcnt);
41 if (retval) {
42 BUG_ON(find_first_bit(bqt->tag_map, bqt->max_depth) < 39 BUG_ON(find_first_bit(bqt->tag_map, bqt->max_depth) <
43 bqt->max_depth); 40 bqt->max_depth);
44 41
@@ -50,9 +47,8 @@ static int __blk_free_tags(struct blk_queue_tag *bqt)
50 47
51 kfree(bqt); 48 kfree(bqt);
52 } 49 }
53
54 return retval;
55} 50}
51EXPORT_SYMBOL(blk_free_tags);
56 52
57/** 53/**
58 * __blk_queue_free_tags - release tag maintenance info 54 * __blk_queue_free_tags - release tag maintenance info
@@ -69,28 +65,13 @@ void __blk_queue_free_tags(struct request_queue *q)
69 if (!bqt) 65 if (!bqt)
70 return; 66 return;
71 67
72 __blk_free_tags(bqt); 68 blk_free_tags(bqt);
73 69
74 q->queue_tags = NULL; 70 q->queue_tags = NULL;
75 queue_flag_clear_unlocked(QUEUE_FLAG_QUEUED, q); 71 queue_flag_clear_unlocked(QUEUE_FLAG_QUEUED, q);
76} 72}
77 73
78/** 74/**
79 * blk_free_tags - release a given set of tag maintenance info
80 * @bqt: the tag map to free
81 *
82 * For externally managed @bqt frees the map. Callers of this
83 * function must guarantee to have released all the queues that
84 * might have been using this tag map.
85 */
86void blk_free_tags(struct blk_queue_tag *bqt)
87{
88 if (unlikely(!__blk_free_tags(bqt)))
89 BUG();
90}
91EXPORT_SYMBOL(blk_free_tags);
92
93/**
94 * blk_queue_free_tags - release tag maintenance info 75 * blk_queue_free_tags - release tag maintenance info
95 * @q: the request queue for the device 76 * @q: the request queue for the device
96 * 77 *
diff --git a/block/compat_ioctl.c b/block/compat_ioctl.c
index fbd5a67cb773..a0926a6094b2 100644
--- a/block/compat_ioctl.c
+++ b/block/compat_ioctl.c
@@ -690,6 +690,7 @@ long compat_blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg)
690 case BLKROSET: 690 case BLKROSET:
691 case BLKDISCARD: 691 case BLKDISCARD:
692 case BLKSECDISCARD: 692 case BLKSECDISCARD:
693 case BLKZEROOUT:
693 /* 694 /*
694 * the ones below are implemented in blkdev_locked_ioctl, 695 * the ones below are implemented in blkdev_locked_ioctl,
695 * but we call blkdev_ioctl, which gets the lock for us 696 * but we call blkdev_ioctl, which gets the lock for us
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index dae5607e1115..4cd52a4541a9 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -456,6 +456,7 @@ static const struct pci_device_id ahci_pci_tbl[] = {
456 456
457 /* Promise */ 457 /* Promise */
458 { PCI_VDEVICE(PROMISE, 0x3f20), board_ahci }, /* PDC42819 */ 458 { PCI_VDEVICE(PROMISE, 0x3f20), board_ahci }, /* PDC42819 */
459 { PCI_VDEVICE(PROMISE, 0x3781), board_ahci }, /* FastTrak TX8660 ahci-mode */
459 460
460 /* Asmedia */ 461 /* Asmedia */
461 { PCI_VDEVICE(ASMEDIA, 0x0601), board_ahci }, /* ASM1060 */ 462 { PCI_VDEVICE(ASMEDIA, 0x0601), board_ahci }, /* ASM1060 */
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 18d97d5c7d90..677c0c1b03bd 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -4787,6 +4787,10 @@ void swap_buf_le16(u16 *buf, unsigned int buf_words)
4787 * ata_qc_new - Request an available ATA command, for queueing 4787 * ata_qc_new - Request an available ATA command, for queueing
4788 * @ap: target port 4788 * @ap: target port
4789 * 4789 *
4790 * Some ATA host controllers may implement a queue depth which is less
4791 * than ATA_MAX_QUEUE. So we shouldn't allocate a tag which is beyond
4792 * the hardware limitation.
4793 *
4790 * LOCKING: 4794 * LOCKING:
4791 * None. 4795 * None.
4792 */ 4796 */
@@ -4794,14 +4798,15 @@ void swap_buf_le16(u16 *buf, unsigned int buf_words)
4794static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap) 4798static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap)
4795{ 4799{
4796 struct ata_queued_cmd *qc = NULL; 4800 struct ata_queued_cmd *qc = NULL;
4801 unsigned int max_queue = ap->host->n_tags;
4797 unsigned int i, tag; 4802 unsigned int i, tag;
4798 4803
4799 /* no command while frozen */ 4804 /* no command while frozen */
4800 if (unlikely(ap->pflags & ATA_PFLAG_FROZEN)) 4805 if (unlikely(ap->pflags & ATA_PFLAG_FROZEN))
4801 return NULL; 4806 return NULL;
4802 4807
4803 for (i = 0; i < ATA_MAX_QUEUE; i++) { 4808 for (i = 0, tag = ap->last_tag + 1; i < max_queue; i++, tag++) {
4804 tag = (i + ap->last_tag + 1) % ATA_MAX_QUEUE; 4809 tag = tag < max_queue ? tag : 0;
4805 4810
4806 /* the last tag is reserved for internal command. */ 4811 /* the last tag is reserved for internal command. */
4807 if (tag == ATA_TAG_INTERNAL) 4812 if (tag == ATA_TAG_INTERNAL)
@@ -6088,6 +6093,7 @@ void ata_host_init(struct ata_host *host, struct device *dev,
6088{ 6093{
6089 spin_lock_init(&host->lock); 6094 spin_lock_init(&host->lock);
6090 mutex_init(&host->eh_mutex); 6095 mutex_init(&host->eh_mutex);
6096 host->n_tags = ATA_MAX_QUEUE - 1;
6091 host->dev = dev; 6097 host->dev = dev;
6092 host->ops = ops; 6098 host->ops = ops;
6093} 6099}
@@ -6169,6 +6175,8 @@ int ata_host_register(struct ata_host *host, struct scsi_host_template *sht)
6169{ 6175{
6170 int i, rc; 6176 int i, rc;
6171 6177
6178 host->n_tags = clamp(sht->can_queue, 1, ATA_MAX_QUEUE - 1);
6179
6172 /* host must have been started */ 6180 /* host must have been started */
6173 if (!(host->flags & ATA_HOST_STARTED)) { 6181 if (!(host->flags & ATA_HOST_STARTED)) {
6174 dev_err(host->dev, "BUG: trying to register unstarted host\n"); 6182 dev_err(host->dev, "BUG: trying to register unstarted host\n");
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
index 6760fc4e85b8..dad83df555c4 100644
--- a/drivers/ata/libata-eh.c
+++ b/drivers/ata/libata-eh.c
@@ -1811,7 +1811,7 @@ static unsigned int ata_eh_analyze_tf(struct ata_queued_cmd *qc,
1811 case ATA_DEV_ATA: 1811 case ATA_DEV_ATA:
1812 if (err & ATA_ICRC) 1812 if (err & ATA_ICRC)
1813 qc->err_mask |= AC_ERR_ATA_BUS; 1813 qc->err_mask |= AC_ERR_ATA_BUS;
1814 if (err & ATA_UNC) 1814 if (err & (ATA_UNC | ATA_AMNF))
1815 qc->err_mask |= AC_ERR_MEDIA; 1815 qc->err_mask |= AC_ERR_MEDIA;
1816 if (err & ATA_IDNF) 1816 if (err & ATA_IDNF)
1817 qc->err_mask |= AC_ERR_INVALID; 1817 qc->err_mask |= AC_ERR_INVALID;
@@ -2556,11 +2556,12 @@ static void ata_eh_link_report(struct ata_link *link)
2556 } 2556 }
2557 2557
2558 if (cmd->command != ATA_CMD_PACKET && 2558 if (cmd->command != ATA_CMD_PACKET &&
2559 (res->feature & (ATA_ICRC | ATA_UNC | ATA_IDNF | 2559 (res->feature & (ATA_ICRC | ATA_UNC | ATA_AMNF |
2560 ATA_ABORTED))) 2560 ATA_IDNF | ATA_ABORTED)))
2561 ata_dev_err(qc->dev, "error: { %s%s%s%s}\n", 2561 ata_dev_err(qc->dev, "error: { %s%s%s%s%s}\n",
2562 res->feature & ATA_ICRC ? "ICRC " : "", 2562 res->feature & ATA_ICRC ? "ICRC " : "",
2563 res->feature & ATA_UNC ? "UNC " : "", 2563 res->feature & ATA_UNC ? "UNC " : "",
2564 res->feature & ATA_AMNF ? "AMNF " : "",
2564 res->feature & ATA_IDNF ? "IDNF " : "", 2565 res->feature & ATA_IDNF ? "IDNF " : "",
2565 res->feature & ATA_ABORTED ? "ABRT " : ""); 2566 res->feature & ATA_ABORTED ? "ABRT " : "");
2566#endif 2567#endif
diff --git a/drivers/ata/pata_ep93xx.c b/drivers/ata/pata_ep93xx.c
index 6ad5c072ce34..4d37c5415fc7 100644
--- a/drivers/ata/pata_ep93xx.c
+++ b/drivers/ata/pata_ep93xx.c
@@ -915,7 +915,7 @@ static int ep93xx_pata_probe(struct platform_device *pdev)
915 struct ep93xx_pata_data *drv_data; 915 struct ep93xx_pata_data *drv_data;
916 struct ata_host *host; 916 struct ata_host *host;
917 struct ata_port *ap; 917 struct ata_port *ap;
918 unsigned int irq; 918 int irq;
919 struct resource *mem_res; 919 struct resource *mem_res;
920 void __iomem *ide_base; 920 void __iomem *ide_base;
921 int err; 921 int err;
diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
index 1b35c45c92b7..3f2e16738080 100644
--- a/drivers/block/drbd/drbd_nl.c
+++ b/drivers/block/drbd/drbd_nl.c
@@ -544,6 +544,12 @@ void conn_try_outdate_peer_async(struct drbd_connection *connection)
544 struct task_struct *opa; 544 struct task_struct *opa;
545 545
546 kref_get(&connection->kref); 546 kref_get(&connection->kref);
547 /* We may just have force_sig()'ed this thread
548 * to get it out of some blocking network function.
549 * Clear signals; otherwise kthread_run(), which internally uses
550 * wait_on_completion_killable(), will mistake our pending signal
551 * for a new fatal signal and fail. */
552 flush_signals(current);
547 opa = kthread_run(_try_outdate_peer_async, connection, "drbd_async_h"); 553 opa = kthread_run(_try_outdate_peer_async, connection, "drbd_async_h");
548 if (IS_ERR(opa)) { 554 if (IS_ERR(opa)) {
549 drbd_err(connection, "out of mem, failed to invoke fence-peer helper\n"); 555 drbd_err(connection, "out of mem, failed to invoke fence-peer helper\n");
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index 089e72cd37be..36e54be402df 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -622,11 +622,18 @@ static void zram_reset_device(struct zram *zram, bool reset_capacity)
 	memset(&zram->stats, 0, sizeof(zram->stats));
 
 	zram->disksize = 0;
-	if (reset_capacity) {
+	if (reset_capacity)
 		set_capacity(zram->disk, 0);
-		revalidate_disk(zram->disk);
-	}
+
 	up_write(&zram->init_lock);
+
+	/*
+	 * Revalidate disk out of the init_lock to avoid lockdep splat.
+	 * It's okay because disk's capacity is protected by init_lock
+	 * so that revalidate_disk always sees up-to-date capacity.
+	 */
+	if (reset_capacity)
+		revalidate_disk(zram->disk);
 }
 
 static ssize_t disksize_store(struct device *dev,
@@ -666,8 +673,15 @@ static ssize_t disksize_store(struct device *dev,
 	zram->comp = comp;
 	zram->disksize = disksize;
 	set_capacity(zram->disk, zram->disksize >> SECTOR_SHIFT);
-	revalidate_disk(zram->disk);
 	up_write(&zram->init_lock);
+
+	/*
+	 * Revalidate disk out of the init_lock to avoid lockdep splat.
+	 * It's okay because disk's capacity is protected by init_lock
+	 * so that revalidate_disk always sees up-to-date capacity.
+	 */
+	revalidate_disk(zram->disk);
+
 	return len;
 
 out_destroy_comp:
diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c
index 57985410f12f..a66a3217f1d9 100644
--- a/drivers/firewire/ohci.c
+++ b/drivers/firewire/ohci.c
@@ -336,10 +336,10 @@ static const struct {
 		QUIRK_CYCLE_TIMER | QUIRK_IR_WAKE},
 
 	{PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_VT6315, 0,
-		QUIRK_CYCLE_TIMER | QUIRK_NO_MSI},
+		QUIRK_CYCLE_TIMER /* FIXME: necessary? */ | QUIRK_NO_MSI},
 
 	{PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_VT6315, PCI_ANY_ID,
-		0},
+		QUIRK_NO_MSI},
 
 	{PCI_VENDOR_ID_VIA, PCI_ANY_ID, PCI_ANY_ID,
 		QUIRK_CYCLE_TIMER | QUIRK_NO_MSI},
diff --git a/drivers/gpio/gpio-rcar.c b/drivers/gpio/gpio-rcar.c
index 0c9f803fc1ac..b6ae89ea8811 100644
--- a/drivers/gpio/gpio-rcar.c
+++ b/drivers/gpio/gpio-rcar.c
@@ -284,6 +284,7 @@ static int gpio_rcar_irq_domain_map(struct irq_domain *h, unsigned int irq,
 
 static struct irq_domain_ops gpio_rcar_irq_domain_ops = {
 	.map = gpio_rcar_irq_domain_map,
+	.xlate = irq_domain_xlate_twocell,
 };
 
 struct gpio_rcar_info {
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index f36126383d26..d893e4da5dce 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1616,22 +1616,6 @@ out:
 	return ret;
 }
 
-void i915_gem_release_all_mmaps(struct drm_i915_private *dev_priv)
-{
-	struct i915_vma *vma;
-
-	/*
-	 * Only the global gtt is relevant for gtt memory mappings, so restrict
-	 * list traversal to objects bound into the global address space. Note
-	 * that the active list should be empty, but better safe than sorry.
-	 */
-	WARN_ON(!list_empty(&dev_priv->gtt.base.active_list));
-	list_for_each_entry(vma, &dev_priv->gtt.base.active_list, mm_list)
-		i915_gem_release_mmap(vma->obj);
-	list_for_each_entry(vma, &dev_priv->gtt.base.inactive_list, mm_list)
-		i915_gem_release_mmap(vma->obj);
-}
-
 /**
  * i915_gem_release_mmap - remove physical page mappings
  * @obj: obj in question
@@ -1657,6 +1641,15 @@ i915_gem_release_mmap(struct drm_i915_gem_object *obj)
 	obj->fault_mappable = false;
 }
 
+void
+i915_gem_release_all_mmaps(struct drm_i915_private *dev_priv)
+{
+	struct drm_i915_gem_object *obj;
+
+	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list)
+		i915_gem_release_mmap(obj);
+}
+
 uint32_t
 i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode)
 {
diff --git a/drivers/gpu/drm/i915/i915_gem_render_state.c b/drivers/gpu/drm/i915/i915_gem_render_state.c
index 3521f998a178..34894b573064 100644
--- a/drivers/gpu/drm/i915/i915_gem_render_state.c
+++ b/drivers/gpu/drm/i915/i915_gem_render_state.c
@@ -31,7 +31,7 @@
 struct i915_render_state {
 	struct drm_i915_gem_object *obj;
 	unsigned long ggtt_offset;
-	void *batch;
+	u32 *batch;
 	u32 size;
 	u32 len;
 };
@@ -80,7 +80,7 @@ free:
 
 static void render_state_free(struct i915_render_state *so)
 {
-	kunmap(so->batch);
+	kunmap(kmap_to_page(so->batch));
 	i915_gem_object_ggtt_unpin(so->obj);
 	drm_gem_object_unreference(&so->obj->base);
 	kfree(so);
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 267f069765ad..c05c84f3f091 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -2845,7 +2845,7 @@ static int semaphore_passed(struct intel_engine_cs *ring)
 {
 	struct drm_i915_private *dev_priv = ring->dev->dev_private;
 	struct intel_engine_cs *signaller;
-	u32 seqno, ctl;
+	u32 seqno;
 
 	ring->hangcheck.deadlock++;
 
@@ -2857,15 +2857,12 @@ static int semaphore_passed(struct intel_engine_cs *ring)
 	if (signaller->hangcheck.deadlock >= I915_NUM_RINGS)
 		return -1;
 
-	/* cursory check for an unkickable deadlock */
-	ctl = I915_READ_CTL(signaller);
-	if (ctl & RING_WAIT_SEMAPHORE && semaphore_passed(signaller) < 0)
-		return -1;
-
 	if (i915_seqno_passed(signaller->get_seqno(signaller, false), seqno))
 		return 1;
 
-	if (signaller->hangcheck.deadlock)
+	/* cursory check for an unkickable deadlock */
+	if (I915_READ_CTL(signaller) & RING_WAIT_SEMAPHORE &&
+	    semaphore_passed(signaller) < 0)
 		return -1;
 
 	return 0;
diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c
index 0b2471107137..c0ea66192fe0 100644
--- a/drivers/gpu/drm/radeon/cik.c
+++ b/drivers/gpu/drm/radeon/cik.c
@@ -2291,6 +2291,7 @@ static void cik_tiling_mode_table_init(struct radeon_device *rdev)
 				gb_tile_moden = 0;
 				break;
 			}
+			rdev->config.cik.macrotile_mode_array[reg_offset] = gb_tile_moden;
 			WREG32(GB_MACROTILE_MODE0 + (reg_offset * 4), gb_tile_moden);
 		}
 	} else if (num_pipe_configs == 8) {
@@ -7376,6 +7377,7 @@ static inline u32 cik_get_ih_wptr(struct radeon_device *rdev)
 		tmp = RREG32(IH_RB_CNTL);
 		tmp |= IH_WPTR_OVERFLOW_CLEAR;
 		WREG32(IH_RB_CNTL, tmp);
+		wptr &= ~RB_OVERFLOW;
 	}
 	return (wptr & rdev->ih.ptr_mask);
 }
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index 250bac3935a4..15e4f28015e1 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -4756,6 +4756,7 @@ static u32 evergreen_get_ih_wptr(struct radeon_device *rdev)
 		tmp = RREG32(IH_RB_CNTL);
 		tmp |= IH_WPTR_OVERFLOW_CLEAR;
 		WREG32(IH_RB_CNTL, tmp);
+		wptr &= ~RB_OVERFLOW;
 	}
 	return (wptr & rdev->ih.ptr_mask);
 }
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index c66952d4b00c..3c69f58e46ef 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -3795,6 +3795,7 @@ static u32 r600_get_ih_wptr(struct radeon_device *rdev)
 		tmp = RREG32(IH_RB_CNTL);
 		tmp |= IH_WPTR_OVERFLOW_CLEAR;
 		WREG32(IH_RB_CNTL, tmp);
+		wptr &= ~RB_OVERFLOW;
 	}
 	return (wptr & rdev->ih.ptr_mask);
 }
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index b7204500a9a6..60c47f829122 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -449,6 +449,7 @@ struct radeon_bo_va {
 
 	/* protected by vm mutex */
 	struct list_head vm_list;
+	struct list_head vm_status;
 
 	/* constant after initialization */
 	struct radeon_vm *vm;
@@ -867,6 +868,9 @@ struct radeon_vm {
 	struct list_head va;
 	unsigned id;
 
+	/* BOs freed, but not yet updated in the PT */
+	struct list_head freed;
+
 	/* contains the page directory */
 	struct radeon_bo *page_directory;
 	uint64_t pd_gpu_addr;
@@ -875,6 +879,8 @@ struct radeon_vm {
 	/* array of page tables, one for each page directory entry */
 	struct radeon_vm_pt *page_tables;
 
+	struct radeon_bo_va *ib_bo_va;
+
 	struct mutex mutex;
 	/* last fence for cs using this vm */
 	struct radeon_fence *fence;
@@ -2832,9 +2838,10 @@ void radeon_vm_fence(struct radeon_device *rdev,
 uint64_t radeon_vm_map_gart(struct radeon_device *rdev, uint64_t addr);
 int radeon_vm_update_page_directory(struct radeon_device *rdev,
 				    struct radeon_vm *vm);
+int radeon_vm_clear_freed(struct radeon_device *rdev,
+			  struct radeon_vm *vm);
 int radeon_vm_bo_update(struct radeon_device *rdev,
-			struct radeon_vm *vm,
-			struct radeon_bo *bo,
+			struct radeon_bo_va *bo_va,
 			struct ttm_mem_reg *mem);
 void radeon_vm_bo_invalidate(struct radeon_device *rdev,
 			     struct radeon_bo *bo);
@@ -2847,8 +2854,8 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev,
 			  struct radeon_bo_va *bo_va,
 			  uint64_t offset,
 			  uint32_t flags);
-int radeon_vm_bo_rmv(struct radeon_device *rdev,
+void radeon_vm_bo_rmv(struct radeon_device *rdev,
 		     struct radeon_bo_va *bo_va);
 
 /* audio */
 void r600_audio_update_hdmi(struct work_struct *work);
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index 71a143461478..ae763f60c8a0 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -461,13 +461,23 @@ static int radeon_bo_vm_update_pte(struct radeon_cs_parser *p,
 			    struct radeon_vm *vm)
 {
 	struct radeon_device *rdev = p->rdev;
+	struct radeon_bo_va *bo_va;
 	int i, r;
 
 	r = radeon_vm_update_page_directory(rdev, vm);
 	if (r)
 		return r;
 
-	r = radeon_vm_bo_update(rdev, vm, rdev->ring_tmp_bo.bo,
+	r = radeon_vm_clear_freed(rdev, vm);
+	if (r)
+		return r;
+
+	if (vm->ib_bo_va == NULL) {
+		DRM_ERROR("Tmp BO not in VM!\n");
+		return -EINVAL;
+	}
+
+	r = radeon_vm_bo_update(rdev, vm->ib_bo_va,
 				&rdev->ring_tmp_bo.bo->tbo.mem);
 	if (r)
 		return r;
@@ -480,7 +490,13 @@ static int radeon_bo_vm_update_pte(struct radeon_cs_parser *p,
 			continue;
 
 		bo = p->relocs[i].robj;
-		r = radeon_vm_bo_update(rdev, vm, bo, &bo->tbo.mem);
+		bo_va = radeon_vm_bo_find(vm, bo);
+		if (bo_va == NULL) {
+			dev_err(rdev->dev, "bo %p not in vm %p\n", bo, vm);
+			return -EINVAL;
+		}
+
+		r = radeon_vm_bo_update(rdev, bo_va, &bo->tbo.mem);
 		if (r)
 			return r;
 	}
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 03686fab842d..697add2cd4e3 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -1056,36 +1056,36 @@ static void radeon_check_arguments(struct radeon_device *rdev)
 	if (!radeon_check_pot_argument(radeon_vm_size)) {
 		dev_warn(rdev->dev, "VM size (%d) must be a power of 2\n",
 			 radeon_vm_size);
-		radeon_vm_size = 4096;
+		radeon_vm_size = 4;
 	}
 
-	if (radeon_vm_size < 4) {
-		dev_warn(rdev->dev, "VM size (%d) to small, min is 4MB\n",
+	if (radeon_vm_size < 1) {
+		dev_warn(rdev->dev, "VM size (%d) to small, min is 1GB\n",
 			 radeon_vm_size);
-		radeon_vm_size = 4096;
+		radeon_vm_size = 4;
 	}
 
 	/*
 	 * Max GPUVM size for Cayman, SI and CI are 40 bits.
 	 */
-	if (radeon_vm_size > 1024*1024) {
-		dev_warn(rdev->dev, "VM size (%d) to large, max is 1TB\n",
+	if (radeon_vm_size > 1024) {
+		dev_warn(rdev->dev, "VM size (%d) too large, max is 1TB\n",
 			 radeon_vm_size);
-		radeon_vm_size = 4096;
+		radeon_vm_size = 4;
 	}
 
 	/* defines number of bits in page table versus page directory,
 	 * a page is 4KB so we have 12 bits offset, minimum 9 bits in the
 	 * page table and the remaining bits are in the page directory */
 	if (radeon_vm_block_size < 9) {
-		dev_warn(rdev->dev, "VM page table size (%d) to small\n",
+		dev_warn(rdev->dev, "VM page table size (%d) too small\n",
 			 radeon_vm_block_size);
 		radeon_vm_block_size = 9;
 	}
 
 	if (radeon_vm_block_size > 24 ||
-	    radeon_vm_size < (1ull << radeon_vm_block_size)) {
-		dev_warn(rdev->dev, "VM page table size (%d) to large\n",
+	    (radeon_vm_size * 1024) < (1ull << radeon_vm_block_size)) {
+		dev_warn(rdev->dev, "VM page table size (%d) too large\n",
 			 radeon_vm_block_size);
 		radeon_vm_block_size = 9;
 	}
@@ -1238,7 +1238,7 @@ int radeon_device_init(struct radeon_device *rdev,
 	/* Adjust VM size here.
 	 * Max GPUVM size for cayman+ is 40 bits.
 	 */
-	rdev->vm_manager.max_pfn = radeon_vm_size << 8;
+	rdev->vm_manager.max_pfn = radeon_vm_size << 18;
 
 	/* Set asic functions */
 	r = radeon_asic_init(rdev);
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index cb1421369e3a..e9e361084249 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -173,7 +173,7 @@ int radeon_dpm = -1;
 int radeon_aspm = -1;
 int radeon_runtime_pm = -1;
 int radeon_hard_reset = 0;
-int radeon_vm_size = 4096;
+int radeon_vm_size = 4;
 int radeon_vm_block_size = 9;
 int radeon_deep_color = 0;
 
@@ -243,7 +243,7 @@ module_param_named(runpm, radeon_runtime_pm, int, 0444);
 MODULE_PARM_DESC(hard_reset, "PCI config reset (1 = force enable, 0 = disable (default))");
 module_param_named(hard_reset, radeon_hard_reset, int, 0444);
 
-MODULE_PARM_DESC(vm_size, "VM address space size in megabytes (default 4GB)");
+MODULE_PARM_DESC(vm_size, "VM address space size in gigabytes (default 4GB)");
 module_param_named(vm_size, radeon_vm_size, int, 0444);
 
 MODULE_PARM_DESC(vm_block_size, "VM page table size in bits (default 9)");
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index 35d931881b4b..d25ae6acfd5a 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -579,7 +579,7 @@ int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
 	/* new gpu have virtual address space support */
 	if (rdev->family >= CHIP_CAYMAN) {
 		struct radeon_fpriv *fpriv;
-		struct radeon_bo_va *bo_va;
+		struct radeon_vm *vm;
 		int r;
 
 		fpriv = kzalloc(sizeof(*fpriv), GFP_KERNEL);
@@ -587,7 +587,8 @@ int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
 			return -ENOMEM;
 		}
 
-		r = radeon_vm_init(rdev, &fpriv->vm);
+		vm = &fpriv->vm;
+		r = radeon_vm_init(rdev, vm);
 		if (r) {
 			kfree(fpriv);
 			return r;
@@ -596,22 +597,23 @@ int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
 		if (rdev->accel_working) {
 			r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false);
 			if (r) {
-				radeon_vm_fini(rdev, &fpriv->vm);
+				radeon_vm_fini(rdev, vm);
 				kfree(fpriv);
 				return r;
 			}
 
 			/* map the ib pool buffer read only into
 			 * virtual address space */
-			bo_va = radeon_vm_bo_add(rdev, &fpriv->vm,
+			vm->ib_bo_va = radeon_vm_bo_add(rdev, vm,
 						 rdev->ring_tmp_bo.bo);
-			r = radeon_vm_bo_set_addr(rdev, bo_va, RADEON_VA_IB_OFFSET,
+			r = radeon_vm_bo_set_addr(rdev, vm->ib_bo_va,
+						  RADEON_VA_IB_OFFSET,
 						  RADEON_VM_PAGE_READABLE |
 						  RADEON_VM_PAGE_SNOOPED);
 
 			radeon_bo_unreserve(rdev->ring_tmp_bo.bo);
 			if (r) {
-				radeon_vm_fini(rdev, &fpriv->vm);
+				radeon_vm_fini(rdev, vm);
 				kfree(fpriv);
 				return r;
 			}
@@ -640,21 +642,19 @@ void radeon_driver_postclose_kms(struct drm_device *dev,
 	/* new gpu have virtual address space support */
 	if (rdev->family >= CHIP_CAYMAN && file_priv->driver_priv) {
 		struct radeon_fpriv *fpriv = file_priv->driver_priv;
-		struct radeon_bo_va *bo_va;
+		struct radeon_vm *vm = &fpriv->vm;
 		int r;
 
 		if (rdev->accel_working) {
 			r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false);
 			if (!r) {
-				bo_va = radeon_vm_bo_find(&fpriv->vm,
-							  rdev->ring_tmp_bo.bo);
-				if (bo_va)
-					radeon_vm_bo_rmv(rdev, bo_va);
+				if (vm->ib_bo_va)
+					radeon_vm_bo_rmv(rdev, vm->ib_bo_va);
 				radeon_bo_unreserve(rdev->ring_tmp_bo.bo);
 			}
 		}
 
-		radeon_vm_fini(rdev, &fpriv->vm);
+		radeon_vm_fini(rdev, vm);
 		kfree(fpriv);
 		file_priv->driver_priv = NULL;
 	}
diff --git a/drivers/gpu/drm/radeon/radeon_vm.c b/drivers/gpu/drm/radeon/radeon_vm.c
index eecff6bbd341..725d3669014f 100644
--- a/drivers/gpu/drm/radeon/radeon_vm.c
+++ b/drivers/gpu/drm/radeon/radeon_vm.c
@@ -332,6 +332,7 @@ struct radeon_bo_va *radeon_vm_bo_add(struct radeon_device *rdev,
 	bo_va->ref_count = 1;
 	INIT_LIST_HEAD(&bo_va->bo_list);
 	INIT_LIST_HEAD(&bo_va->vm_list);
+	INIT_LIST_HEAD(&bo_va->vm_status);
 
 	mutex_lock(&vm->mutex);
 	list_add(&bo_va->vm_list, &vm->va);
@@ -468,6 +469,19 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev,
 		head = &tmp->vm_list;
 	}
 
+	if (bo_va->soffset) {
+		/* add a clone of the bo_va to clear the old address */
+		tmp = kzalloc(sizeof(struct radeon_bo_va), GFP_KERNEL);
+		if (!tmp) {
+			mutex_unlock(&vm->mutex);
+			return -ENOMEM;
+		}
+		tmp->soffset = bo_va->soffset;
+		tmp->eoffset = bo_va->eoffset;
+		tmp->vm = vm;
+		list_add(&tmp->vm_status, &vm->freed);
+	}
+
 	bo_va->soffset = soffset;
 	bo_va->eoffset = eoffset;
 	bo_va->flags = flags;
@@ -823,25 +837,19 @@ static void radeon_vm_update_ptes(struct radeon_device *rdev,
  * Object have to be reserved and mutex must be locked!
  */
 int radeon_vm_bo_update(struct radeon_device *rdev,
-			struct radeon_vm *vm,
-			struct radeon_bo *bo,
+			struct radeon_bo_va *bo_va,
 			struct ttm_mem_reg *mem)
 {
+	struct radeon_vm *vm = bo_va->vm;
 	struct radeon_ib ib;
-	struct radeon_bo_va *bo_va;
 	unsigned nptes, ndw;
 	uint64_t addr;
 	int r;
 
-	bo_va = radeon_vm_bo_find(vm, bo);
-	if (bo_va == NULL) {
-		dev_err(rdev->dev, "bo %p not in vm %p\n", bo, vm);
-		return -EINVAL;
-	}
 
 	if (!bo_va->soffset) {
 		dev_err(rdev->dev, "bo %p don't has a mapping in vm %p\n",
-			bo, vm);
+			bo_va->bo, vm);
 		return -EINVAL;
 	}
 
@@ -868,7 +876,7 @@ int radeon_vm_bo_update(struct radeon_device *rdev,
 
 	trace_radeon_vm_bo_update(bo_va);
 
-	nptes = radeon_bo_ngpu_pages(bo);
+	nptes = (bo_va->eoffset - bo_va->soffset) / RADEON_GPU_PAGE_SIZE;
 
 	/* padding, etc. */
 	ndw = 64;
@@ -911,33 +919,61 @@ int radeon_vm_bo_update(struct radeon_device *rdev,
 }
 
 /**
+ * radeon_vm_clear_freed - clear freed BOs in the PT
+ *
+ * @rdev: radeon_device pointer
+ * @vm: requested vm
+ *
+ * Make sure all freed BOs are cleared in the PT.
+ * Returns 0 for success.
+ *
+ * PTs have to be reserved and mutex must be locked!
+ */
+int radeon_vm_clear_freed(struct radeon_device *rdev,
+			  struct radeon_vm *vm)
+{
+	struct radeon_bo_va *bo_va, *tmp;
+	int r;
+
+	list_for_each_entry_safe(bo_va, tmp, &vm->freed, vm_status) {
+		list_del(&bo_va->vm_status);
+		r = radeon_vm_bo_update(rdev, bo_va, NULL);
+		kfree(bo_va);
+		if (r)
+			return r;
+	}
+	return 0;
+
+}
+
+/**
  * radeon_vm_bo_rmv - remove a bo to a specific vm
  *
  * @rdev: radeon_device pointer
  * @bo_va: requested bo_va
  *
  * Remove @bo_va->bo from the requested vm (cayman+).
- * Remove @bo_va->bo from the list of bos associated with the bo_va->vm and
- * remove the ptes for @bo_va in the page table.
- * Returns 0 for success.
  *
  * Object have to be reserved!
  */
-int radeon_vm_bo_rmv(struct radeon_device *rdev,
+void radeon_vm_bo_rmv(struct radeon_device *rdev,
 		     struct radeon_bo_va *bo_va)
 {
-	int r = 0;
+	struct radeon_vm *vm = bo_va->vm;
 
-	mutex_lock(&bo_va->vm->mutex);
-	if (bo_va->soffset)
-		r = radeon_vm_bo_update(rdev, bo_va->vm, bo_va->bo, NULL);
+	list_del(&bo_va->bo_list);
 
+	mutex_lock(&vm->mutex);
 	list_del(&bo_va->vm_list);
-	mutex_unlock(&bo_va->vm->mutex);
-	list_del(&bo_va->bo_list);
 
-	kfree(bo_va);
-	return r;
+	if (bo_va->soffset) {
+		bo_va->bo = NULL;
+		list_add(&bo_va->vm_status, &vm->freed);
+	} else {
+		kfree(bo_va);
+	}
+
+	mutex_unlock(&vm->mutex);
 }
 
 /**
@@ -975,11 +1011,13 @@ int radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm)
 	int r;
 
 	vm->id = 0;
+	vm->ib_bo_va = NULL;
 	vm->fence = NULL;
 	vm->last_flush = NULL;
 	vm->last_id_use = NULL;
 	mutex_init(&vm->mutex);
 	INIT_LIST_HEAD(&vm->va);
+	INIT_LIST_HEAD(&vm->freed);
 
 	pd_size = radeon_vm_directory_size(rdev);
 	pd_entries = radeon_vm_num_pdes(rdev);
@@ -1034,7 +1072,8 @@ void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm)
 			kfree(bo_va);
 		}
 	}
-
+	list_for_each_entry_safe(bo_va, tmp, &vm->freed, vm_status)
+		kfree(bo_va);
 
 	for (i = 0; i < radeon_vm_num_pdes(rdev); i++)
 		radeon_bo_unref(&vm->page_tables[i].bo);
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index eba0225259a4..9e854fd016da 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -6103,6 +6103,7 @@ static inline u32 si_get_ih_wptr(struct radeon_device *rdev)
 		tmp = RREG32(IH_RB_CNTL);
 		tmp |= IH_WPTR_OVERFLOW_CLEAR;
 		WREG32(IH_RB_CNTL, tmp);
+		wptr &= ~RB_OVERFLOW;
 	}
 	return (wptr & rdev->ih.ptr_mask);
 }
diff --git a/drivers/gpu/drm/radeon/trinity_dpm.c b/drivers/gpu/drm/radeon/trinity_dpm.c
index 20da6ff183df..32e50be9c4ac 100644
--- a/drivers/gpu/drm/radeon/trinity_dpm.c
+++ b/drivers/gpu/drm/radeon/trinity_dpm.c
@@ -1874,15 +1874,16 @@ int trinity_dpm_init(struct radeon_device *rdev)
 	for (i = 0; i < SUMO_MAX_HARDWARE_POWERLEVELS; i++)
 		pi->at[i] = TRINITY_AT_DFLT;
 
-	/* There are stability issues reported on latops with
-	 * bapm installed when switching between AC and battery
-	 * power. At the same time, some desktop boards hang
-	 * if it's not enabled and dpm is enabled.
+	/* There are stability issues reported on with
+	 * bapm enabled when switching between AC and battery
+	 * power. At the same time, some MSI boards hang
+	 * if it's not enabled and dpm is enabled. Just enable
+	 * it for MSI boards right now.
 	 */
-	if (rdev->flags & RADEON_IS_MOBILITY)
-		pi->enable_bapm = false;
-	else
+	if (rdev->pdev->subsystem_vendor == 0x1462)
 		pi->enable_bapm = true;
+	else
+		pi->enable_bapm = false;
 	pi->enable_nbps_policy = true;
 	pi->enable_sclk_ds = true;
 	pi->enable_gfx_power_gating = true;
diff --git a/drivers/hwmon/smsc47m192.c b/drivers/hwmon/smsc47m192.c
index efee4c59239f..34b9a601ad07 100644
--- a/drivers/hwmon/smsc47m192.c
+++ b/drivers/hwmon/smsc47m192.c
@@ -86,7 +86,7 @@ static inline u8 IN_TO_REG(unsigned long val, int n)
  */
 static inline s8 TEMP_TO_REG(int val)
 {
-	return clamp_val(SCALE(val, 1, 1000), -128000, 127000);
+	return SCALE(clamp_val(val, -128000, 127000), 1, 1000);
 }
 
 static inline int TEMP_FROM_REG(s8 val)
@@ -384,6 +384,8 @@ static ssize_t set_vrm(struct device *dev, struct device_attribute *attr,
 	err = kstrtoul(buf, 10, &val);
 	if (err)
 		return err;
+	if (val > 255)
+		return -EINVAL;
 
 	data->vrm = val;
 	return count;
diff --git a/drivers/ide/Kconfig b/drivers/ide/Kconfig
index 8fb46aab2d87..a04c49f2a011 100644
--- a/drivers/ide/Kconfig
+++ b/drivers/ide/Kconfig
@@ -416,6 +416,7 @@ config BLK_DEV_CY82C693
 
 config BLK_DEV_CS5520
 	tristate "Cyrix CS5510/20 MediaGX chipset support (VERY EXPERIMENTAL)"
+	depends on X86_32 || COMPILE_TEST
 	select BLK_DEV_IDEDMA_PCI
 	help
 	  Include support for PIO tuning and virtual DMA on the Cyrix MediaGX
@@ -426,6 +427,7 @@ config BLK_DEV_CS5520
 
 config BLK_DEV_CS5530
 	tristate "Cyrix/National Semiconductor CS5530 MediaGX chipset support"
+	depends on X86_32 || COMPILE_TEST
 	select BLK_DEV_IDEDMA_PCI
 	help
 	  Include support for UDMA on the Cyrix MediaGX 5530 chipset. This
@@ -435,7 +437,7 @@ config BLK_DEV_CS5530
 
 config BLK_DEV_CS5535
 	tristate "AMD CS5535 chipset support"
-	depends on X86 && !X86_64
+	depends on X86_32
 	select BLK_DEV_IDEDMA_PCI
 	help
 	  Include support for UDMA on the NSC/AMD CS5535 companion chipset.
@@ -486,6 +488,7 @@ config BLK_DEV_JMICRON
 
 config BLK_DEV_SC1200
 	tristate "National SCx200 chipset support"
+	depends on X86_32 || COMPILE_TEST
 	select BLK_DEV_IDEDMA_PCI
 	help
 	  This driver adds support for the on-board IDE controller on the
diff --git a/drivers/ide/ide-probe.c b/drivers/ide/ide-probe.c
index 2a744a91370e..a3d3b1733c49 100644
--- a/drivers/ide/ide-probe.c
+++ b/drivers/ide/ide-probe.c
@@ -853,8 +853,9 @@ static int init_irq (ide_hwif_t *hwif)
 	if (irq_handler == NULL)
 		irq_handler = ide_intr;
 
-	if (request_irq(hwif->irq, irq_handler, sa, hwif->name, hwif))
-		goto out_up;
+	if (!host->get_lock)
+		if (request_irq(hwif->irq, irq_handler, sa, hwif->name, hwif))
+			goto out_up;
 
 #if !defined(__mc68000__)
 	printk(KERN_INFO "%s at 0x%03lx-0x%03lx,0x%03lx on irq %d", hwif->name,
@@ -1533,7 +1534,8 @@ static void ide_unregister(ide_hwif_t *hwif)
 
 	ide_proc_unregister_port(hwif);
 
-	free_irq(hwif->irq, hwif);
+	if (!hwif->host->get_lock)
+		free_irq(hwif->irq, hwif);
 
 	device_unregister(hwif->portdev);
 	device_unregister(&hwif->gendev);
diff --git a/drivers/input/input.c b/drivers/input/input.c
index 1c4c0db05550..29ca0bb4f561 100644
--- a/drivers/input/input.c
+++ b/drivers/input/input.c
@@ -257,9 +257,10 @@ static int input_handle_abs_event(struct input_dev *dev,
 }
 
 static int input_get_disposition(struct input_dev *dev,
-			  unsigned int type, unsigned int code, int value)
+			  unsigned int type, unsigned int code, int *pval)
 {
 	int disposition = INPUT_IGNORE_EVENT;
+	int value = *pval;
 
 	switch (type) {
 
@@ -357,6 +358,7 @@ static int input_get_disposition(struct input_dev *dev,
 		break;
 	}
 
+	*pval = value;
 	return disposition;
 }
 
@@ -365,7 +367,7 @@ static void input_handle_event(struct input_dev *dev,
 {
 	int disposition;
 
-	disposition = input_get_disposition(dev, type, code, value);
+	disposition = input_get_disposition(dev, type, code, &value);
 
 	if ((disposition & INPUT_PASS_TO_DEVICE) && dev->event)
 		dev->event(dev, type, code, value);
diff --git a/drivers/input/keyboard/st-keyscan.c b/drivers/input/keyboard/st-keyscan.c
index 758b48731415..de7be4f03d91 100644
--- a/drivers/input/keyboard/st-keyscan.c
+++ b/drivers/input/keyboard/st-keyscan.c
@@ -215,6 +215,7 @@ static int keyscan_probe(struct platform_device *pdev)
 	return 0;
 }
 
+#ifdef CONFIG_PM_SLEEP
 static int keyscan_suspend(struct device *dev)
 {
 	struct platform_device *pdev = to_platform_device(dev);
@@ -249,6 +250,7 @@ static int keyscan_resume(struct device *dev)
 	mutex_unlock(&input->mutex);
 	return retval;
 }
+#endif
 
 static SIMPLE_DEV_PM_OPS(keyscan_dev_pm_ops, keyscan_suspend, keyscan_resume);
 
diff --git a/drivers/input/misc/sirfsoc-onkey.c b/drivers/input/misc/sirfsoc-onkey.c
index e4104f9b2e6d..fed5102e1802 100644
--- a/drivers/input/misc/sirfsoc-onkey.c
+++ b/drivers/input/misc/sirfsoc-onkey.c
@@ -213,7 +213,7 @@ static struct platform_driver sirfsoc_pwrc_driver = {
 
 module_platform_driver(sirfsoc_pwrc_driver);
 
-MODULE_LICENSE("GPLv2");
+MODULE_LICENSE("GPL v2");
 MODULE_AUTHOR("Binghua Duan <Binghua.Duan@csr.com>, Xianglong Du <Xianglong.Du@csr.com>");
 MODULE_DESCRIPTION("CSR Prima2 PWRC Driver");
 MODULE_ALIAS("platform:sirfsoc-pwrc");
diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
index ec772d962f06..ef9e0b8a9aa7 100644
--- a/drivers/input/mouse/synaptics.c
+++ b/drivers/input/mouse/synaptics.c
@@ -132,7 +132,8 @@ static const struct min_max_quirk min_max_pnpid_table[] = {
 		1232, 5710, 1156, 4696
 	},
 	{
-		(const char * const []){"LEN0034", "LEN0036", "LEN2004", NULL},
+		(const char * const []){"LEN0034", "LEN0036", "LEN2002",
+					"LEN2004", NULL},
 		1024, 5112, 2024, 4832
 	},
 	{
@@ -168,7 +169,7 @@ static const char * const topbuttonpad_pnp_ids[] = {
168 "LEN0049", 169 "LEN0049",
169 "LEN2000", 170 "LEN2000",
170 "LEN2001", /* Edge E431 */ 171 "LEN2001", /* Edge E431 */
171 "LEN2002", 172 "LEN2002", /* Edge E531 */
172 "LEN2003", 173 "LEN2003",
173 "LEN2004", /* L440 */ 174 "LEN2004", /* L440 */
174 "LEN2005", 175 "LEN2005",
diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h
index 381b20d4c561..136b7b204f56 100644
--- a/drivers/input/serio/i8042-x86ia64io.h
+++ b/drivers/input/serio/i8042-x86ia64io.h
@@ -402,6 +402,13 @@ static const struct dmi_system_id __initconst i8042_dmi_nomux_table[] = {
 		},
 	},
 	{
+		/* Acer Aspire 5710 */
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+			DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5710"),
+		},
+	},
+	{
 		/* Gericom Bellagio */
 		.matches = {
 			DMI_MATCH(DMI_SYS_VENDOR, "Gericom"),
diff --git a/drivers/input/tablet/wacom_wac.c b/drivers/input/tablet/wacom_wac.c
index 977d05cd9e2e..e73cf2c71f35 100644
--- a/drivers/input/tablet/wacom_wac.c
+++ b/drivers/input/tablet/wacom_wac.c
@@ -1217,9 +1217,9 @@ static void wacom_bpt3_touch_msg(struct wacom_wac *wacom, unsigned char *data)
 		 * a=(pi*r^2)/C.
 		 */
 		int a = data[5];
-		int x_res = input_abs_get_res(input, ABS_X);
-		int y_res = input_abs_get_res(input, ABS_Y);
+		int x_res = input_abs_get_res(input, ABS_MT_POSITION_X);
+		int y_res = input_abs_get_res(input, ABS_MT_POSITION_Y);
 		width = 2 * int_sqrt(a * WACOM_CONTACT_AREA_SCALE);
 		height = width * y_res / x_res;
 	}
 
@@ -1587,7 +1587,7 @@ static void wacom_abs_set_axis(struct input_dev *input_dev,
 		input_abs_set_res(input_dev, ABS_X, features->x_resolution);
 		input_abs_set_res(input_dev, ABS_Y, features->y_resolution);
 	} else {
-		if (features->touch_max <= 2) {
+		if (features->touch_max == 1) {
 			input_set_abs_params(input_dev, ABS_X, 0,
 				features->x_max, features->x_fuzz, 0);
 			input_set_abs_params(input_dev, ABS_Y, 0,
@@ -1815,14 +1815,8 @@ int wacom_setup_input_capabilities(struct input_dev *input_dev,
 	case MTTPC:
 	case MTTPC_B:
 	case TABLETPC2FG:
-		if (features->device_type == BTN_TOOL_FINGER) {
-			unsigned int flags = INPUT_MT_DIRECT;
-
-			if (wacom_wac->features.type == TABLETPC2FG)
-				flags = 0;
-
-			input_mt_init_slots(input_dev, features->touch_max, flags);
-		}
+		if (features->device_type == BTN_TOOL_FINGER && features->touch_max > 1)
+			input_mt_init_slots(input_dev, features->touch_max, INPUT_MT_DIRECT);
 		/* fall through */
 
 	case TABLETPC:
@@ -1883,10 +1877,6 @@ int wacom_setup_input_capabilities(struct input_dev *input_dev,
 		__set_bit(BTN_RIGHT, input_dev->keybit);
 
 		if (features->touch_max) {
-			/* touch interface */
-			unsigned int flags = INPUT_MT_POINTER;
-
-			__set_bit(INPUT_PROP_POINTER, input_dev->propbit);
 			if (features->pktlen == WACOM_PKGLEN_BBTOUCH3) {
 				input_set_abs_params(input_dev,
 					     ABS_MT_TOUCH_MAJOR,
@@ -1894,12 +1884,8 @@ int wacom_setup_input_capabilities(struct input_dev *input_dev,
 				input_set_abs_params(input_dev,
 					     ABS_MT_TOUCH_MINOR,
 					     0, features->y_max, 0, 0);
-			} else {
-				__set_bit(BTN_TOOL_FINGER, input_dev->keybit);
-				__set_bit(BTN_TOOL_DOUBLETAP, input_dev->keybit);
-				flags = 0;
 			}
-			input_mt_init_slots(input_dev, features->touch_max, flags);
+			input_mt_init_slots(input_dev, features->touch_max, INPUT_MT_POINTER);
 		} else {
 			/* buttons/keys only interface */
 			__clear_bit(ABS_X, input_dev->absbit);
diff --git a/drivers/input/touchscreen/ti_am335x_tsc.c b/drivers/input/touchscreen/ti_am335x_tsc.c
index 4e793a17361f..2ce649520fe0 100644
--- a/drivers/input/touchscreen/ti_am335x_tsc.c
+++ b/drivers/input/touchscreen/ti_am335x_tsc.c
@@ -359,9 +359,12 @@ static int titsc_parse_dt(struct platform_device *pdev,
 	 */
 	err = of_property_read_u32(node, "ti,coordinate-readouts",
 			&ts_dev->coordinate_readouts);
-	if (err < 0)
+	if (err < 0) {
+		dev_warn(&pdev->dev, "please use 'ti,coordinate-readouts' instead\n");
 		err = of_property_read_u32(node, "ti,coordiante-readouts",
 				&ts_dev->coordinate_readouts);
+	}
+
 	if (err < 0)
 		return err;
 
diff --git a/drivers/isdn/i4l/isdn_ppp.c b/drivers/isdn/i4l/isdn_ppp.c
index a333b7f798d1..62f0688d45a5 100644
--- a/drivers/isdn/i4l/isdn_ppp.c
+++ b/drivers/isdn/i4l/isdn_ppp.c
@@ -638,9 +638,15 @@ isdn_ppp_ioctl(int min, struct file *file, unsigned int cmd, unsigned long arg)
 		fprog.len = len;
 		fprog.filter = code;
 
-		if (is->pass_filter)
+		if (is->pass_filter) {
 			sk_unattached_filter_destroy(is->pass_filter);
-		err = sk_unattached_filter_create(&is->pass_filter, &fprog);
+			is->pass_filter = NULL;
+		}
+		if (fprog.filter != NULL)
+			err = sk_unattached_filter_create(&is->pass_filter,
+							  &fprog);
+		else
+			err = 0;
 		kfree(code);
 
 		return err;
@@ -657,9 +663,15 @@ isdn_ppp_ioctl(int min, struct file *file, unsigned int cmd, unsigned long arg)
 		fprog.len = len;
 		fprog.filter = code;
 
-		if (is->active_filter)
+		if (is->active_filter) {
 			sk_unattached_filter_destroy(is->active_filter);
-		err = sk_unattached_filter_create(&is->active_filter, &fprog);
+			is->active_filter = NULL;
+		}
+		if (fprog.filter != NULL)
+			err = sk_unattached_filter_create(&is->active_filter,
+							  &fprog);
+		else
+			err = 0;
 		kfree(code);
 
 		return err;
diff --git a/drivers/media/dvb-frontends/si2168.c b/drivers/media/dvb-frontends/si2168.c
index 8637d2ed7623..2e3cdcfa0a67 100644
--- a/drivers/media/dvb-frontends/si2168.c
+++ b/drivers/media/dvb-frontends/si2168.c
@@ -60,7 +60,7 @@ static int si2168_cmd_execute(struct si2168 *s, struct si2168_cmd *cmd)
 			jiffies_to_msecs(jiffies) -
 			(jiffies_to_msecs(timeout) - TIMEOUT));
 
-	if (!(cmd->args[0] >> 7) & 0x01) {
+	if (!((cmd->args[0] >> 7) & 0x01)) {
 		ret = -ETIMEDOUT;
 		goto err_mutex_unlock;
 	}
@@ -485,20 +485,6 @@ static int si2168_init(struct dvb_frontend *fe)
 	if (ret)
 		goto err;
 
-	cmd.args[0] = 0x05;
-	cmd.args[1] = 0x00;
-	cmd.args[2] = 0xaa;
-	cmd.args[3] = 0x4d;
-	cmd.args[4] = 0x56;
-	cmd.args[5] = 0x40;
-	cmd.args[6] = 0x00;
-	cmd.args[7] = 0x00;
-	cmd.wlen = 8;
-	cmd.rlen = 1;
-	ret = si2168_cmd_execute(s, &cmd);
-	if (ret)
-		goto err;
-
 	/* cold state - try to download firmware */
 	dev_info(&s->client->dev, "%s: found a '%s' in cold state\n",
 			KBUILD_MODNAME, si2168_ops.info.name);
diff --git a/drivers/media/dvb-frontends/si2168_priv.h b/drivers/media/dvb-frontends/si2168_priv.h
index 2a343e896f40..53f7f06ae343 100644
--- a/drivers/media/dvb-frontends/si2168_priv.h
+++ b/drivers/media/dvb-frontends/si2168_priv.h
@@ -22,7 +22,7 @@
 #include <linux/firmware.h>
 #include <linux/i2c-mux.h>
 
-#define SI2168_FIRMWARE "dvb-demod-si2168-01.fw"
+#define SI2168_FIRMWARE "dvb-demod-si2168-02.fw"
 
 /* state struct */
 struct si2168 {
diff --git a/drivers/media/dvb-frontends/tda10071.c b/drivers/media/dvb-frontends/tda10071.c
index 522fe00f5eee..9619be5d4827 100644
--- a/drivers/media/dvb-frontends/tda10071.c
+++ b/drivers/media/dvb-frontends/tda10071.c
@@ -668,6 +668,7 @@ static int tda10071_set_frontend(struct dvb_frontend *fe)
 	struct dtv_frontend_properties *c = &fe->dtv_property_cache;
 	int ret, i;
 	u8 mode, rolloff, pilot, inversion, div;
+	fe_modulation_t modulation;
 
 	dev_dbg(&priv->i2c->dev,
 		"%s: delivery_system=%d modulation=%d frequency=%d symbol_rate=%d inversion=%d pilot=%d rolloff=%d\n",
@@ -702,10 +703,13 @@ static int tda10071_set_frontend(struct dvb_frontend *fe)
 
 	switch (c->delivery_system) {
 	case SYS_DVBS:
+		modulation = QPSK;
 		rolloff = 0;
 		pilot = 2;
 		break;
 	case SYS_DVBS2:
+		modulation = c->modulation;
+
 		switch (c->rolloff) {
 		case ROLLOFF_20:
 			rolloff = 2;
@@ -750,7 +754,7 @@ static int tda10071_set_frontend(struct dvb_frontend *fe)
 
 	for (i = 0, mode = 0xff; i < ARRAY_SIZE(TDA10071_MODCOD); i++) {
 		if (c->delivery_system == TDA10071_MODCOD[i].delivery_system &&
-			c->modulation == TDA10071_MODCOD[i].modulation &&
+			modulation == TDA10071_MODCOD[i].modulation &&
 			c->fec_inner == TDA10071_MODCOD[i].fec) {
 			mode = TDA10071_MODCOD[i].val;
 			dev_dbg(&priv->i2c->dev, "%s: mode found=%02x\n",
@@ -834,10 +838,10 @@ static int tda10071_get_frontend(struct dvb_frontend *fe)
 
 	switch ((buf[1] >> 0) & 0x01) {
 	case 0:
-		c->inversion = INVERSION_OFF;
+		c->inversion = INVERSION_ON;
 		break;
 	case 1:
-		c->inversion = INVERSION_ON;
+		c->inversion = INVERSION_OFF;
 		break;
 	}
 
@@ -856,7 +860,7 @@ static int tda10071_get_frontend(struct dvb_frontend *fe)
 	if (ret)
 		goto error;
 
-	c->symbol_rate = (buf[0] << 16) | (buf[1] << 8) | (buf[2] << 0);
+	c->symbol_rate = ((buf[0] << 16) | (buf[1] << 8) | (buf[2] << 0)) * 1000;
 
 	return ret;
 error:
diff --git a/drivers/media/dvb-frontends/tda10071_priv.h b/drivers/media/dvb-frontends/tda10071_priv.h
index 4baf14bfb65a..420486192736 100644
--- a/drivers/media/dvb-frontends/tda10071_priv.h
+++ b/drivers/media/dvb-frontends/tda10071_priv.h
@@ -55,6 +55,7 @@ static struct tda10071_modcod {
 	{ SYS_DVBS2, QPSK, FEC_8_9, 0x0a },
 	{ SYS_DVBS2, QPSK, FEC_9_10, 0x0b },
 	/* 8PSK */
+	{ SYS_DVBS2, PSK_8, FEC_AUTO, 0x00 },
 	{ SYS_DVBS2, PSK_8, FEC_3_5, 0x0c },
 	{ SYS_DVBS2, PSK_8, FEC_2_3, 0x0d },
 	{ SYS_DVBS2, PSK_8, FEC_3_4, 0x0e },
diff --git a/drivers/media/pci/saa7134/saa7134-empress.c b/drivers/media/pci/saa7134/saa7134-empress.c
index e65c760e4e8b..0006d6bf8c18 100644
--- a/drivers/media/pci/saa7134/saa7134-empress.c
+++ b/drivers/media/pci/saa7134/saa7134-empress.c
@@ -179,7 +179,7 @@ static const struct v4l2_file_operations ts_fops =
 	.read = vb2_fop_read,
 	.poll = vb2_fop_poll,
 	.mmap = vb2_fop_mmap,
-	.ioctl = video_ioctl2,
+	.unlocked_ioctl = video_ioctl2,
 };
 
 static const struct v4l2_ioctl_ops ts_ioctl_ops = {
diff --git a/drivers/media/platform/davinci/vpif_capture.c b/drivers/media/platform/davinci/vpif_capture.c
index a7ed16497903..1e4ec697fb10 100644
--- a/drivers/media/platform/davinci/vpif_capture.c
+++ b/drivers/media/platform/davinci/vpif_capture.c
@@ -269,6 +269,7 @@ err:
 		list_del(&buf->list);
 		vb2_buffer_done(&buf->vb, VB2_BUF_STATE_QUEUED);
 	}
+	spin_unlock_irqrestore(&common->irqlock, flags);
 
 	return ret;
 }
diff --git a/drivers/media/platform/davinci/vpif_display.c b/drivers/media/platform/davinci/vpif_display.c
index 5bb085b19bcb..b431b58f39e3 100644
--- a/drivers/media/platform/davinci/vpif_display.c
+++ b/drivers/media/platform/davinci/vpif_display.c
@@ -233,6 +233,7 @@ err:
 		list_del(&buf->list);
 		vb2_buffer_done(&buf->vb, VB2_BUF_STATE_QUEUED);
 	}
+	spin_unlock_irqrestore(&common->irqlock, flags);
 
 	return ret;
 }
diff --git a/drivers/media/tuners/si2157.c b/drivers/media/tuners/si2157.c
index 271a752cee54..fa4cc7b880aa 100644
--- a/drivers/media/tuners/si2157.c
+++ b/drivers/media/tuners/si2157.c
@@ -57,7 +57,7 @@ static int si2157_cmd_execute(struct si2157 *s, struct si2157_cmd *cmd)
 			jiffies_to_msecs(jiffies) -
 			(jiffies_to_msecs(timeout) - TIMEOUT));
 
-	if (!(buf[0] >> 7) & 0x01) {
+	if (!((buf[0] >> 7) & 0x01)) {
 		ret = -ETIMEDOUT;
 		goto err_mutex_unlock;
 	} else {
diff --git a/drivers/media/usb/dvb-usb-v2/af9035.c b/drivers/media/usb/dvb-usb-v2/af9035.c
index 021e4d35e4d7..7b9b75f60774 100644
--- a/drivers/media/usb/dvb-usb-v2/af9035.c
+++ b/drivers/media/usb/dvb-usb-v2/af9035.c
@@ -704,15 +704,41 @@ static int af9035_read_config(struct dvb_usb_device *d)
 		if (ret < 0)
 			goto err;
 
-		if (tmp == 0x00)
-			dev_dbg(&d->udev->dev,
-					"%s: [%d]tuner not set, using default\n",
-					__func__, i);
-		else
+		dev_dbg(&d->udev->dev, "%s: [%d]tuner=%02x\n",
+				__func__, i, tmp);
+
+		/* tuner sanity check */
+		if (state->chip_type == 0x9135) {
+			if (state->chip_version == 0x02) {
+				/* IT9135 BX (v2) */
+				switch (tmp) {
+				case AF9033_TUNER_IT9135_60:
+				case AF9033_TUNER_IT9135_61:
+				case AF9033_TUNER_IT9135_62:
+					state->af9033_config[i].tuner = tmp;
+					break;
+				}
+			} else {
+				/* IT9135 AX (v1) */
+				switch (tmp) {
+				case AF9033_TUNER_IT9135_38:
+				case AF9033_TUNER_IT9135_51:
+				case AF9033_TUNER_IT9135_52:
+					state->af9033_config[i].tuner = tmp;
+					break;
+				}
+			}
+		} else {
+			/* AF9035 */
 			state->af9033_config[i].tuner = tmp;
+		}
 
-		dev_dbg(&d->udev->dev, "%s: [%d]tuner=%02x\n",
-				__func__, i, state->af9033_config[i].tuner);
+		if (state->af9033_config[i].tuner != tmp) {
+			dev_info(&d->udev->dev,
+					"%s: [%d] overriding tuner from %02x to %02x\n",
+					KBUILD_MODNAME, i, tmp,
+					state->af9033_config[i].tuner);
+		}
 
 		switch (state->af9033_config[i].tuner) {
 		case AF9033_TUNER_TUA9001:
diff --git a/drivers/media/usb/gspca/pac7302.c b/drivers/media/usb/gspca/pac7302.c
index 2fd1c5e31a0f..339adce7c7a5 100644
--- a/drivers/media/usb/gspca/pac7302.c
+++ b/drivers/media/usb/gspca/pac7302.c
@@ -928,6 +928,7 @@ static const struct usb_device_id device_table[] = {
 	{USB_DEVICE(0x093a, 0x2620)},
 	{USB_DEVICE(0x093a, 0x2621)},
 	{USB_DEVICE(0x093a, 0x2622), .driver_info = FL_VFLIP},
+	{USB_DEVICE(0x093a, 0x2623), .driver_info = FL_VFLIP},
 	{USB_DEVICE(0x093a, 0x2624), .driver_info = FL_VFLIP},
 	{USB_DEVICE(0x093a, 0x2625)},
 	{USB_DEVICE(0x093a, 0x2626)},
diff --git a/drivers/media/usb/hdpvr/hdpvr-video.c b/drivers/media/usb/hdpvr/hdpvr-video.c
index 0500c4175d5f..6bce01a674f9 100644
--- a/drivers/media/usb/hdpvr/hdpvr-video.c
+++ b/drivers/media/usb/hdpvr/hdpvr-video.c
@@ -82,7 +82,7 @@ static void hdpvr_read_bulk_callback(struct urb *urb)
 }
 
 /*=========================================================================*/
-/* bufffer bits */
+/* buffer bits */
 
 /* function expects dev->io_mutex to be hold by caller */
 int hdpvr_cancel_queue(struct hdpvr_device *dev)
@@ -926,7 +926,7 @@ static int hdpvr_s_ctrl(struct v4l2_ctrl *ctrl)
 	case V4L2_CID_MPEG_AUDIO_ENCODING:
 		if (dev->flags & HDPVR_FLAG_AC3_CAP) {
 			opt->audio_codec = ctrl->val;
-			return hdpvr_set_audio(dev, opt->audio_input,
+			return hdpvr_set_audio(dev, opt->audio_input + 1,
 					      opt->audio_codec);
 		}
 		return 0;
@@ -1198,7 +1198,7 @@ int hdpvr_register_videodev(struct hdpvr_device *dev, struct device *parent,
1198 v4l2_ctrl_new_std_menu(hdl, &hdpvr_ctrl_ops, 1198 v4l2_ctrl_new_std_menu(hdl, &hdpvr_ctrl_ops,
1199 V4L2_CID_MPEG_AUDIO_ENCODING, 1199 V4L2_CID_MPEG_AUDIO_ENCODING,
1200 ac3 ? V4L2_MPEG_AUDIO_ENCODING_AC3 : V4L2_MPEG_AUDIO_ENCODING_AAC, 1200 ac3 ? V4L2_MPEG_AUDIO_ENCODING_AC3 : V4L2_MPEG_AUDIO_ENCODING_AAC,
1201 0x7, V4L2_MPEG_AUDIO_ENCODING_AAC); 1201 0x7, ac3 ? dev->options.audio_codec : V4L2_MPEG_AUDIO_ENCODING_AAC);
1202 v4l2_ctrl_new_std_menu(hdl, &hdpvr_ctrl_ops, 1202 v4l2_ctrl_new_std_menu(hdl, &hdpvr_ctrl_ops,
1203 V4L2_CID_MPEG_VIDEO_ENCODING, 1203 V4L2_CID_MPEG_VIDEO_ENCODING,
1204 V4L2_MPEG_VIDEO_ENCODING_MPEG_4_AVC, 0x3, 1204 V4L2_MPEG_VIDEO_ENCODING_MPEG_4_AVC, 0x3,
diff --git a/drivers/media/v4l2-core/v4l2-dv-timings.c b/drivers/media/v4l2-core/v4l2-dv-timings.c
index 4ae54caadd03..ce1c9f5d9dee 100644
--- a/drivers/media/v4l2-core/v4l2-dv-timings.c
+++ b/drivers/media/v4l2-core/v4l2-dv-timings.c
@@ -610,10 +610,10 @@ struct v4l2_fract v4l2_calc_aspect_ratio(u8 hor_landscape, u8 vert_portrait)
610 aspect.denominator = 9; 610 aspect.denominator = 9;
611 } else if (ratio == 34) { 611 } else if (ratio == 34) {
612 aspect.numerator = 4; 612 aspect.numerator = 4;
613 aspect.numerator = 3; 613 aspect.denominator = 3;
614 } else if (ratio == 68) { 614 } else if (ratio == 68) {
615 aspect.numerator = 15; 615 aspect.numerator = 15;
616 aspect.numerator = 9; 616 aspect.denominator = 9;
617 } else { 617 } else {
618 aspect.numerator = hor_landscape + 99; 618 aspect.numerator = hor_landscape + 99;
619 aspect.denominator = 100; 619 aspect.denominator = 100;
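
The v4l2_calc_aspect_ratio() hunk above fixes two copy-paste slips: aspect.numerator was assigned twice, so the denominator was never written for the 4:3 and 15:9 cases. A minimal userspace sketch of the corrected mapping; the ratio codes come from the hunk, the 16:9 default and everything else is illustrative:

#include <assert.h>

struct fract { unsigned numerator, denominator; };

/* Corrected mapping: code 34 yields 4:3 and code 68 yields 15:9. */
static struct fract ratio_to_aspect(unsigned ratio)
{
    struct fract aspect = { 16, 9 };        /* illustrative default */

    if (ratio == 34)
        aspect = (struct fract){ 4, 3 };
    else if (ratio == 68)
        aspect = (struct fract){ 15, 9 };
    return aspect;
}

int main(void)
{
    assert(ratio_to_aspect(34).numerator == 4 && ratio_to_aspect(34).denominator == 3);
    assert(ratio_to_aspect(68).numerator == 15 && ratio_to_aspect(68).denominator == 9);
    return 0;
}
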
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_cq.c b/drivers/net/ethernet/mellanox/mlx4/en_cq.c
index 14c00048bbec..82322b1c8411 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_cq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_cq.c
@@ -129,14 +129,15 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
129 name); 129 name);
130 } 130 }
131 131
132 cq->irq_desc =
133 irq_to_desc(mlx4_eq_get_irq(mdev->dev,
134 cq->vector));
135 } 132 }
136 } else { 133 } else {
137 cq->vector = (cq->ring + 1 + priv->port) % 134 cq->vector = (cq->ring + 1 + priv->port) %
138 mdev->dev->caps.num_comp_vectors; 135 mdev->dev->caps.num_comp_vectors;
139 } 136 }
137
138 cq->irq_desc =
139 irq_to_desc(mlx4_eq_get_irq(mdev->dev,
140 cq->vector));
140 } else { 141 } else {
141 /* For TX we use the same irq per 142 /* For TX we use the same irq per
142 ring we assigned for the RX */ 143 ring we assigned for the RX */
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index 06bdc31a828d..61623e9af574 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -4240,6 +4240,8 @@ static void rtl_init_rxcfg(struct rtl8169_private *tp)
4240 RTL_W32(RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST); 4240 RTL_W32(RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST);
4241 break; 4241 break;
4242 case RTL_GIGA_MAC_VER_40: 4242 case RTL_GIGA_MAC_VER_40:
4243 RTL_W32(RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST | RX_EARLY_OFF);
4244 break;
4243 case RTL_GIGA_MAC_VER_41: 4245 case RTL_GIGA_MAC_VER_41:
4244 case RTL_GIGA_MAC_VER_42: 4246 case RTL_GIGA_MAC_VER_42:
4245 case RTL_GIGA_MAC_VER_43: 4247 case RTL_GIGA_MAC_VER_43:
diff --git a/drivers/net/ethernet/sun/sunvnet.c b/drivers/net/ethernet/sun/sunvnet.c
index 1c24a8f368bd..fd411d6e19a2 100644
--- a/drivers/net/ethernet/sun/sunvnet.c
+++ b/drivers/net/ethernet/sun/sunvnet.c
@@ -1083,6 +1083,24 @@ static struct vnet *vnet_find_or_create(const u64 *local_mac)
1083 return vp; 1083 return vp;
1084} 1084}
1085 1085
1086static void vnet_cleanup(void)
1087{
1088 struct vnet *vp;
1089 struct net_device *dev;
1090
1091 mutex_lock(&vnet_list_mutex);
1092 while (!list_empty(&vnet_list)) {
1093 vp = list_first_entry(&vnet_list, struct vnet, list);
1094 list_del(&vp->list);
1095 dev = vp->dev;
1096 /* vio_unregister_driver() should have cleaned up port_list */
1097 BUG_ON(!list_empty(&vp->port_list));
1098 unregister_netdev(dev);
1099 free_netdev(dev);
1100 }
1101 mutex_unlock(&vnet_list_mutex);
1102}
1103
1086static const char *local_mac_prop = "local-mac-address"; 1104static const char *local_mac_prop = "local-mac-address";
1087 1105
1088static struct vnet *vnet_find_parent(struct mdesc_handle *hp, 1106static struct vnet *vnet_find_parent(struct mdesc_handle *hp,
@@ -1240,7 +1258,6 @@ static int vnet_port_remove(struct vio_dev *vdev)
1240 1258
1241 kfree(port); 1259 kfree(port);
1242 1260
1243 unregister_netdev(vp->dev);
1244 } 1261 }
1245 return 0; 1262 return 0;
1246} 1263}
@@ -1268,6 +1285,7 @@ static int __init vnet_init(void)
1268static void __exit vnet_exit(void) 1285static void __exit vnet_exit(void)
1269{ 1286{
1270 vio_unregister_driver(&vnet_port_driver); 1287 vio_unregister_driver(&vnet_port_driver);
1288 vnet_cleanup();
1271} 1289}
1272 1290
1273module_init(vnet_init); 1291module_init(vnet_init);
diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
index e2f20f807de8..d5b77ef3a210 100644
--- a/drivers/net/ppp/ppp_generic.c
+++ b/drivers/net/ppp/ppp_generic.c
@@ -757,10 +757,15 @@ static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
757 }; 757 };
758 758
759 ppp_lock(ppp); 759 ppp_lock(ppp);
760 if (ppp->pass_filter) 760 if (ppp->pass_filter) {
761 sk_unattached_filter_destroy(ppp->pass_filter); 761 sk_unattached_filter_destroy(ppp->pass_filter);
762 err = sk_unattached_filter_create(&ppp->pass_filter, 762 ppp->pass_filter = NULL;
763 &fprog); 763 }
764 if (fprog.filter != NULL)
765 err = sk_unattached_filter_create(&ppp->pass_filter,
766 &fprog);
767 else
768 err = 0;
764 kfree(code); 769 kfree(code);
765 ppp_unlock(ppp); 770 ppp_unlock(ppp);
766 } 771 }
@@ -778,10 +783,15 @@ static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
778 }; 783 };
779 784
780 ppp_lock(ppp); 785 ppp_lock(ppp);
781 if (ppp->active_filter) 786 if (ppp->active_filter) {
782 sk_unattached_filter_destroy(ppp->active_filter); 787 sk_unattached_filter_destroy(ppp->active_filter);
783 err = sk_unattached_filter_create(&ppp->active_filter, 788 ppp->active_filter = NULL;
784 &fprog); 789 }
790 if (fprog.filter != NULL)
791 err = sk_unattached_filter_create(&ppp->active_filter,
792 &fprog);
793 else
794 err = 0;
785 kfree(code); 795 kfree(code);
786 ppp_unlock(ppp); 796 ppp_unlock(ppp);
787 } 797 }
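
The ppp_generic hunk above makes PPPIOCSPASS/PPPIOCSACTIVE treat an empty filter program as "remove the current filter": the old filter is always destroyed and its pointer cleared, and a new one is created only when the user actually supplied instructions. A sketch of that logic; the filter type and the create/destroy helpers are hypothetical stand-ins for the socket-filter API used by the driver:

#include <errno.h>
#include <stddef.h>
#include <stdlib.h>

struct filter { int dummy; };                    /* hypothetical */
struct fprog  { size_t len; void *insns; };      /* hypothetical */

static void filter_destroy(struct filter *f) { free(f); }

static int filter_create(struct filter **f, const struct fprog *p)
{
    /* a real implementation would validate and copy p->insns */
    *f = malloc(sizeof(**f));
    return *f ? 0 : -ENOMEM;
}

/*
 * Always drop the old filter and clear the slot; only build a new one
 * when a program was supplied. An empty program now means "no filter",
 * not an error, and no stale pointer is left behind.
 */
static int set_filter(struct filter **slot, const struct fprog *prog)
{
    int err = 0;

    if (*slot) {
        filter_destroy(*slot);
        *slot = NULL;
    }
    if (prog->insns != NULL)
        err = filter_create(slot, prog);
    return err;
}
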
diff --git a/drivers/net/usb/huawei_cdc_ncm.c b/drivers/net/usb/huawei_cdc_ncm.c
index 5d95a13dbe2a..735f7dadb9a0 100644
--- a/drivers/net/usb/huawei_cdc_ncm.c
+++ b/drivers/net/usb/huawei_cdc_ncm.c
@@ -194,6 +194,9 @@ static const struct usb_device_id huawei_cdc_ncm_devs[] = {
194 { USB_VENDOR_AND_INTERFACE_INFO(0x12d1, 0xff, 0x02, 0x76), 194 { USB_VENDOR_AND_INTERFACE_INFO(0x12d1, 0xff, 0x02, 0x76),
195 .driver_info = (unsigned long)&huawei_cdc_ncm_info, 195 .driver_info = (unsigned long)&huawei_cdc_ncm_info,
196 }, 196 },
197 { USB_VENDOR_AND_INTERFACE_INFO(0x12d1, 0xff, 0x03, 0x16),
198 .driver_info = (unsigned long)&huawei_cdc_ncm_info,
199 },
197 200
198 /* Terminating entry */ 201 /* Terminating entry */
199 { 202 {
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index c4638c67f6b9..22756db53dca 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -667,6 +667,7 @@ static const struct usb_device_id products[] = {
667 {QMI_FIXED_INTF(0x05c6, 0x9084, 4)}, 667 {QMI_FIXED_INTF(0x05c6, 0x9084, 4)},
668 {QMI_FIXED_INTF(0x05c6, 0x920d, 0)}, 668 {QMI_FIXED_INTF(0x05c6, 0x920d, 0)},
669 {QMI_FIXED_INTF(0x05c6, 0x920d, 5)}, 669 {QMI_FIXED_INTF(0x05c6, 0x920d, 5)},
670 {QMI_FIXED_INTF(0x0846, 0x68a2, 8)},
670 {QMI_FIXED_INTF(0x12d1, 0x140c, 1)}, /* Huawei E173 */ 671 {QMI_FIXED_INTF(0x12d1, 0x140c, 1)}, /* Huawei E173 */
671 {QMI_FIXED_INTF(0x12d1, 0x14ac, 1)}, /* Huawei E1820 */ 672 {QMI_FIXED_INTF(0x12d1, 0x14ac, 1)}, /* Huawei E1820 */
672 {QMI_FIXED_INTF(0x16d8, 0x6003, 0)}, /* CMOTech 6003 */ 673 {QMI_FIXED_INTF(0x16d8, 0x6003, 0)}, /* CMOTech 6003 */
@@ -757,6 +758,7 @@ static const struct usb_device_id products[] = {
757 {QMI_FIXED_INTF(0x1199, 0x9054, 8)}, /* Sierra Wireless Modem */ 758 {QMI_FIXED_INTF(0x1199, 0x9054, 8)}, /* Sierra Wireless Modem */
758 {QMI_FIXED_INTF(0x1199, 0x9055, 8)}, /* Netgear AirCard 341U */ 759 {QMI_FIXED_INTF(0x1199, 0x9055, 8)}, /* Netgear AirCard 341U */
759 {QMI_FIXED_INTF(0x1199, 0x9056, 8)}, /* Sierra Wireless Modem */ 760 {QMI_FIXED_INTF(0x1199, 0x9056, 8)}, /* Sierra Wireless Modem */
761 {QMI_FIXED_INTF(0x1199, 0x9057, 8)},
760 {QMI_FIXED_INTF(0x1199, 0x9061, 8)}, /* Sierra Wireless Modem */ 762 {QMI_FIXED_INTF(0x1199, 0x9061, 8)}, /* Sierra Wireless Modem */
761 {QMI_FIXED_INTF(0x1bbb, 0x011e, 4)}, /* Telekom Speedstick LTE II (Alcatel One Touch L100V LTE) */ 763 {QMI_FIXED_INTF(0x1bbb, 0x011e, 4)}, /* Telekom Speedstick LTE II (Alcatel One Touch L100V LTE) */
762 {QMI_FIXED_INTF(0x1bbb, 0x0203, 2)}, /* Alcatel L800MA */ 764 {QMI_FIXED_INTF(0x1bbb, 0x0203, 2)}, /* Alcatel L800MA */
diff --git a/drivers/net/wan/x25_asy.c b/drivers/net/wan/x25_asy.c
index 5895f1978691..fa9fdfa128c1 100644
--- a/drivers/net/wan/x25_asy.c
+++ b/drivers/net/wan/x25_asy.c
@@ -122,8 +122,12 @@ static int x25_asy_change_mtu(struct net_device *dev, int newmtu)
122{ 122{
123 struct x25_asy *sl = netdev_priv(dev); 123 struct x25_asy *sl = netdev_priv(dev);
124 unsigned char *xbuff, *rbuff; 124 unsigned char *xbuff, *rbuff;
125 int len = 2 * newmtu; 125 int len;
126 126
127 if (newmtu > 65534)
128 return -EINVAL;
129
130 len = 2 * newmtu;
127 xbuff = kmalloc(len + 4, GFP_ATOMIC); 131 xbuff = kmalloc(len + 4, GFP_ATOMIC);
128 rbuff = kmalloc(len + 4, GFP_ATOMIC); 132 rbuff = kmalloc(len + 4, GFP_ATOMIC);
129 133
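
The x25_asy hunk above bounds the requested MTU before the transmit and receive buffers are sized as 2 * newmtu + 4, so an oversized value can no longer drive the allocation length out of range. A hedged userspace sketch of the same guard; the 65534 limit is taken from the hunk, the lower-bound check is an extra belt-and-braces addition of the sketch:

#include <errno.h>
#include <stdlib.h>

#define X25_ASY_MAX_MTU 65534   /* limit taken from the hunk above */

static int change_mtu_checked(int newmtu, unsigned char **xbuff,
                              unsigned char **rbuff)
{
    size_t len;

    if (newmtu <= 0 || newmtu > X25_ASY_MAX_MTU)
        return -EINVAL;

    len = 2 * (size_t)newmtu;           /* now known to be a sane size */
    *xbuff = malloc(len + 4);
    *rbuff = malloc(len + 4);
    if (!*xbuff || !*rbuff) {
        free(*xbuff);
        free(*rbuff);
        return -ENOMEM;
    }
    return 0;
}
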
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index 1844a47636b6..c65b636bcab9 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -1030,14 +1030,21 @@ static int xenvif_tx_check_gop(struct xenvif_queue *queue,
1030{ 1030{
1031 struct gnttab_map_grant_ref *gop_map = *gopp_map; 1031 struct gnttab_map_grant_ref *gop_map = *gopp_map;
1032 u16 pending_idx = XENVIF_TX_CB(skb)->pending_idx; 1032 u16 pending_idx = XENVIF_TX_CB(skb)->pending_idx;
1033 /* This always points to the shinfo of the skb being checked, which
1034 * could be either the first or the one on the frag_list
1035 */
1033 struct skb_shared_info *shinfo = skb_shinfo(skb); 1036 struct skb_shared_info *shinfo = skb_shinfo(skb);
1037 /* If this is non-NULL, we are currently checking the frag_list skb, and
1038 * this points to the shinfo of the first one
1039 */
1040 struct skb_shared_info *first_shinfo = NULL;
1034 int nr_frags = shinfo->nr_frags; 1041 int nr_frags = shinfo->nr_frags;
1042 const bool sharedslot = nr_frags &&
1043 frag_get_pending_idx(&shinfo->frags[0]) == pending_idx;
1035 int i, err; 1044 int i, err;
1036 struct sk_buff *first_skb = NULL;
1037 1045
1038 /* Check status of header. */ 1046 /* Check status of header. */
1039 err = (*gopp_copy)->status; 1047 err = (*gopp_copy)->status;
1040 (*gopp_copy)++;
1041 if (unlikely(err)) { 1048 if (unlikely(err)) {
1042 if (net_ratelimit()) 1049 if (net_ratelimit())
1043 netdev_dbg(queue->vif->dev, 1050 netdev_dbg(queue->vif->dev,
@@ -1045,8 +1052,12 @@ static int xenvif_tx_check_gop(struct xenvif_queue *queue,
1045 (*gopp_copy)->status, 1052 (*gopp_copy)->status,
1046 pending_idx, 1053 pending_idx,
1047 (*gopp_copy)->source.u.ref); 1054 (*gopp_copy)->source.u.ref);
1048 xenvif_idx_release(queue, pending_idx, XEN_NETIF_RSP_ERROR); 1055 /* The first frag might still have this slot mapped */
1056 if (!sharedslot)
1057 xenvif_idx_release(queue, pending_idx,
1058 XEN_NETIF_RSP_ERROR);
1049 } 1059 }
1060 (*gopp_copy)++;
1050 1061
1051check_frags: 1062check_frags:
1052 for (i = 0; i < nr_frags; i++, gop_map++) { 1063 for (i = 0; i < nr_frags; i++, gop_map++) {
@@ -1062,8 +1073,19 @@ check_frags:
1062 pending_idx, 1073 pending_idx,
1063 gop_map->handle); 1074 gop_map->handle);
1064 /* Had a previous error? Invalidate this fragment. */ 1075 /* Had a previous error? Invalidate this fragment. */
1065 if (unlikely(err)) 1076 if (unlikely(err)) {
1066 xenvif_idx_unmap(queue, pending_idx); 1077 xenvif_idx_unmap(queue, pending_idx);
1078 /* If the mapping of the first frag was OK, but
1079 * the header's copy failed, and they are
1080 * sharing a slot, send an error
1081 */
1082 if (i == 0 && sharedslot)
1083 xenvif_idx_release(queue, pending_idx,
1084 XEN_NETIF_RSP_ERROR);
1085 else
1086 xenvif_idx_release(queue, pending_idx,
1087 XEN_NETIF_RSP_OKAY);
1088 }
1067 continue; 1089 continue;
1068 } 1090 }
1069 1091
@@ -1075,42 +1097,53 @@ check_frags:
1075 gop_map->status, 1097 gop_map->status,
1076 pending_idx, 1098 pending_idx,
1077 gop_map->ref); 1099 gop_map->ref);
1100
1078 xenvif_idx_release(queue, pending_idx, XEN_NETIF_RSP_ERROR); 1101 xenvif_idx_release(queue, pending_idx, XEN_NETIF_RSP_ERROR);
1079 1102
1080 /* Not the first error? Preceding frags already invalidated. */ 1103 /* Not the first error? Preceding frags already invalidated. */
1081 if (err) 1104 if (err)
1082 continue; 1105 continue;
1083 /* First error: invalidate preceding fragments. */ 1106
 1107 /* First error: if the header hasn't shared a slot with the
1108 * first frag, release it as well.
1109 */
1110 if (!sharedslot)
1111 xenvif_idx_release(queue,
1112 XENVIF_TX_CB(skb)->pending_idx,
1113 XEN_NETIF_RSP_OKAY);
1114
1115 /* Invalidate preceding fragments of this skb. */
1084 for (j = 0; j < i; j++) { 1116 for (j = 0; j < i; j++) {
1085 pending_idx = frag_get_pending_idx(&shinfo->frags[j]); 1117 pending_idx = frag_get_pending_idx(&shinfo->frags[j]);
1086 xenvif_idx_unmap(queue, pending_idx); 1118 xenvif_idx_unmap(queue, pending_idx);
1119 xenvif_idx_release(queue, pending_idx,
1120 XEN_NETIF_RSP_OKAY);
1121 }
1122
1123 /* And if we found the error while checking the frag_list, unmap
1124 * the first skb's frags
1125 */
1126 if (first_shinfo) {
1127 for (j = 0; j < first_shinfo->nr_frags; j++) {
1128 pending_idx = frag_get_pending_idx(&first_shinfo->frags[j]);
1129 xenvif_idx_unmap(queue, pending_idx);
1130 xenvif_idx_release(queue, pending_idx,
1131 XEN_NETIF_RSP_OKAY);
1132 }
1087 } 1133 }
1088 1134
1089 /* Remember the error: invalidate all subsequent fragments. */ 1135 /* Remember the error: invalidate all subsequent fragments. */
1090 err = newerr; 1136 err = newerr;
1091 } 1137 }
1092 1138
1093 if (skb_has_frag_list(skb)) { 1139 if (skb_has_frag_list(skb) && !first_shinfo) {
1094 first_skb = skb; 1140 first_shinfo = skb_shinfo(skb);
1095 skb = shinfo->frag_list; 1141 shinfo = skb_shinfo(skb_shinfo(skb)->frag_list);
1096 shinfo = skb_shinfo(skb);
1097 nr_frags = shinfo->nr_frags; 1142 nr_frags = shinfo->nr_frags;
1098 1143
1099 goto check_frags; 1144 goto check_frags;
1100 } 1145 }
1101 1146
1102 /* There was a mapping error in the frag_list skb. We have to unmap
1103 * the first skb's frags
1104 */
1105 if (first_skb && err) {
1106 int j;
1107 shinfo = skb_shinfo(first_skb);
1108 for (j = 0; j < shinfo->nr_frags; j++) {
1109 pending_idx = frag_get_pending_idx(&shinfo->frags[j]);
1110 xenvif_idx_unmap(queue, pending_idx);
1111 }
1112 }
1113
1114 *gopp_map = gop_map; 1147 *gopp_map = gop_map;
1115 return err; 1148 return err;
1116} 1149}
@@ -1518,7 +1551,16 @@ static int xenvif_tx_submit(struct xenvif_queue *queue)
1518 1551
1519 /* Check the remap error code. */ 1552 /* Check the remap error code. */
1520 if (unlikely(xenvif_tx_check_gop(queue, skb, &gop_map, &gop_copy))) { 1553 if (unlikely(xenvif_tx_check_gop(queue, skb, &gop_map, &gop_copy))) {
1554 /* If there was an error, xenvif_tx_check_gop is
1555 * expected to release all the frags which were mapped,
1556 * so kfree_skb shouldn't do it again
1557 */
1521 skb_shinfo(skb)->nr_frags = 0; 1558 skb_shinfo(skb)->nr_frags = 0;
1559 if (skb_has_frag_list(skb)) {
1560 struct sk_buff *nskb =
1561 skb_shinfo(skb)->frag_list;
1562 skb_shinfo(nskb)->nr_frags = 0;
1563 }
1522 kfree_skb(skb); 1564 kfree_skb(skb);
1523 continue; 1565 continue;
1524 } 1566 }
@@ -1822,8 +1864,6 @@ void xenvif_idx_unmap(struct xenvif_queue *queue, u16 pending_idx)
1822 tx_unmap_op.status); 1864 tx_unmap_op.status);
1823 BUG(); 1865 BUG();
1824 } 1866 }
1825
1826 xenvif_idx_release(queue, pending_idx, XEN_NETIF_RSP_OKAY);
1827} 1867}
1828 1868
1829static inline int rx_work_todo(struct xenvif_queue *queue) 1869static inline int rx_work_todo(struct xenvif_queue *queue)
diff --git a/drivers/parport/Kconfig b/drivers/parport/Kconfig
index 2872ece81f35..44333bd8f908 100644
--- a/drivers/parport/Kconfig
+++ b/drivers/parport/Kconfig
@@ -5,6 +5,12 @@
5# Parport configuration. 5# Parport configuration.
6# 6#
7 7
8config ARCH_MIGHT_HAVE_PC_PARPORT
9 bool
10 help
11 Select this config option from the architecture Kconfig if
12 the architecture might have PC parallel port hardware.
13
8menuconfig PARPORT 14menuconfig PARPORT
9 tristate "Parallel port support" 15 tristate "Parallel port support"
10 depends on HAS_IOMEM 16 depends on HAS_IOMEM
@@ -31,12 +37,6 @@ menuconfig PARPORT
31 37
32 If unsure, say Y. 38 If unsure, say Y.
33 39
34config ARCH_MIGHT_HAVE_PC_PARPORT
35 bool
36 help
37 Select this config option from the architecture Kconfig if
38 the architecture might have PC parallel port hardware.
39
40if PARPORT 40if PARPORT
41 41
42config PARPORT_PC 42config PARPORT_PC
diff --git a/drivers/pinctrl/pinctrl-st.c b/drivers/pinctrl/pinctrl-st.c
index 1bd6363bc95e..9f43916637ca 100644
--- a/drivers/pinctrl/pinctrl-st.c
+++ b/drivers/pinctrl/pinctrl-st.c
@@ -1431,7 +1431,7 @@ static void st_gpio_irqmux_handler(unsigned irq, struct irq_desc *desc)
1431 1431
1432 status = readl(info->irqmux_base); 1432 status = readl(info->irqmux_base);
1433 1433
1434 for_each_set_bit(n, &status, ST_GPIO_PINS_PER_BANK) 1434 for_each_set_bit(n, &status, info->nbanks)
1435 __gpio_irq_handler(&info->banks[n]); 1435 __gpio_irq_handler(&info->banks[n]);
1436 1436
1437 chained_irq_exit(chip, desc); 1437 chained_irq_exit(chip, desc);
diff --git a/drivers/s390/char/raw3270.c b/drivers/s390/char/raw3270.c
index 15b3459f8656..220acb4cbee5 100644
--- a/drivers/s390/char/raw3270.c
+++ b/drivers/s390/char/raw3270.c
@@ -633,7 +633,6 @@ raw3270_reset_device_cb(struct raw3270_request *rq, void *data)
633 } else 633 } else
634 raw3270_writesf_readpart(rp); 634 raw3270_writesf_readpart(rp);
635 memset(&rp->init_reset, 0, sizeof(rp->init_reset)); 635 memset(&rp->init_reset, 0, sizeof(rp->init_reset));
636 memset(&rp->init_data, 0, sizeof(rp->init_data));
637} 636}
638 637
639static int 638static int
diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c
index 69ef4f8cfac8..4038437ff033 100644
--- a/drivers/s390/crypto/ap_bus.c
+++ b/drivers/s390/crypto/ap_bus.c
@@ -901,10 +901,15 @@ static int ap_device_probe(struct device *dev)
901 int rc; 901 int rc;
902 902
903 ap_dev->drv = ap_drv; 903 ap_dev->drv = ap_drv;
904
905 spin_lock_bh(&ap_device_list_lock);
906 list_add(&ap_dev->list, &ap_device_list);
907 spin_unlock_bh(&ap_device_list_lock);
908
904 rc = ap_drv->probe ? ap_drv->probe(ap_dev) : -ENODEV; 909 rc = ap_drv->probe ? ap_drv->probe(ap_dev) : -ENODEV;
905 if (!rc) { 910 if (rc) {
906 spin_lock_bh(&ap_device_list_lock); 911 spin_lock_bh(&ap_device_list_lock);
907 list_add(&ap_dev->list, &ap_device_list); 912 list_del_init(&ap_dev->list);
908 spin_unlock_bh(&ap_device_list_lock); 913 spin_unlock_bh(&ap_device_list_lock);
909 } 914 }
910 return rc; 915 return rc;
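
The ap_bus hunk above links the device into the bus list before calling the driver's probe callback and unlinks it only if probe fails, so anything probe triggers already sees the device listed. A short kernel-style sketch of that add-first/roll-back-on-error ordering, with hypothetical names (foo_dev, foo_list):

#include <linux/errno.h>
#include <linux/list.h>
#include <linux/spinlock.h>

struct foo_dev {                        /* hypothetical device type */
    struct list_head list;
};

static LIST_HEAD(foo_list);
static DEFINE_SPINLOCK(foo_list_lock);

static int foo_probe_one(struct foo_dev *dev, int (*probe)(struct foo_dev *))
{
    int rc;

    /* Publish the device first, so probe() sees a consistent list. */
    spin_lock_bh(&foo_list_lock);
    list_add(&dev->list, &foo_list);
    spin_unlock_bh(&foo_list_lock);

    rc = probe ? probe(dev) : -ENODEV;
    if (rc) {
        /* Roll back: a device that failed probe must not stay visible. */
        spin_lock_bh(&foo_list_lock);
        list_del_init(&dev->list);
        spin_unlock_bh(&foo_list_lock);
    }
    return rc;
}
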
diff --git a/drivers/sbus/char/bbc_envctrl.c b/drivers/sbus/char/bbc_envctrl.c
index 160e7510aca6..0787b9756165 100644
--- a/drivers/sbus/char/bbc_envctrl.c
+++ b/drivers/sbus/char/bbc_envctrl.c
@@ -452,6 +452,9 @@ static void attach_one_temp(struct bbc_i2c_bus *bp, struct platform_device *op,
452 if (!tp) 452 if (!tp)
453 return; 453 return;
454 454
455 INIT_LIST_HEAD(&tp->bp_list);
456 INIT_LIST_HEAD(&tp->glob_list);
457
455 tp->client = bbc_i2c_attach(bp, op); 458 tp->client = bbc_i2c_attach(bp, op);
456 if (!tp->client) { 459 if (!tp->client) {
457 kfree(tp); 460 kfree(tp);
@@ -497,6 +500,9 @@ static void attach_one_fan(struct bbc_i2c_bus *bp, struct platform_device *op,
497 if (!fp) 500 if (!fp)
498 return; 501 return;
499 502
503 INIT_LIST_HEAD(&fp->bp_list);
504 INIT_LIST_HEAD(&fp->glob_list);
505
500 fp->client = bbc_i2c_attach(bp, op); 506 fp->client = bbc_i2c_attach(bp, op);
501 if (!fp->client) { 507 if (!fp->client) {
502 kfree(fp); 508 kfree(fp);
diff --git a/drivers/sbus/char/bbc_i2c.c b/drivers/sbus/char/bbc_i2c.c
index c7763e482eb2..812b5f0361b6 100644
--- a/drivers/sbus/char/bbc_i2c.c
+++ b/drivers/sbus/char/bbc_i2c.c
@@ -300,13 +300,18 @@ static struct bbc_i2c_bus * attach_one_i2c(struct platform_device *op, int index
300 if (!bp) 300 if (!bp)
301 return NULL; 301 return NULL;
302 302
303 INIT_LIST_HEAD(&bp->temps);
304 INIT_LIST_HEAD(&bp->fans);
305
303 bp->i2c_control_regs = of_ioremap(&op->resource[0], 0, 0x2, "bbc_i2c_regs"); 306 bp->i2c_control_regs = of_ioremap(&op->resource[0], 0, 0x2, "bbc_i2c_regs");
304 if (!bp->i2c_control_regs) 307 if (!bp->i2c_control_regs)
305 goto fail; 308 goto fail;
306 309
307 bp->i2c_bussel_reg = of_ioremap(&op->resource[1], 0, 0x1, "bbc_i2c_bussel"); 310 if (op->num_resources == 2) {
308 if (!bp->i2c_bussel_reg) 311 bp->i2c_bussel_reg = of_ioremap(&op->resource[1], 0, 0x1, "bbc_i2c_bussel");
309 goto fail; 312 if (!bp->i2c_bussel_reg)
313 goto fail;
314 }
310 315
311 bp->waiting = 0; 316 bp->waiting = 0;
312 init_waitqueue_head(&bp->wq); 317 init_waitqueue_head(&bp->wq);
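
Both sbus hunks above initialise the embedded list_heads immediately after allocation, so error paths and later list_del()/list_empty() calls operate on a valid empty node rather than on zeroed memory. A minimal kernel-style sketch of the pattern; the struct and function names are hypothetical:

#include <linux/list.h>
#include <linux/slab.h>

struct temp_probe {                     /* hypothetical */
    struct list_head bp_list;
    struct list_head glob_list;
};

static struct temp_probe *attach_one(gfp_t gfp)
{
    struct temp_probe *tp = kzalloc(sizeof(*tp), gfp);

    if (!tp)
        return NULL;

    /*
     * Make the nodes self-linked right away: list_del() or list_empty()
     * on a kzalloc'ed (NULL-pointer) node would crash or give a wrong
     * answer if a later step fails before the node is added anywhere.
     */
    INIT_LIST_HEAD(&tp->bp_list);
    INIT_LIST_HEAD(&tp->glob_list);

    return tp;
}
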
diff --git a/drivers/staging/media/omap4iss/Kconfig b/drivers/staging/media/omap4iss/Kconfig
index 78b0fba7047e..8afc6fee40c5 100644
--- a/drivers/staging/media/omap4iss/Kconfig
+++ b/drivers/staging/media/omap4iss/Kconfig
@@ -1,6 +1,6 @@
1config VIDEO_OMAP4 1config VIDEO_OMAP4
2 bool "OMAP 4 Camera support" 2 bool "OMAP 4 Camera support"
3 depends on VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API && I2C && ARCH_OMAP4 3 depends on VIDEO_V4L2=y && VIDEO_V4L2_SUBDEV_API && I2C=y && ARCH_OMAP4
4 select VIDEOBUF2_DMA_CONTIG 4 select VIDEOBUF2_DMA_CONTIG
5 ---help--- 5 ---help---
6 Driver for an OMAP 4 ISS controller. 6 Driver for an OMAP 4 ISS controller.
diff --git a/drivers/tty/serial/sunsab.c b/drivers/tty/serial/sunsab.c
index 2f57df9a71d9..a1e09c0d46f2 100644
--- a/drivers/tty/serial/sunsab.c
+++ b/drivers/tty/serial/sunsab.c
@@ -157,6 +157,15 @@ receive_chars(struct uart_sunsab_port *up,
157 (up->port.line == up->port.cons->index)) 157 (up->port.line == up->port.cons->index))
158 saw_console_brk = 1; 158 saw_console_brk = 1;
159 159
160 if (count == 0) {
161 if (unlikely(stat->sreg.isr1 & SAB82532_ISR1_BRK)) {
162 stat->sreg.isr0 &= ~(SAB82532_ISR0_PERR |
163 SAB82532_ISR0_FERR);
164 up->port.icount.brk++;
165 uart_handle_break(&up->port);
166 }
167 }
168
160 for (i = 0; i < count; i++) { 169 for (i = 0; i < count; i++) {
161 unsigned char ch = buf[i], flag; 170 unsigned char ch = buf[i], flag;
162 171
diff --git a/fs/coredump.c b/fs/coredump.c
index 0b2528fb640e..a93f7e6ea4cf 100644
--- a/fs/coredump.c
+++ b/fs/coredump.c
@@ -306,7 +306,7 @@ static int zap_threads(struct task_struct *tsk, struct mm_struct *mm,
306 if (unlikely(nr < 0)) 306 if (unlikely(nr < 0))
307 return nr; 307 return nr;
308 308
309 tsk->flags = PF_DUMPCORE; 309 tsk->flags |= PF_DUMPCORE;
310 if (atomic_read(&mm->mm_users) == nr + 1) 310 if (atomic_read(&mm->mm_users) == nr + 1)
311 goto done; 311 goto done;
312 /* 312 /*
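
The coredump hunk above replaces a plain assignment with an OR, so PF_DUMPCORE is added to the task's flags instead of wiping every other PF_* bit. A tiny userspace demonstration of the difference, with the flag values as defined in the kernel's sched.h:

#include <assert.h>

#define PF_EXITING  0x00000004
#define PF_DUMPCORE 0x00000200

int main(void)
{
    unsigned int flags = PF_EXITING;

    flags |= PF_DUMPCORE;           /* the fix: existing bits survive   */
    assert(flags & PF_EXITING);

    flags = PF_DUMPCORE;            /* the old code: PF_EXITING is lost */
    assert(!(flags & PF_EXITING));
    return 0;
}
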
diff --git a/fs/direct-io.c b/fs/direct-io.c
index 98040ba388ac..194d0d122cae 100644
--- a/fs/direct-io.c
+++ b/fs/direct-io.c
@@ -198,9 +198,8 @@ static inline int dio_refill_pages(struct dio *dio, struct dio_submit *sdio)
198 * L1 cache. 198 * L1 cache.
199 */ 199 */
200static inline struct page *dio_get_page(struct dio *dio, 200static inline struct page *dio_get_page(struct dio *dio,
201 struct dio_submit *sdio, size_t *from, size_t *to) 201 struct dio_submit *sdio)
202{ 202{
203 int n;
204 if (dio_pages_present(sdio) == 0) { 203 if (dio_pages_present(sdio) == 0) {
205 int ret; 204 int ret;
206 205
@@ -209,10 +208,7 @@ static inline struct page *dio_get_page(struct dio *dio,
209 return ERR_PTR(ret); 208 return ERR_PTR(ret);
210 BUG_ON(dio_pages_present(sdio) == 0); 209 BUG_ON(dio_pages_present(sdio) == 0);
211 } 210 }
212 n = sdio->head++; 211 return dio->pages[sdio->head];
213 *from = n ? 0 : sdio->from;
214 *to = (n == sdio->tail - 1) ? sdio->to : PAGE_SIZE;
215 return dio->pages[n];
216} 212}
217 213
218/** 214/**
@@ -911,11 +907,15 @@ static int do_direct_IO(struct dio *dio, struct dio_submit *sdio,
911 while (sdio->block_in_file < sdio->final_block_in_request) { 907 while (sdio->block_in_file < sdio->final_block_in_request) {
912 struct page *page; 908 struct page *page;
913 size_t from, to; 909 size_t from, to;
914 page = dio_get_page(dio, sdio, &from, &to); 910
911 page = dio_get_page(dio, sdio);
915 if (IS_ERR(page)) { 912 if (IS_ERR(page)) {
916 ret = PTR_ERR(page); 913 ret = PTR_ERR(page);
917 goto out; 914 goto out;
918 } 915 }
916 from = sdio->head ? 0 : sdio->from;
917 to = (sdio->head == sdio->tail - 1) ? sdio->to : PAGE_SIZE;
918 sdio->head++;
919 919
920 while (from < to) { 920 while (from < to) {
921 unsigned this_chunk_bytes; /* # of bytes mapped */ 921 unsigned this_chunk_bytes; /* # of bytes mapped */
diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c
index 8474028d7848..03246cd9d47a 100644
--- a/fs/fuse/inode.c
+++ b/fs/fuse/inode.c
@@ -907,9 +907,6 @@ static void process_init_reply(struct fuse_conn *fc, struct fuse_req *req)
907 fc->writeback_cache = 1; 907 fc->writeback_cache = 1;
908 if (arg->time_gran && arg->time_gran <= 1000000000) 908 if (arg->time_gran && arg->time_gran <= 1000000000)
909 fc->sb->s_time_gran = arg->time_gran; 909 fc->sb->s_time_gran = arg->time_gran;
910 else
911 fc->sb->s_time_gran = 1000000000;
912
913 } else { 910 } else {
914 ra_pages = fc->max_read / PAGE_CACHE_SIZE; 911 ra_pages = fc->max_read / PAGE_CACHE_SIZE;
915 fc->no_lock = 1; 912 fc->no_lock = 1;
@@ -938,7 +935,7 @@ static void fuse_send_init(struct fuse_conn *fc, struct fuse_req *req)
938 FUSE_SPLICE_WRITE | FUSE_SPLICE_MOVE | FUSE_SPLICE_READ | 935 FUSE_SPLICE_WRITE | FUSE_SPLICE_MOVE | FUSE_SPLICE_READ |
939 FUSE_FLOCK_LOCKS | FUSE_IOCTL_DIR | FUSE_AUTO_INVAL_DATA | 936 FUSE_FLOCK_LOCKS | FUSE_IOCTL_DIR | FUSE_AUTO_INVAL_DATA |
940 FUSE_DO_READDIRPLUS | FUSE_READDIRPLUS_AUTO | FUSE_ASYNC_DIO | 937 FUSE_DO_READDIRPLUS | FUSE_READDIRPLUS_AUTO | FUSE_ASYNC_DIO |
941 FUSE_WRITEBACK_CACHE; 938 FUSE_WRITEBACK_CACHE | FUSE_NO_OPEN_SUPPORT;
942 req->in.h.opcode = FUSE_INIT; 939 req->in.h.opcode = FUSE_INIT;
943 req->in.numargs = 1; 940 req->in.numargs = 1;
944 req->in.args[0].size = sizeof(*arg); 941 req->in.args[0].size = sizeof(*arg);
diff --git a/fs/namei.c b/fs/namei.c
index 985c6f368485..9eb787e5c167 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -2256,9 +2256,10 @@ done:
2256 goto out; 2256 goto out;
2257 } 2257 }
2258 path->dentry = dentry; 2258 path->dentry = dentry;
2259 path->mnt = mntget(nd->path.mnt); 2259 path->mnt = nd->path.mnt;
2260 if (should_follow_link(dentry, nd->flags & LOOKUP_FOLLOW)) 2260 if (should_follow_link(dentry, nd->flags & LOOKUP_FOLLOW))
2261 return 1; 2261 return 1;
2262 mntget(path->mnt);
2262 follow_mount(path); 2263 follow_mount(path);
2263 error = 0; 2264 error = 0;
2264out: 2265out:
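
The namei hunk above moves mntget() below the early "return 1" taken when the dentry is a link to follow, so the extra mount reference is only taken on the path that actually keeps it; the old ordering could leak one reference per followed link. A generic sketch of that ordering rule with a hypothetical refcounted type, not the VFS code itself:

#include <stdbool.h>

struct mnt { int count; };              /* hypothetical refcounted mount */

static void mnt_get(struct mnt *m) { m->count++; }

/*
 * Take the reference only after the last exit path that does not consume
 * it; the "return 1" caller does not expect to own an extra count.
 */
static int step_into(struct mnt *m, bool follow_link)
{
    if (follow_link)
        return 1;       /* caller handles the link; no reference taken */

    mnt_get(m);         /* only the path that keeps the mount pins it  */
    return 0;
}
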
diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
index b56b1cc02718..944275c8f56d 100644
--- a/fs/nfsd/nfs4xdr.c
+++ b/fs/nfsd/nfs4xdr.c
@@ -2879,6 +2879,7 @@ again:
2879 * return the conflicting open: 2879 * return the conflicting open:
2880 */ 2880 */
2881 if (conf->len) { 2881 if (conf->len) {
2882 kfree(conf->data);
2882 conf->len = 0; 2883 conf->len = 0;
2883 conf->data = NULL; 2884 conf->data = NULL;
2884 goto again; 2885 goto again;
@@ -2891,6 +2892,7 @@ again:
2891 if (conf->len) { 2892 if (conf->len) {
2892 p = xdr_encode_opaque_fixed(p, &ld->ld_clientid, 8); 2893 p = xdr_encode_opaque_fixed(p, &ld->ld_clientid, 8);
2893 p = xdr_encode_opaque(p, conf->data, conf->len); 2894 p = xdr_encode_opaque(p, conf->data, conf->len);
2895 kfree(conf->data);
2894 } else { /* non - nfsv4 lock in conflict, no clientid nor owner */ 2896 } else { /* non - nfsv4 lock in conflict, no clientid nor owner */
2895 p = xdr_encode_hyper(p, (u64)0); /* clientid */ 2897 p = xdr_encode_hyper(p, (u64)0); /* clientid */
2896 *p++ = cpu_to_be32(0); /* length of owner name */ 2898 *p++ = cpu_to_be32(0); /* length of owner name */
@@ -2907,7 +2909,7 @@ nfsd4_encode_lock(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_lo
2907 nfserr = nfsd4_encode_stateid(xdr, &lock->lk_resp_stateid); 2909 nfserr = nfsd4_encode_stateid(xdr, &lock->lk_resp_stateid);
2908 else if (nfserr == nfserr_denied) 2910 else if (nfserr == nfserr_denied)
2909 nfserr = nfsd4_encode_lock_denied(xdr, &lock->lk_denied); 2911 nfserr = nfsd4_encode_lock_denied(xdr, &lock->lk_denied);
2910 kfree(lock->lk_denied.ld_owner.data); 2912
2911 return nfserr; 2913 return nfserr;
2912} 2914}
2913 2915
diff --git a/fs/xattr.c b/fs/xattr.c
index 3377dff18404..c69e6d43a0d2 100644
--- a/fs/xattr.c
+++ b/fs/xattr.c
@@ -843,7 +843,7 @@ struct simple_xattr *simple_xattr_alloc(const void *value, size_t size)
843 843
844 /* wrap around? */ 844 /* wrap around? */
845 len = sizeof(*new_xattr) + size; 845 len = sizeof(*new_xattr) + size;
846 if (len <= sizeof(*new_xattr)) 846 if (len < sizeof(*new_xattr))
847 return NULL; 847 return NULL;
848 848
849 new_xattr = kmalloc(len, GFP_KERNEL); 849 new_xattr = kmalloc(len, GFP_KERNEL);
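
The xattr hunk above relaxes the wrap-around check from "<=" to "<", so a zero-length value (len == sizeof(*new_xattr)) is accepted again while a genuine size_t overflow is still rejected. A small sketch of the check; the header struct is illustrative only:

#include <stdbool.h>
#include <stddef.h>

struct xattr_hdr { char name[64]; size_t size; };   /* illustrative */

/* true if "header + size bytes of value" can be sized without wrapping */
static bool xattr_len_ok(size_t size)
{
    size_t len = sizeof(struct xattr_hdr) + size;

    /* '<' rejects only a real wrap; size == 0 gives len == sizeof() and
     * is allowed, which the old '<=' test wrongly refused. */
    return len >= sizeof(struct xattr_hdr);
}
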
diff --git a/include/linux/libata.h b/include/linux/libata.h
index 5ab4e3a76721..92abb497ab14 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -593,6 +593,7 @@ struct ata_host {
593 struct device *dev; 593 struct device *dev;
594 void __iomem * const *iomap; 594 void __iomem * const *iomap;
595 unsigned int n_ports; 595 unsigned int n_ports;
596 unsigned int n_tags; /* nr of NCQ tags */
596 void *private_data; 597 void *private_data;
597 struct ata_port_operations *ops; 598 struct ata_port_operations *ops;
598 unsigned long flags; 599 unsigned long flags;
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index 0a97b583ee8d..e1474ae18c88 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -399,6 +399,18 @@ static inline struct page *read_mapping_page(struct address_space *mapping,
399} 399}
400 400
401/* 401/*
402 * Get the offset in PAGE_SIZE.
403 * (TODO: hugepage should have ->index in PAGE_SIZE)
404 */
405static inline pgoff_t page_to_pgoff(struct page *page)
406{
407 if (unlikely(PageHeadHuge(page)))
408 return page->index << compound_order(page);
409 else
410 return page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
411}
412
413/*
402 * Return byte-offset into filesystem object for page. 414 * Return byte-offset into filesystem object for page.
403 */ 415 */
404static inline loff_t page_offset(struct page *page) 416static inline loff_t page_offset(struct page *page)
diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h
index 713b0b88bd5a..c4d86198d3d6 100644
--- a/include/net/netfilter/nf_tables.h
+++ b/include/net/netfilter/nf_tables.h
@@ -6,6 +6,7 @@
6#include <linux/netfilter/nfnetlink.h> 6#include <linux/netfilter/nfnetlink.h>
7#include <linux/netfilter/x_tables.h> 7#include <linux/netfilter/x_tables.h>
8#include <linux/netfilter/nf_tables.h> 8#include <linux/netfilter/nf_tables.h>
9#include <linux/u64_stats_sync.h>
9#include <net/netlink.h> 10#include <net/netlink.h>
10 11
11#define NFT_JUMP_STACK_SIZE 16 12#define NFT_JUMP_STACK_SIZE 16
@@ -528,8 +529,9 @@ enum nft_chain_type {
528}; 529};
529 530
530struct nft_stats { 531struct nft_stats {
531 u64 bytes; 532 u64 bytes;
532 u64 pkts; 533 u64 pkts;
534 struct u64_stats_sync syncp;
533}; 535};
534 536
535#define NFT_HOOK_OPS_MAX 2 537#define NFT_HOOK_OPS_MAX 2
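
Embedding a struct u64_stats_sync in nft_stats lets 32-bit readers obtain a consistent 64-bit bytes/pkts pair without the packet path taking a lock. A hedged sketch of the usual writer/reader pattern for this API; the struct mirrors the one in the hunk above, the helper functions are illustrative:

#include <linux/types.h>
#include <linux/u64_stats_sync.h>

struct nft_stats_example {              /* mirrors the hunk above */
    u64 bytes;
    u64 pkts;
    struct u64_stats_sync syncp;
};

/* Writer side (packet path): free on 64-bit, a seqcount bump on 32-bit. */
static void stats_add(struct nft_stats_example *s, unsigned int len)
{
    u64_stats_update_begin(&s->syncp);
    s->pkts++;
    s->bytes += len;
    u64_stats_update_end(&s->syncp);
}

/* Reader side: retry until a consistent snapshot of both counters is seen. */
static void stats_read(const struct nft_stats_example *s, u64 *pkts, u64 *bytes)
{
    unsigned int start;

    do {
        start = u64_stats_fetch_begin(&s->syncp);
        *pkts  = s->pkts;
        *bytes = s->bytes;
    } while (u64_stats_fetch_retry(&s->syncp, start));
}
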
diff --git a/include/net/netns/nftables.h b/include/net/netns/nftables.h
index 26a394cb91a8..eee608b12cc9 100644
--- a/include/net/netns/nftables.h
+++ b/include/net/netns/nftables.h
@@ -13,8 +13,8 @@ struct netns_nftables {
13 struct nft_af_info *inet; 13 struct nft_af_info *inet;
14 struct nft_af_info *arp; 14 struct nft_af_info *arp;
15 struct nft_af_info *bridge; 15 struct nft_af_info *bridge;
16 unsigned int base_seq;
16 u8 gencursor; 17 u8 gencursor;
17 u8 genctr;
18}; 18};
19 19
20#endif 20#endif
diff --git a/include/uapi/linux/fuse.h b/include/uapi/linux/fuse.h
index 40b5ca8a1b1f..25084a052a1e 100644
--- a/include/uapi/linux/fuse.h
+++ b/include/uapi/linux/fuse.h
@@ -101,6 +101,7 @@
101 * - add FATTR_CTIME 101 * - add FATTR_CTIME
102 * - add ctime and ctimensec to fuse_setattr_in 102 * - add ctime and ctimensec to fuse_setattr_in
103 * - add FUSE_RENAME2 request 103 * - add FUSE_RENAME2 request
104 * - add FUSE_NO_OPEN_SUPPORT flag
104 */ 105 */
105 106
106#ifndef _LINUX_FUSE_H 107#ifndef _LINUX_FUSE_H
@@ -229,6 +230,7 @@ struct fuse_file_lock {
229 * FUSE_READDIRPLUS_AUTO: adaptive readdirplus 230 * FUSE_READDIRPLUS_AUTO: adaptive readdirplus
230 * FUSE_ASYNC_DIO: asynchronous direct I/O submission 231 * FUSE_ASYNC_DIO: asynchronous direct I/O submission
231 * FUSE_WRITEBACK_CACHE: use writeback cache for buffered writes 232 * FUSE_WRITEBACK_CACHE: use writeback cache for buffered writes
233 * FUSE_NO_OPEN_SUPPORT: kernel supports zero-message opens
232 */ 234 */
233#define FUSE_ASYNC_READ (1 << 0) 235#define FUSE_ASYNC_READ (1 << 0)
234#define FUSE_POSIX_LOCKS (1 << 1) 236#define FUSE_POSIX_LOCKS (1 << 1)
@@ -247,6 +249,7 @@ struct fuse_file_lock {
247#define FUSE_READDIRPLUS_AUTO (1 << 14) 249#define FUSE_READDIRPLUS_AUTO (1 << 14)
248#define FUSE_ASYNC_DIO (1 << 15) 250#define FUSE_ASYNC_DIO (1 << 15)
249#define FUSE_WRITEBACK_CACHE (1 << 16) 251#define FUSE_WRITEBACK_CACHE (1 << 16)
252#define FUSE_NO_OPEN_SUPPORT (1 << 17)
250 253
251/** 254/**
252 * CUSE INIT request/reply flags 255 * CUSE INIT request/reply flags
diff --git a/kernel/events/core.c b/kernel/events/core.c
index b0c95f0f06fd..6b17ac1b0c2a 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -7458,7 +7458,19 @@ __perf_event_exit_task(struct perf_event *child_event,
7458 struct perf_event_context *child_ctx, 7458 struct perf_event_context *child_ctx,
7459 struct task_struct *child) 7459 struct task_struct *child)
7460{ 7460{
7461 perf_remove_from_context(child_event, true); 7461 /*
7462 * Do not destroy the 'original' grouping; because of the context
7463 * switch optimization the original events could've ended up in a
7464 * random child task.
7465 *
7466 * If we were to destroy the original group, all group related
7467 * operations would cease to function properly after this random
7468 * child dies.
7469 *
7470 * Do destroy all inherited groups, we don't care about those
7471 * and being thorough is better.
7472 */
7473 perf_remove_from_context(child_event, !!child_event->parent);
7462 7474
7463 /* 7475 /*
7464 * It can happen that the parent exits first, and has events 7476 * It can happen that the parent exits first, and has events
@@ -7474,7 +7486,7 @@ __perf_event_exit_task(struct perf_event *child_event,
7474static void perf_event_exit_task_context(struct task_struct *child, int ctxn) 7486static void perf_event_exit_task_context(struct task_struct *child, int ctxn)
7475{ 7487{
7476 struct perf_event *child_event, *next; 7488 struct perf_event *child_event, *next;
7477 struct perf_event_context *child_ctx; 7489 struct perf_event_context *child_ctx, *parent_ctx;
7478 unsigned long flags; 7490 unsigned long flags;
7479 7491
7480 if (likely(!child->perf_event_ctxp[ctxn])) { 7492 if (likely(!child->perf_event_ctxp[ctxn])) {
@@ -7499,6 +7511,15 @@ static void perf_event_exit_task_context(struct task_struct *child, int ctxn)
7499 raw_spin_lock(&child_ctx->lock); 7511 raw_spin_lock(&child_ctx->lock);
7500 task_ctx_sched_out(child_ctx); 7512 task_ctx_sched_out(child_ctx);
7501 child->perf_event_ctxp[ctxn] = NULL; 7513 child->perf_event_ctxp[ctxn] = NULL;
7514
7515 /*
7516 * In order to avoid freeing: child_ctx->parent_ctx->task
7517 * under perf_event_context::lock, grab another reference.
7518 */
7519 parent_ctx = child_ctx->parent_ctx;
7520 if (parent_ctx)
7521 get_ctx(parent_ctx);
7522
7502 /* 7523 /*
7503 * If this context is a clone; unclone it so it can't get 7524 * If this context is a clone; unclone it so it can't get
7504 * swapped to another process while we're removing all 7525 * swapped to another process while we're removing all
@@ -7509,6 +7530,13 @@ static void perf_event_exit_task_context(struct task_struct *child, int ctxn)
7509 raw_spin_unlock_irqrestore(&child_ctx->lock, flags); 7530 raw_spin_unlock_irqrestore(&child_ctx->lock, flags);
7510 7531
7511 /* 7532 /*
7533 * Now that we no longer hold perf_event_context::lock, drop
7534 * our extra child_ctx->parent_ctx reference.
7535 */
7536 if (parent_ctx)
7537 put_ctx(parent_ctx);
7538
7539 /*
7512 * Report the task dead after unscheduling the events so that we 7540 * Report the task dead after unscheduling the events so that we
7513 * won't get any samples after PERF_RECORD_EXIT. We can however still 7541 * won't get any samples after PERF_RECORD_EXIT. We can however still
7514 * get a few PERF_RECORD_READ events. 7542 * get a few PERF_RECORD_READ events.
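
The perf hunk above pins child_ctx->parent_ctx with get_ctx() while child_ctx->lock is held and drops that reference only after the lock has been released, so the work done under the lock cannot free an object that the locked section still depends on. A generic kernel-style sketch of that take-a-temporary-reference pattern using a kref; the types and names are hypothetical, not the perf code:

#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct ctx {                            /* hypothetical refcounted context */
    struct kref ref;
    spinlock_t lock;
    struct ctx *parent;                 /* may lose references under lock */
};

static void ctx_release(struct kref *ref)
{
    kfree(container_of(ref, struct ctx, ref));
}

static void teardown(struct ctx *child)
{
    struct ctx *parent;

    spin_lock(&child->lock);
    parent = child->parent;
    if (parent)
        kref_get(&parent->ref); /* pin it: the work below may drop refs */

    /* ... detach/unclone work that may put parent references ... */

    spin_unlock(&child->lock);

    if (parent)
        kref_put(&parent->ref, ctx_release);    /* safe: lock released */
}
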
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index 3214289df5a7..734e9a7d280b 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -2037,19 +2037,23 @@ static int __init populate_kprobe_blacklist(unsigned long *start,
2037{ 2037{
2038 unsigned long *iter; 2038 unsigned long *iter;
2039 struct kprobe_blacklist_entry *ent; 2039 struct kprobe_blacklist_entry *ent;
2040 unsigned long offset = 0, size = 0; 2040 unsigned long entry, offset = 0, size = 0;
2041 2041
2042 for (iter = start; iter < end; iter++) { 2042 for (iter = start; iter < end; iter++) {
2043 if (!kallsyms_lookup_size_offset(*iter, &size, &offset)) { 2043 entry = arch_deref_entry_point((void *)*iter);
2044 pr_err("Failed to find blacklist %p\n", (void *)*iter); 2044
2045 if (!kernel_text_address(entry) ||
2046 !kallsyms_lookup_size_offset(entry, &size, &offset)) {
2047 pr_err("Failed to find blacklist at %p\n",
2048 (void *)entry);
2045 continue; 2049 continue;
2046 } 2050 }
2047 2051
2048 ent = kmalloc(sizeof(*ent), GFP_KERNEL); 2052 ent = kmalloc(sizeof(*ent), GFP_KERNEL);
2049 if (!ent) 2053 if (!ent)
2050 return -ENOMEM; 2054 return -ENOMEM;
2051 ent->start_addr = *iter; 2055 ent->start_addr = entry;
2052 ent->end_addr = *iter + size; 2056 ent->end_addr = entry + size;
2053 INIT_LIST_HEAD(&ent->list); 2057 INIT_LIST_HEAD(&ent->list);
2054 list_add_tail(&ent->list, &kprobe_blacklist); 2058 list_add_tail(&ent->list, &kprobe_blacklist);
2055 } 2059 }
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index bda9621638cc..291397e66669 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -823,7 +823,7 @@ static struct {
823 { trace_clock_local, "local", 1 }, 823 { trace_clock_local, "local", 1 },
824 { trace_clock_global, "global", 1 }, 824 { trace_clock_global, "global", 1 },
825 { trace_clock_counter, "counter", 0 }, 825 { trace_clock_counter, "counter", 0 },
826 { trace_clock_jiffies, "uptime", 1 }, 826 { trace_clock_jiffies, "uptime", 0 },
827 { trace_clock, "perf", 1 }, 827 { trace_clock, "perf", 1 },
828 ARCH_TRACE_CLOCKS 828 ARCH_TRACE_CLOCKS
829}; 829};
diff --git a/kernel/trace/trace_clock.c b/kernel/trace/trace_clock.c
index 26dc348332b7..57b67b1f24d1 100644
--- a/kernel/trace/trace_clock.c
+++ b/kernel/trace/trace_clock.c
@@ -59,13 +59,14 @@ u64 notrace trace_clock(void)
59 59
60/* 60/*
61 * trace_jiffy_clock(): Simply use jiffies as a clock counter. 61 * trace_jiffy_clock(): Simply use jiffies as a clock counter.
62 * Note that this use of jiffies_64 is not completely safe on
63 * 32-bit systems. But the window is tiny, and the effect if
64 * we are affected is that we will have an obviously bogus
65 * timestamp on a trace event - i.e. not life threatening.
62 */ 66 */
63u64 notrace trace_clock_jiffies(void) 67u64 notrace trace_clock_jiffies(void)
64{ 68{
65 u64 jiffy = jiffies - INITIAL_JIFFIES; 69 return jiffies_64_to_clock_t(jiffies_64 - INITIAL_JIFFIES);
66
67 /* Return nsecs */
68 return (u64)jiffies_to_usecs(jiffy) * 1000ULL;
69} 70}
70 71
71/* 72/*
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 2024bbd573d2..9221c02ed9e2 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -2604,6 +2604,7 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
2604 } else { 2604 } else {
2605 if (cow) 2605 if (cow)
2606 huge_ptep_set_wrprotect(src, addr, src_pte); 2606 huge_ptep_set_wrprotect(src, addr, src_pte);
2607 entry = huge_ptep_get(src_pte);
2607 ptepage = pte_page(entry); 2608 ptepage = pte_page(entry);
2608 get_page(ptepage); 2609 get_page(ptepage);
2609 page_dup_rmap(ptepage); 2610 page_dup_rmap(ptepage);
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index c6399e328931..7211a73ba14d 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -435,7 +435,7 @@ static void collect_procs_anon(struct page *page, struct list_head *to_kill,
435 if (av == NULL) /* Not actually mapped anymore */ 435 if (av == NULL) /* Not actually mapped anymore */
436 return; 436 return;
437 437
438 pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT); 438 pgoff = page_to_pgoff(page);
439 read_lock(&tasklist_lock); 439 read_lock(&tasklist_lock);
440 for_each_process (tsk) { 440 for_each_process (tsk) {
441 struct anon_vma_chain *vmac; 441 struct anon_vma_chain *vmac;
@@ -469,7 +469,7 @@ static void collect_procs_file(struct page *page, struct list_head *to_kill,
469 mutex_lock(&mapping->i_mmap_mutex); 469 mutex_lock(&mapping->i_mmap_mutex);
470 read_lock(&tasklist_lock); 470 read_lock(&tasklist_lock);
471 for_each_process(tsk) { 471 for_each_process(tsk) {
472 pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT); 472 pgoff_t pgoff = page_to_pgoff(page);
473 struct task_struct *t = task_early_kill(tsk, force_early); 473 struct task_struct *t = task_early_kill(tsk, force_early);
474 474
475 if (!t) 475 if (!t)
diff --git a/mm/memory.c b/mm/memory.c
index d67fd9fcf1f2..7e8d8205b610 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2882,7 +2882,8 @@ static int do_read_fault(struct mm_struct *mm, struct vm_area_struct *vma,
2882 * if page by the offset is not ready to be mapped (cold cache or 2882 * if page by the offset is not ready to be mapped (cold cache or
2883 * something). 2883 * something).
2884 */ 2884 */
2885 if (vma->vm_ops->map_pages && fault_around_pages() > 1) { 2885 if (vma->vm_ops->map_pages && !(flags & FAULT_FLAG_NONLINEAR) &&
2886 fault_around_pages() > 1) {
2886 pte = pte_offset_map_lock(mm, pmd, address, &ptl); 2887 pte = pte_offset_map_lock(mm, pmd, address, &ptl);
2887 do_fault_around(vma, address, pte, pgoff, flags); 2888 do_fault_around(vma, address, pte, pgoff, flags);
2888 if (!pte_same(*pte, orig_pte)) 2889 if (!pte_same(*pte, orig_pte))
diff --git a/mm/migrate.c b/mm/migrate.c
index 9e0beaa91845..be6dbf995c0c 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -988,9 +988,10 @@ out:
988 * it. Otherwise, putback_lru_page() will drop the reference grabbed 988 * it. Otherwise, putback_lru_page() will drop the reference grabbed
989 * during isolation. 989 * during isolation.
990 */ 990 */
991 if (rc != MIGRATEPAGE_SUCCESS && put_new_page) 991 if (rc != MIGRATEPAGE_SUCCESS && put_new_page) {
992 ClearPageSwapBacked(newpage);
992 put_new_page(newpage, private); 993 put_new_page(newpage, private);
993 else 994 } else
994 putback_lru_page(newpage); 995 putback_lru_page(newpage);
995 996
996 if (result) { 997 if (result) {
diff --git a/mm/rmap.c b/mm/rmap.c
index b7e94ebbd09e..22a4a7699cdb 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -517,11 +517,7 @@ void page_unlock_anon_vma_read(struct anon_vma *anon_vma)
517static inline unsigned long 517static inline unsigned long
518__vma_address(struct page *page, struct vm_area_struct *vma) 518__vma_address(struct page *page, struct vm_area_struct *vma)
519{ 519{
520 pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT); 520 pgoff_t pgoff = page_to_pgoff(page);
521
522 if (unlikely(is_vm_hugetlb_page(vma)))
523 pgoff = page->index << huge_page_order(page_hstate(page));
524
525 return vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT); 521 return vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
526} 522}
527 523
@@ -1639,7 +1635,7 @@ static struct anon_vma *rmap_walk_anon_lock(struct page *page,
1639static int rmap_walk_anon(struct page *page, struct rmap_walk_control *rwc) 1635static int rmap_walk_anon(struct page *page, struct rmap_walk_control *rwc)
1640{ 1636{
1641 struct anon_vma *anon_vma; 1637 struct anon_vma *anon_vma;
1642 pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT); 1638 pgoff_t pgoff = page_to_pgoff(page);
1643 struct anon_vma_chain *avc; 1639 struct anon_vma_chain *avc;
1644 int ret = SWAP_AGAIN; 1640 int ret = SWAP_AGAIN;
1645 1641
@@ -1680,7 +1676,7 @@ static int rmap_walk_anon(struct page *page, struct rmap_walk_control *rwc)
1680static int rmap_walk_file(struct page *page, struct rmap_walk_control *rwc) 1676static int rmap_walk_file(struct page *page, struct rmap_walk_control *rwc)
1681{ 1677{
1682 struct address_space *mapping = page->mapping; 1678 struct address_space *mapping = page->mapping;
1683 pgoff_t pgoff = page->index << compound_order(page); 1679 pgoff_t pgoff = page_to_pgoff(page);
1684 struct vm_area_struct *vma; 1680 struct vm_area_struct *vma;
1685 int ret = SWAP_AGAIN; 1681 int ret = SWAP_AGAIN;
1686 1682
diff --git a/mm/shmem.c b/mm/shmem.c
index 1140f49b6ded..af68b15a8fc1 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -85,7 +85,7 @@ static struct vfsmount *shm_mnt;
85 * a time): we would prefer not to enlarge the shmem inode just for that. 85 * a time): we would prefer not to enlarge the shmem inode just for that.
86 */ 86 */
87struct shmem_falloc { 87struct shmem_falloc {
88 int mode; /* FALLOC_FL mode currently operating */ 88 wait_queue_head_t *waitq; /* faults into hole wait for punch to end */
89 pgoff_t start; /* start of range currently being fallocated */ 89 pgoff_t start; /* start of range currently being fallocated */
90 pgoff_t next; /* the next page offset to be fallocated */ 90 pgoff_t next; /* the next page offset to be fallocated */
91 pgoff_t nr_falloced; /* how many new pages have been fallocated */ 91 pgoff_t nr_falloced; /* how many new pages have been fallocated */
@@ -468,23 +468,20 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
468 return; 468 return;
469 469
470 index = start; 470 index = start;
471 for ( ; ; ) { 471 while (index < end) {
472 cond_resched(); 472 cond_resched();
473 473
474 pvec.nr = find_get_entries(mapping, index, 474 pvec.nr = find_get_entries(mapping, index,
475 min(end - index, (pgoff_t)PAGEVEC_SIZE), 475 min(end - index, (pgoff_t)PAGEVEC_SIZE),
476 pvec.pages, indices); 476 pvec.pages, indices);
477 if (!pvec.nr) { 477 if (!pvec.nr) {
478 if (index == start || unfalloc) 478 /* If all gone or hole-punch or unfalloc, we're done */
479 if (index == start || end != -1)
479 break; 480 break;
481 /* But if truncating, restart to make sure all gone */
480 index = start; 482 index = start;
481 continue; 483 continue;
482 } 484 }
483 if ((index == start || unfalloc) && indices[0] >= end) {
484 pagevec_remove_exceptionals(&pvec);
485 pagevec_release(&pvec);
486 break;
487 }
488 mem_cgroup_uncharge_start(); 485 mem_cgroup_uncharge_start();
489 for (i = 0; i < pagevec_count(&pvec); i++) { 486 for (i = 0; i < pagevec_count(&pvec); i++) {
490 struct page *page = pvec.pages[i]; 487 struct page *page = pvec.pages[i];
@@ -496,8 +493,12 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
496 if (radix_tree_exceptional_entry(page)) { 493 if (radix_tree_exceptional_entry(page)) {
497 if (unfalloc) 494 if (unfalloc)
498 continue; 495 continue;
499 nr_swaps_freed += !shmem_free_swap(mapping, 496 if (shmem_free_swap(mapping, index, page)) {
500 index, page); 497 /* Swap was replaced by page: retry */
498 index--;
499 break;
500 }
501 nr_swaps_freed++;
501 continue; 502 continue;
502 } 503 }
503 504
@@ -506,6 +507,11 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
506 if (page->mapping == mapping) { 507 if (page->mapping == mapping) {
507 VM_BUG_ON_PAGE(PageWriteback(page), page); 508 VM_BUG_ON_PAGE(PageWriteback(page), page);
508 truncate_inode_page(mapping, page); 509 truncate_inode_page(mapping, page);
510 } else {
511 /* Page was replaced by swap: retry */
512 unlock_page(page);
513 index--;
514 break;
509 } 515 }
510 } 516 }
511 unlock_page(page); 517 unlock_page(page);
@@ -760,7 +766,7 @@ static int shmem_writepage(struct page *page, struct writeback_control *wbc)
760 spin_lock(&inode->i_lock); 766 spin_lock(&inode->i_lock);
761 shmem_falloc = inode->i_private; 767 shmem_falloc = inode->i_private;
762 if (shmem_falloc && 768 if (shmem_falloc &&
763 !shmem_falloc->mode && 769 !shmem_falloc->waitq &&
764 index >= shmem_falloc->start && 770 index >= shmem_falloc->start &&
765 index < shmem_falloc->next) 771 index < shmem_falloc->next)
766 shmem_falloc->nr_unswapped++; 772 shmem_falloc->nr_unswapped++;
@@ -1248,38 +1254,58 @@ static int shmem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1248 * Trinity finds that probing a hole which tmpfs is punching can 1254 * Trinity finds that probing a hole which tmpfs is punching can
1249 * prevent the hole-punch from ever completing: which in turn 1255 * prevent the hole-punch from ever completing: which in turn
1250 * locks writers out with its hold on i_mutex. So refrain from 1256 * locks writers out with its hold on i_mutex. So refrain from
1251 * faulting pages into the hole while it's being punched, and 1257 * faulting pages into the hole while it's being punched. Although
1252 * wait on i_mutex to be released if vmf->flags permits. 1258 * shmem_undo_range() does remove the additions, it may be unable to
1259 * keep up, as each new page needs its own unmap_mapping_range() call,
1260 * and the i_mmap tree grows ever slower to scan if new vmas are added.
1261 *
1262 * It does not matter if we sometimes reach this check just before the
1263 * hole-punch begins, so that one fault then races with the punch:
1264 * we just need to make racing faults a rare case.
1265 *
1266 * The implementation below would be much simpler if we just used a
1267 * standard mutex or completion: but we cannot take i_mutex in fault,
1268 * and bloating every shmem inode for this unlikely case would be sad.
1253 */ 1269 */
1254 if (unlikely(inode->i_private)) { 1270 if (unlikely(inode->i_private)) {
1255 struct shmem_falloc *shmem_falloc; 1271 struct shmem_falloc *shmem_falloc;
1256 1272
1257 spin_lock(&inode->i_lock); 1273 spin_lock(&inode->i_lock);
1258 shmem_falloc = inode->i_private; 1274 shmem_falloc = inode->i_private;
1259 if (!shmem_falloc || 1275 if (shmem_falloc &&
1260 shmem_falloc->mode != FALLOC_FL_PUNCH_HOLE || 1276 shmem_falloc->waitq &&
1261 vmf->pgoff < shmem_falloc->start || 1277 vmf->pgoff >= shmem_falloc->start &&
1262 vmf->pgoff >= shmem_falloc->next) 1278 vmf->pgoff < shmem_falloc->next) {
1263 shmem_falloc = NULL; 1279 wait_queue_head_t *shmem_falloc_waitq;
1264 spin_unlock(&inode->i_lock); 1280 DEFINE_WAIT(shmem_fault_wait);
1265 /* 1281
1266 * i_lock has protected us from taking shmem_falloc seriously 1282 ret = VM_FAULT_NOPAGE;
1267 * once return from shmem_fallocate() went back up that stack.
1268 * i_lock does not serialize with i_mutex at all, but it does
1269 * not matter if sometimes we wait unnecessarily, or sometimes
1270 * miss out on waiting: we just need to make those cases rare.
1271 */
1272 if (shmem_falloc) {
1273 if ((vmf->flags & FAULT_FLAG_ALLOW_RETRY) && 1283 if ((vmf->flags & FAULT_FLAG_ALLOW_RETRY) &&
1274 !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) { 1284 !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) {
1285 /* It's polite to up mmap_sem if we can */
1275 up_read(&vma->vm_mm->mmap_sem); 1286 up_read(&vma->vm_mm->mmap_sem);
1276 mutex_lock(&inode->i_mutex); 1287 ret = VM_FAULT_RETRY;
1277 mutex_unlock(&inode->i_mutex);
1278 return VM_FAULT_RETRY;
1279 } 1288 }
1280 /* cond_resched? Leave that to GUP or return to user */ 1289
1281 return VM_FAULT_NOPAGE; 1290 shmem_falloc_waitq = shmem_falloc->waitq;
1291 prepare_to_wait(shmem_falloc_waitq, &shmem_fault_wait,
1292 TASK_UNINTERRUPTIBLE);
1293 spin_unlock(&inode->i_lock);
1294 schedule();
1295
1296 /*
1297 * shmem_falloc_waitq points into the shmem_fallocate()
1298 * stack of the hole-punching task: shmem_falloc_waitq
1299 * is usually invalid by the time we reach here, but
1300 * finish_wait() does not dereference it in that case;
1301 * though i_lock needed lest racing with wake_up_all().
1302 */
1303 spin_lock(&inode->i_lock);
1304 finish_wait(shmem_falloc_waitq, &shmem_fault_wait);
1305 spin_unlock(&inode->i_lock);
1306 return ret;
1282 } 1307 }
1308 spin_unlock(&inode->i_lock);
1283 } 1309 }
1284 1310
1285 error = shmem_getpage(inode, vmf->pgoff, &vmf->page, SGP_CACHE, &ret); 1311 error = shmem_getpage(inode, vmf->pgoff, &vmf->page, SGP_CACHE, &ret);
@@ -1774,13 +1800,13 @@ static long shmem_fallocate(struct file *file, int mode, loff_t offset,
1774 1800
1775 mutex_lock(&inode->i_mutex); 1801 mutex_lock(&inode->i_mutex);
1776 1802
1777 shmem_falloc.mode = mode & ~FALLOC_FL_KEEP_SIZE;
1778
1779 if (mode & FALLOC_FL_PUNCH_HOLE) { 1803 if (mode & FALLOC_FL_PUNCH_HOLE) {
1780 struct address_space *mapping = file->f_mapping; 1804 struct address_space *mapping = file->f_mapping;
1781 loff_t unmap_start = round_up(offset, PAGE_SIZE); 1805 loff_t unmap_start = round_up(offset, PAGE_SIZE);
1782 loff_t unmap_end = round_down(offset + len, PAGE_SIZE) - 1; 1806 loff_t unmap_end = round_down(offset + len, PAGE_SIZE) - 1;
1807 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(shmem_falloc_waitq);
1783 1808
1809 shmem_falloc.waitq = &shmem_falloc_waitq;
1784 shmem_falloc.start = unmap_start >> PAGE_SHIFT; 1810 shmem_falloc.start = unmap_start >> PAGE_SHIFT;
1785 shmem_falloc.next = (unmap_end + 1) >> PAGE_SHIFT; 1811 shmem_falloc.next = (unmap_end + 1) >> PAGE_SHIFT;
1786 spin_lock(&inode->i_lock); 1812 spin_lock(&inode->i_lock);
@@ -1792,8 +1818,13 @@ static long shmem_fallocate(struct file *file, int mode, loff_t offset,
1792 1 + unmap_end - unmap_start, 0); 1818 1 + unmap_end - unmap_start, 0);
1793 shmem_truncate_range(inode, offset, offset + len - 1); 1819 shmem_truncate_range(inode, offset, offset + len - 1);
1794 /* No need to unmap again: hole-punching leaves COWed pages */ 1820 /* No need to unmap again: hole-punching leaves COWed pages */
1821
1822 spin_lock(&inode->i_lock);
1823 inode->i_private = NULL;
1824 wake_up_all(&shmem_falloc_waitq);
1825 spin_unlock(&inode->i_lock);
1795 error = 0; 1826 error = 0;
1796 goto undone; 1827 goto out;
1797 } 1828 }
1798 1829
1799 /* We need to check rlimit even when FALLOC_FL_KEEP_SIZE */ 1830 /* We need to check rlimit even when FALLOC_FL_KEEP_SIZE */
@@ -1809,6 +1840,7 @@ static long shmem_fallocate(struct file *file, int mode, loff_t offset,
1809 goto out; 1840 goto out;
1810 } 1841 }
1811 1842
1843 shmem_falloc.waitq = NULL;
1812 shmem_falloc.start = start; 1844 shmem_falloc.start = start;
1813 shmem_falloc.next = start; 1845 shmem_falloc.next = start;
1814 shmem_falloc.nr_falloced = 0; 1846 shmem_falloc.nr_falloced = 0;
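
The shmem hunks above replace the old retake-i_mutex dance with a wait queue that lives on the hole-puncher's stack: faulting tasks that hit the range being punched sleep on it, and shmem_fallocate() wakes them all (under i_lock) before the stack frame disappears. A condensed sketch of that on-stack waitqueue pattern; the names are hypothetical, and the real code publishes the queue through inode->i_private and struct shmem_falloc rather than a global pointer:

#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/wait.h>

static DEFINE_SPINLOCK(demo_lock);
static wait_queue_head_t *demo_waitq;   /* points into the puncher's stack */

/* Faulting side: sleep until the hole punch announces completion. */
static void wait_for_punch(void)
{
    DEFINE_WAIT(wait);
    wait_queue_head_t *waitq;

    spin_lock(&demo_lock);
    waitq = demo_waitq;
    if (!waitq) {
        spin_unlock(&demo_lock);
        return;
    }
    prepare_to_wait(waitq, &wait, TASK_UNINTERRUPTIBLE);
    spin_unlock(&demo_lock);

    schedule();

    /*
     * waitq may already be dead stack storage here, but wake_up_all()
     * has removed us via autoremove_wake_function(), so finish_wait()
     * will not dereference it; demo_lock orders us against that wakeup.
     */
    spin_lock(&demo_lock);
    finish_wait(waitq, &wait);
    spin_unlock(&demo_lock);
}

/* Hole-punching side: the queue lives in this stack frame. */
static void punch_hole(void)
{
    DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waitq);

    spin_lock(&demo_lock);
    demo_waitq = &waitq;
    spin_unlock(&demo_lock);

    /* ... do the actual unmap/truncate work ... */

    spin_lock(&demo_lock);
    demo_waitq = NULL;
    wake_up_all(&waitq);        /* wake everyone before 'waitq' goes away */
    spin_unlock(&demo_lock);
}
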
diff --git a/mm/slab_common.c b/mm/slab_common.c
index 735e01a0db6f..d31c4bacc6a2 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -55,7 +55,7 @@ static int kmem_cache_sanity_check(const char *name, size_t size)
55 continue; 55 continue;
56 } 56 }
57 57
58#if !defined(CONFIG_SLUB) || !defined(CONFIG_SLUB_DEBUG_ON) 58#if !defined(CONFIG_SLUB)
59 if (!strcmp(s->name, name)) { 59 if (!strcmp(s->name, name)) {
60 pr_err("%s (%s): Cache name already exists.\n", 60 pr_err("%s (%s): Cache name already exists.\n",
61 __func__, name); 61 __func__, name);
diff --git a/mm/truncate.c b/mm/truncate.c
index 6a78c814bebf..eda247307164 100644
--- a/mm/truncate.c
+++ b/mm/truncate.c
@@ -355,14 +355,16 @@ void truncate_inode_pages_range(struct address_space *mapping,
355 for ( ; ; ) { 355 for ( ; ; ) {
356 cond_resched(); 356 cond_resched();
357 if (!pagevec_lookup_entries(&pvec, mapping, index, 357 if (!pagevec_lookup_entries(&pvec, mapping, index,
358 min(end - index, (pgoff_t)PAGEVEC_SIZE), 358 min(end - index, (pgoff_t)PAGEVEC_SIZE), indices)) {
359 indices)) { 359 /* If all gone from start onwards, we're done */
360 if (index == start) 360 if (index == start)
361 break; 361 break;
362 /* Otherwise restart to make sure all gone */
362 index = start; 363 index = start;
363 continue; 364 continue;
364 } 365 }
365 if (index == start && indices[0] >= end) { 366 if (index == start && indices[0] >= end) {
367 /* All gone out of hole to be punched, we're done */
366 pagevec_remove_exceptionals(&pvec); 368 pagevec_remove_exceptionals(&pvec);
367 pagevec_release(&pvec); 369 pagevec_release(&pvec);
368 break; 370 break;
@@ -373,8 +375,11 @@ void truncate_inode_pages_range(struct address_space *mapping,
373 375
374 /* We rely upon deletion not changing page->index */ 376 /* We rely upon deletion not changing page->index */
375 index = indices[i]; 377 index = indices[i];
376 if (index >= end) 378 if (index >= end) {
379 /* Restart punch to make sure all gone */
380 index = start - 1;
377 break; 381 break;
382 }
378 383
379 if (radix_tree_exceptional_entry(page)) { 384 if (radix_tree_exceptional_entry(page)) {
380 clear_exceptional_entry(mapping, index, page); 385 clear_exceptional_entry(mapping, index, page);
diff --git a/net/batman-adv/bridge_loop_avoidance.c b/net/batman-adv/bridge_loop_avoidance.c
index 6f0d9ec37950..a957c8140721 100644
--- a/net/batman-adv/bridge_loop_avoidance.c
+++ b/net/batman-adv/bridge_loop_avoidance.c
@@ -800,11 +800,6 @@ static int batadv_check_claim_group(struct batadv_priv *bat_priv,
800 bla_dst = (struct batadv_bla_claim_dst *)hw_dst; 800 bla_dst = (struct batadv_bla_claim_dst *)hw_dst;
801 bla_dst_own = &bat_priv->bla.claim_dest; 801 bla_dst_own = &bat_priv->bla.claim_dest;
802 802
803 /* check if it is a claim packet in general */
804 if (memcmp(bla_dst->magic, bla_dst_own->magic,
805 sizeof(bla_dst->magic)) != 0)
806 return 0;
807
808 /* if announcement packet, use the source, 803 /* if announcement packet, use the source,
809 * otherwise assume it is in the hw_src 804 * otherwise assume it is in the hw_src
810 */ 805 */
@@ -866,12 +861,13 @@ static int batadv_bla_process_claim(struct batadv_priv *bat_priv,
866 struct batadv_hard_iface *primary_if, 861 struct batadv_hard_iface *primary_if,
867 struct sk_buff *skb) 862 struct sk_buff *skb)
868{ 863{
869 struct batadv_bla_claim_dst *bla_dst; 864 struct batadv_bla_claim_dst *bla_dst, *bla_dst_own;
870 uint8_t *hw_src, *hw_dst; 865 uint8_t *hw_src, *hw_dst;
871 struct vlan_ethhdr *vhdr; 866 struct vlan_hdr *vhdr, vhdr_buf;
872 struct ethhdr *ethhdr; 867 struct ethhdr *ethhdr;
873 struct arphdr *arphdr; 868 struct arphdr *arphdr;
874 unsigned short vid; 869 unsigned short vid;
870 int vlan_depth = 0;
875 __be16 proto; 871 __be16 proto;
876 int headlen; 872 int headlen;
877 int ret; 873 int ret;
@@ -882,9 +878,24 @@ static int batadv_bla_process_claim(struct batadv_priv *bat_priv,
882 proto = ethhdr->h_proto; 878 proto = ethhdr->h_proto;
883 headlen = ETH_HLEN; 879 headlen = ETH_HLEN;
884 if (vid & BATADV_VLAN_HAS_TAG) { 880 if (vid & BATADV_VLAN_HAS_TAG) {
885 vhdr = vlan_eth_hdr(skb); 881 /* Traverse the VLAN/Ethertypes.
886 proto = vhdr->h_vlan_encapsulated_proto; 882 *
887 headlen += VLAN_HLEN; 883 * At this point it is known that the first protocol is a VLAN
884 * header, so start checking at the encapsulated protocol.
885 *
886 * The depth of the VLAN headers is recorded to drop BLA claim
887 * frames encapsulated into multiple VLAN headers (QinQ).
888 */
889 do {
890 vhdr = skb_header_pointer(skb, headlen, VLAN_HLEN,
891 &vhdr_buf);
892 if (!vhdr)
893 return 0;
894
895 proto = vhdr->h_vlan_encapsulated_proto;
896 headlen += VLAN_HLEN;
897 vlan_depth++;
898 } while (proto == htons(ETH_P_8021Q));
888 } 899 }
889 900
890 if (proto != htons(ETH_P_ARP)) 901 if (proto != htons(ETH_P_ARP))
@@ -914,6 +925,19 @@ static int batadv_bla_process_claim(struct batadv_priv *bat_priv,
914 hw_src = (uint8_t *)arphdr + sizeof(struct arphdr); 925 hw_src = (uint8_t *)arphdr + sizeof(struct arphdr);
915 hw_dst = hw_src + ETH_ALEN + 4; 926 hw_dst = hw_src + ETH_ALEN + 4;
916 bla_dst = (struct batadv_bla_claim_dst *)hw_dst; 927 bla_dst = (struct batadv_bla_claim_dst *)hw_dst;
928 bla_dst_own = &bat_priv->bla.claim_dest;
929
930 /* check if it is a claim frame in general */
931 if (memcmp(bla_dst->magic, bla_dst_own->magic,
932 sizeof(bla_dst->magic)) != 0)
933 return 0;
934
935 /* check if there is a claim frame encapsulated deeper in (QinQ) and
936 * drop that, as this is not supported by BLA but should also not be
937 * sent via the mesh.
938 */
939 if (vlan_depth > 1)
940 return 1;
917 941
918 /* check if it is a claim frame. */ 942 /* check if it is a claim frame. */
919 ret = batadv_check_claim_group(bat_priv, primary_if, hw_src, hw_dst, 943 ret = batadv_check_claim_group(bat_priv, primary_if, hw_src, hw_dst,
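
The bridge_loop_avoidance change moves the claim-magic comparison after the header parsing and walks every stacked 802.1Q header with skb_header_pointer(), recording vlan_depth so that claim frames hidden behind QinQ encapsulation are dropped (return 1) instead of being processed. A small userspace sketch of the same traversal over a raw Ethernet frame buffer (constants and helpers are illustrative, not the kernel implementation):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define ETH_HLEN    14
#define VLAN_HLEN   4
#define ETH_P_8021Q 0x8100
#define ETH_P_ARP   0x0806

static uint16_t get_be16(const uint8_t *p) { return (uint16_t)(p[0] << 8 | p[1]); }

/* Walk stacked 802.1Q headers; returns the innermost ethertype and the
 * number of VLAN headers seen, or -1 if the frame is truncated. */
static int vlan_walk(const uint8_t *frame, size_t len, uint16_t *proto, int *depth)
{
        size_t off = ETH_HLEN - 2;              /* offset of the outer ethertype */

        *depth = 0;
        if (len < ETH_HLEN)
                return -1;
        *proto = get_be16(frame + off);
        off += 2;
        while (*proto == ETH_P_8021Q) {
                if (off + VLAN_HLEN > len)
                        return -1;              /* skb_header_pointer() failed */
                *proto = get_be16(frame + off + 2); /* h_vlan_encapsulated_proto */
                off += VLAN_HLEN;
                (*depth)++;
        }
        return 0;
}

int main(void)
{
        /* dst + src MACs, 802.1Q, 802.1Q, then ARP: a QinQ-encapsulated claim */
        uint8_t frame[ETH_HLEN + 2 * VLAN_HLEN] = {0};
        frame[12] = 0x81; frame[13] = 0x00;     /* outer 802.1Q */
        frame[16] = 0x81; frame[17] = 0x00;     /* inner 802.1Q */
        frame[20] = 0x08; frame[21] = 0x06;     /* ARP */
        uint16_t proto; int depth;

        if (vlan_walk(frame, sizeof(frame), &proto, &depth) == 0)
                printf("proto=0x%04x depth=%d -> %s\n", proto, depth,
                       depth > 1 ? "drop claim (QinQ)" : "process");
        return 0;
}
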
diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
index e7ee65dc20bf..cbd677f48c00 100644
--- a/net/batman-adv/soft-interface.c
+++ b/net/batman-adv/soft-interface.c
@@ -448,10 +448,15 @@ out:
448 * possibly free it 448 * possibly free it
449 * @softif_vlan: the vlan object to release 449 * @softif_vlan: the vlan object to release
450 */ 450 */
451void batadv_softif_vlan_free_ref(struct batadv_softif_vlan *softif_vlan) 451void batadv_softif_vlan_free_ref(struct batadv_softif_vlan *vlan)
452{ 452{
453 if (atomic_dec_and_test(&softif_vlan->refcount)) 453 if (atomic_dec_and_test(&vlan->refcount)) {
454 kfree_rcu(softif_vlan, rcu); 454 spin_lock_bh(&vlan->bat_priv->softif_vlan_list_lock);
455 hlist_del_rcu(&vlan->list);
456 spin_unlock_bh(&vlan->bat_priv->softif_vlan_list_lock);
457
458 kfree_rcu(vlan, rcu);
459 }
455} 460}
456 461
457/** 462/**
@@ -505,6 +510,7 @@ int batadv_softif_create_vlan(struct batadv_priv *bat_priv, unsigned short vid)
505 if (!vlan) 510 if (!vlan)
506 return -ENOMEM; 511 return -ENOMEM;
507 512
513 vlan->bat_priv = bat_priv;
508 vlan->vid = vid; 514 vlan->vid = vid;
509 atomic_set(&vlan->refcount, 1); 515 atomic_set(&vlan->refcount, 1);
510 516
@@ -516,6 +522,10 @@ int batadv_softif_create_vlan(struct batadv_priv *bat_priv, unsigned short vid)
516 return err; 522 return err;
517 } 523 }
518 524
525 spin_lock_bh(&bat_priv->softif_vlan_list_lock);
526 hlist_add_head_rcu(&vlan->list, &bat_priv->softif_vlan_list);
527 spin_unlock_bh(&bat_priv->softif_vlan_list_lock);
528
519 /* add a new TT local entry. This one will be marked with the NOPURGE 529 /* add a new TT local entry. This one will be marked with the NOPURGE
520 * flag 530 * flag
521 */ 531 */
@@ -523,10 +533,6 @@ int batadv_softif_create_vlan(struct batadv_priv *bat_priv, unsigned short vid)
523 bat_priv->soft_iface->dev_addr, vid, 533 bat_priv->soft_iface->dev_addr, vid,
524 BATADV_NULL_IFINDEX, BATADV_NO_MARK); 534 BATADV_NULL_IFINDEX, BATADV_NO_MARK);
525 535
526 spin_lock_bh(&bat_priv->softif_vlan_list_lock);
527 hlist_add_head_rcu(&vlan->list, &bat_priv->softif_vlan_list);
528 spin_unlock_bh(&bat_priv->softif_vlan_list_lock);
529
530 return 0; 536 return 0;
531} 537}
532 538
@@ -538,18 +544,13 @@ int batadv_softif_create_vlan(struct batadv_priv *bat_priv, unsigned short vid)
538static void batadv_softif_destroy_vlan(struct batadv_priv *bat_priv, 544static void batadv_softif_destroy_vlan(struct batadv_priv *bat_priv,
539 struct batadv_softif_vlan *vlan) 545 struct batadv_softif_vlan *vlan)
540{ 546{
541 spin_lock_bh(&bat_priv->softif_vlan_list_lock);
542 hlist_del_rcu(&vlan->list);
543 spin_unlock_bh(&bat_priv->softif_vlan_list_lock);
544
545 batadv_sysfs_del_vlan(bat_priv, vlan);
546
547 /* explicitly remove the associated TT local entry because it is marked 547 /* explicitly remove the associated TT local entry because it is marked
548 * with the NOPURGE flag 548 * with the NOPURGE flag
549 */ 549 */
550 batadv_tt_local_remove(bat_priv, bat_priv->soft_iface->dev_addr, 550 batadv_tt_local_remove(bat_priv, bat_priv->soft_iface->dev_addr,
551 vlan->vid, "vlan interface destroyed", false); 551 vlan->vid, "vlan interface destroyed", false);
552 552
553 batadv_sysfs_del_vlan(bat_priv, vlan);
553 batadv_softif_vlan_free_ref(vlan); 554 batadv_softif_vlan_free_ref(vlan);
554} 555}
555 556
@@ -567,6 +568,8 @@ static int batadv_interface_add_vid(struct net_device *dev, __be16 proto,
567 unsigned short vid) 568 unsigned short vid)
568{ 569{
569 struct batadv_priv *bat_priv = netdev_priv(dev); 570 struct batadv_priv *bat_priv = netdev_priv(dev);
571 struct batadv_softif_vlan *vlan;
572 int ret;
570 573
571 /* only 802.1Q vlans are supported. 574 /* only 802.1Q vlans are supported.
572 * batman-adv does not know how to handle other types 575 * batman-adv does not know how to handle other types
@@ -576,7 +579,36 @@ static int batadv_interface_add_vid(struct net_device *dev, __be16 proto,
576 579
577 vid |= BATADV_VLAN_HAS_TAG; 580 vid |= BATADV_VLAN_HAS_TAG;
578 581
579 return batadv_softif_create_vlan(bat_priv, vid); 582 /* if a new vlan is getting created and it already exists, it means that
583 * it was not deleted yet. batadv_softif_vlan_get() increases the
584 * refcount in order to revive the object.
585 *
586 * if it does not exist then create it.
587 */
588 vlan = batadv_softif_vlan_get(bat_priv, vid);
589 if (!vlan)
590 return batadv_softif_create_vlan(bat_priv, vid);
591
592 /* recreate the sysfs object if it was already destroyed (and it should
593 * be since we received a kill_vid() for this vlan
594 */
595 if (!vlan->kobj) {
596 ret = batadv_sysfs_add_vlan(bat_priv->soft_iface, vlan);
597 if (ret) {
598 batadv_softif_vlan_free_ref(vlan);
599 return ret;
600 }
601 }
602
603 /* add a new TT local entry. This one will be marked with the NOPURGE
604 * flag. This must be added again, even if the vlan object already
605 * exists, because the entry was deleted by kill_vid()
606 */
607 batadv_tt_local_add(bat_priv->soft_iface,
608 bat_priv->soft_iface->dev_addr, vid,
609 BATADV_NULL_IFINDEX, BATADV_NO_MARK);
610
611 return 0;
580} 612}
581 613
582/** 614/**
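
With this change batadv_interface_add_vid() no longer unconditionally creates the vlan object: if an earlier kill_vid() left a still-referenced object behind, batadv_softif_vlan_get() takes a new reference on it, the sysfs entry and the NOPURGE TT entry are re-added, and the free path now unlinks the object from softif_vlan_list only when the last reference drops. A compact get-or-create refcount sketch of that lookup path (userspace, hypothetical types, no locking shown):

#include <stdio.h>
#include <stdlib.h>

struct vlan {
        unsigned short vid;
        int refcount;                    /* atomic_t in the kernel */
        struct vlan *next;
};

static struct vlan *vlan_list;           /* bat_priv->softif_vlan_list */

static struct vlan *vlan_get(unsigned short vid)
{
        for (struct vlan *v = vlan_list; v; v = v->next)
                if (v->vid == vid) {
                        v->refcount++;   /* reuse ("revive") the existing object */
                        return v;
                }
        return NULL;
}

static struct vlan *vlan_create(unsigned short vid)
{
        struct vlan *v = calloc(1, sizeof(*v));
        if (!v)
                return NULL;
        v->vid = vid;
        v->refcount = 1;
        v->next = vlan_list;
        vlan_list = v;                   /* hlist_add_head_rcu() */
        return v;
}

static void vlan_put(struct vlan *v)
{
        if (--v->refcount)
                return;
        for (struct vlan **pp = &vlan_list; *pp; pp = &(*pp)->next)
                if (*pp == v) {          /* hlist_del_rcu() only on the last put */
                        *pp = v->next;
                        break;
                }
        free(v);
}

static int add_vid(unsigned short vid)
{
        struct vlan *v = vlan_get(vid);  /* reuse a not-yet-freed object if present */
        if (!v)
                v = vlan_create(vid);
        return v ? 0 : -1;
}

int main(void)
{
        add_vid(5);
        struct vlan *v = vlan_get(5);
        vlan_put(v);                     /* drop the lookup reference */
        vlan_put(vlan_list);             /* drop the creation reference: object freed */
        printf("list empty: %s\n", vlan_list ? "no" : "yes");
        return 0;
}
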
diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c
index d636bde72c9a..5f59e7f899a0 100644
--- a/net/batman-adv/translation-table.c
+++ b/net/batman-adv/translation-table.c
@@ -511,6 +511,7 @@ bool batadv_tt_local_add(struct net_device *soft_iface, const uint8_t *addr,
511 struct batadv_priv *bat_priv = netdev_priv(soft_iface); 511 struct batadv_priv *bat_priv = netdev_priv(soft_iface);
512 struct batadv_tt_local_entry *tt_local; 512 struct batadv_tt_local_entry *tt_local;
513 struct batadv_tt_global_entry *tt_global = NULL; 513 struct batadv_tt_global_entry *tt_global = NULL;
514 struct batadv_softif_vlan *vlan;
514 struct net_device *in_dev = NULL; 515 struct net_device *in_dev = NULL;
515 struct hlist_head *head; 516 struct hlist_head *head;
516 struct batadv_tt_orig_list_entry *orig_entry; 517 struct batadv_tt_orig_list_entry *orig_entry;
@@ -572,6 +573,9 @@ bool batadv_tt_local_add(struct net_device *soft_iface, const uint8_t *addr,
572 if (!tt_local) 573 if (!tt_local)
573 goto out; 574 goto out;
574 575
576 /* increase the refcounter of the related vlan */
577 vlan = batadv_softif_vlan_get(bat_priv, vid);
578
575 batadv_dbg(BATADV_DBG_TT, bat_priv, 579 batadv_dbg(BATADV_DBG_TT, bat_priv,
576 "Creating new local tt entry: %pM (vid: %d, ttvn: %d)\n", 580 "Creating new local tt entry: %pM (vid: %d, ttvn: %d)\n",
577 addr, BATADV_PRINT_VID(vid), 581 addr, BATADV_PRINT_VID(vid),
@@ -604,6 +608,7 @@ bool batadv_tt_local_add(struct net_device *soft_iface, const uint8_t *addr,
604 if (unlikely(hash_added != 0)) { 608 if (unlikely(hash_added != 0)) {
605 /* remove the reference for the hash */ 609 /* remove the reference for the hash */
606 batadv_tt_local_entry_free_ref(tt_local); 610 batadv_tt_local_entry_free_ref(tt_local);
611 batadv_softif_vlan_free_ref(vlan);
607 goto out; 612 goto out;
608 } 613 }
609 614
@@ -1009,6 +1014,7 @@ uint16_t batadv_tt_local_remove(struct batadv_priv *bat_priv,
1009{ 1014{
1010 struct batadv_tt_local_entry *tt_local_entry; 1015 struct batadv_tt_local_entry *tt_local_entry;
1011 uint16_t flags, curr_flags = BATADV_NO_FLAGS; 1016 uint16_t flags, curr_flags = BATADV_NO_FLAGS;
1017 struct batadv_softif_vlan *vlan;
1012 1018
1013 tt_local_entry = batadv_tt_local_hash_find(bat_priv, addr, vid); 1019 tt_local_entry = batadv_tt_local_hash_find(bat_priv, addr, vid);
1014 if (!tt_local_entry) 1020 if (!tt_local_entry)
@@ -1039,6 +1045,11 @@ uint16_t batadv_tt_local_remove(struct batadv_priv *bat_priv,
1039 hlist_del_rcu(&tt_local_entry->common.hash_entry); 1045 hlist_del_rcu(&tt_local_entry->common.hash_entry);
1040 batadv_tt_local_entry_free_ref(tt_local_entry); 1046 batadv_tt_local_entry_free_ref(tt_local_entry);
1041 1047
1048 /* decrease the reference held for this vlan */
1049 vlan = batadv_softif_vlan_get(bat_priv, vid);
1050 batadv_softif_vlan_free_ref(vlan);
1051 batadv_softif_vlan_free_ref(vlan);
1052
1042out: 1053out:
1043 if (tt_local_entry) 1054 if (tt_local_entry)
1044 batadv_tt_local_entry_free_ref(tt_local_entry); 1055 batadv_tt_local_entry_free_ref(tt_local_entry);
@@ -1111,6 +1122,7 @@ static void batadv_tt_local_table_free(struct batadv_priv *bat_priv)
1111 spinlock_t *list_lock; /* protects write access to the hash lists */ 1122 spinlock_t *list_lock; /* protects write access to the hash lists */
1112 struct batadv_tt_common_entry *tt_common_entry; 1123 struct batadv_tt_common_entry *tt_common_entry;
1113 struct batadv_tt_local_entry *tt_local; 1124 struct batadv_tt_local_entry *tt_local;
1125 struct batadv_softif_vlan *vlan;
1114 struct hlist_node *node_tmp; 1126 struct hlist_node *node_tmp;
1115 struct hlist_head *head; 1127 struct hlist_head *head;
1116 uint32_t i; 1128 uint32_t i;
@@ -1131,6 +1143,13 @@ static void batadv_tt_local_table_free(struct batadv_priv *bat_priv)
1131 tt_local = container_of(tt_common_entry, 1143 tt_local = container_of(tt_common_entry,
1132 struct batadv_tt_local_entry, 1144 struct batadv_tt_local_entry,
1133 common); 1145 common);
1146
1147 /* decrease the reference held for this vlan */
1148 vlan = batadv_softif_vlan_get(bat_priv,
1149 tt_common_entry->vid);
1150 batadv_softif_vlan_free_ref(vlan);
1151 batadv_softif_vlan_free_ref(vlan);
1152
1134 batadv_tt_local_entry_free_ref(tt_local); 1153 batadv_tt_local_entry_free_ref(tt_local);
1135 } 1154 }
1136 spin_unlock_bh(list_lock); 1155 spin_unlock_bh(list_lock);
@@ -3139,6 +3158,7 @@ static void batadv_tt_local_purge_pending_clients(struct batadv_priv *bat_priv)
3139 struct batadv_hashtable *hash = bat_priv->tt.local_hash; 3158 struct batadv_hashtable *hash = bat_priv->tt.local_hash;
3140 struct batadv_tt_common_entry *tt_common; 3159 struct batadv_tt_common_entry *tt_common;
3141 struct batadv_tt_local_entry *tt_local; 3160 struct batadv_tt_local_entry *tt_local;
3161 struct batadv_softif_vlan *vlan;
3142 struct hlist_node *node_tmp; 3162 struct hlist_node *node_tmp;
3143 struct hlist_head *head; 3163 struct hlist_head *head;
3144 spinlock_t *list_lock; /* protects write access to the hash lists */ 3164 spinlock_t *list_lock; /* protects write access to the hash lists */
@@ -3167,6 +3187,12 @@ static void batadv_tt_local_purge_pending_clients(struct batadv_priv *bat_priv)
3167 tt_local = container_of(tt_common, 3187 tt_local = container_of(tt_common,
3168 struct batadv_tt_local_entry, 3188 struct batadv_tt_local_entry,
3169 common); 3189 common);
3190
3191 /* decrease the reference held for this vlan */
3192 vlan = batadv_softif_vlan_get(bat_priv, tt_common->vid);
3193 batadv_softif_vlan_free_ref(vlan);
3194 batadv_softif_vlan_free_ref(vlan);
3195
3170 batadv_tt_local_entry_free_ref(tt_local); 3196 batadv_tt_local_entry_free_ref(tt_local);
3171 } 3197 }
3172 spin_unlock_bh(list_lock); 3198 spin_unlock_bh(list_lock);
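
The two consecutive batadv_softif_vlan_free_ref(vlan) calls in these translation-table hunks are intentional, not a copy/paste error: batadv_softif_vlan_get() just took one reference for the lookup itself, and the TT local entry being destroyed was holding another one (taken in batadv_tt_local_add() above and never released there), so both references must be dropped when the entry goes away. A minimal model of that accounting (hypothetical helpers, single counter instead of per-object atomics):

#include <assert.h>
#include <stdio.h>

static int refcount = 1;                 /* reference held by the soft interface itself */

static void vlan_get(void) { refcount++; }
static void vlan_put(void) { refcount--; }

static void tt_local_add(void)           /* batadv_tt_local_add() */
{
        vlan_get();                      /* reference kept on behalf of the TT entry */
}

static void tt_local_remove(void)        /* batadv_tt_local_remove() */
{
        vlan_get();                      /* lookup reference from batadv_softif_vlan_get() */
        vlan_put();                      /* drop the lookup reference ... */
        vlan_put();                      /* ... and the one the TT entry was holding */
}

int main(void)
{
        tt_local_add();
        tt_local_remove();
        assert(refcount == 1);           /* back to the interface's own reference */
        printf("refcount=%d\n", refcount);
        return 0;
}
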
diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h
index 34891a56773f..8854c05622a9 100644
--- a/net/batman-adv/types.h
+++ b/net/batman-adv/types.h
@@ -687,6 +687,7 @@ struct batadv_priv_nc {
687 687
688/** 688/**
689 * struct batadv_softif_vlan - per VLAN attributes set 689 * struct batadv_softif_vlan - per VLAN attributes set
690 * @bat_priv: pointer to the mesh object
690 * @vid: VLAN identifier 691 * @vid: VLAN identifier
691 * @kobj: kobject for sysfs vlan subdirectory 692 * @kobj: kobject for sysfs vlan subdirectory
692 * @ap_isolation: AP isolation state 693 * @ap_isolation: AP isolation state
@@ -696,6 +697,7 @@ struct batadv_priv_nc {
696 * @rcu: struct used for freeing in a RCU-safe manner 697 * @rcu: struct used for freeing in a RCU-safe manner
697 */ 698 */
698struct batadv_softif_vlan { 699struct batadv_softif_vlan {
700 struct batadv_priv *bat_priv;
699 unsigned short vid; 701 unsigned short vid;
700 struct kobject *kobj; 702 struct kobject *kobj;
701 atomic_t ap_isolation; /* boolean */ 703 atomic_t ap_isolation; /* boolean */
diff --git a/net/core/dev.c b/net/core/dev.c
index 7990984ca364..367a586d0c8a 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -4096,6 +4096,8 @@ static void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
4096 skb->vlan_tci = 0; 4096 skb->vlan_tci = 0;
4097 skb->dev = napi->dev; 4097 skb->dev = napi->dev;
4098 skb->skb_iif = 0; 4098 skb->skb_iif = 0;
4099 skb->encapsulation = 0;
4100 skb_shinfo(skb)->gso_type = 0;
4099 skb->truesize = SKB_TRUESIZE(skb_end_offset(skb)); 4101 skb->truesize = SKB_TRUESIZE(skb_end_offset(skb));
4100 4102
4101 napi->skb = skb; 4103 napi->skb = skb;
diff --git a/net/dns_resolver/dns_query.c b/net/dns_resolver/dns_query.c
index 9acec61f5433..dd8696a3dbec 100644
--- a/net/dns_resolver/dns_query.c
+++ b/net/dns_resolver/dns_query.c
@@ -150,7 +150,7 @@ int dns_query(const char *type, const char *name, size_t namelen,
150 goto put; 150 goto put;
151 151
152 memcpy(*_result, upayload->data, len); 152 memcpy(*_result, upayload->data, len);
153 *_result[len] = '\0'; 153 (*_result)[len] = '\0';
154 154
155 if (_expiry) 155 if (_expiry)
156 *_expiry = rkey->expiry; 156 *_expiry = rkey->expiry;
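
The one-character dns_query() fix is an operator-precedence bug: `*_result[len]` parses as `*(_result[len])`, indexing the caller's pointer variable as if it were an array and writing through whatever lies len pointers past it, whereas the intended NUL terminator needs `(*_result)[len]`. A short illustration:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
        char *result = NULL;
        char **_result = &result;        /* same shape as the dns_query() out-parameter */
        size_t len = 5;

        *_result = malloc(len + 1);
        if (!*_result)
                return 1;
        memcpy(*_result, "hello", len);

        /*  *_result[len]  == *(_result[len])  -> reads _result[5], a bogus pointer */
        /* (*_result)[len] == result[len]      -> writes the terminating NUL        */
        (*_result)[len] = '\0';

        puts(result);
        free(result);
        return 0;
}
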
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index d5e6836cf772..d156b3c5f363 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -1429,6 +1429,9 @@ static int inet_gro_complete(struct sk_buff *skb, int nhoff)
1429 int proto = iph->protocol; 1429 int proto = iph->protocol;
1430 int err = -ENOSYS; 1430 int err = -ENOSYS;
1431 1431
1432 if (skb->encapsulation)
1433 skb_set_inner_network_header(skb, nhoff);
1434
1432 csum_replace2(&iph->check, iph->tot_len, newlen); 1435 csum_replace2(&iph->check, iph->tot_len, newlen);
1433 iph->tot_len = newlen; 1436 iph->tot_len = newlen;
1434 1437
diff --git a/net/ipv4/gre_offload.c b/net/ipv4/gre_offload.c
index eb92deb12666..f0bdd47bbbcb 100644
--- a/net/ipv4/gre_offload.c
+++ b/net/ipv4/gre_offload.c
@@ -263,6 +263,9 @@ static int gre_gro_complete(struct sk_buff *skb, int nhoff)
263 int err = -ENOENT; 263 int err = -ENOENT;
264 __be16 type; 264 __be16 type;
265 265
266 skb->encapsulation = 1;
267 skb_shinfo(skb)->gso_type = SKB_GSO_GRE;
268
266 type = greh->protocol; 269 type = greh->protocol;
267 if (greh->flags & GRE_KEY) 270 if (greh->flags & GRE_KEY)
268 grehlen += GRE_HEADER_SECTION; 271 grehlen += GRE_HEADER_SECTION;
diff --git a/net/ipv4/ip_options.c b/net/ipv4/ip_options.c
index 5e7aecea05cd..ad382499bace 100644
--- a/net/ipv4/ip_options.c
+++ b/net/ipv4/ip_options.c
@@ -288,6 +288,10 @@ int ip_options_compile(struct net *net,
288 optptr++; 288 optptr++;
289 continue; 289 continue;
290 } 290 }
291 if (unlikely(l < 2)) {
292 pp_ptr = optptr;
293 goto error;
294 }
291 optlen = optptr[1]; 295 optlen = optptr[1];
292 if (optlen < 2 || optlen > l) { 296 if (optlen < 2 || optlen > l) {
293 pp_ptr = optptr; 297 pp_ptr = optptr;
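
ip_options_compile() reads optptr[1] (the option length byte) for every multi-byte option, but the remaining length l can legally be 1 at this point (a lone option-type byte at the end of the header), so the added `if (unlikely(l < 2))` check rejects the packet before the out-of-bounds read. A userspace sketch of the same TLV walk with the guard in place (simplified, not the kernel parser):

#include <stdio.h>
#include <stdint.h>

#define IPOPT_END  0
#define IPOPT_NOOP 1

/* Returns 0 on success, -1 on malformed options. */
static int parse_options(const uint8_t *optptr, int l)
{
        while (l > 0) {
                if (optptr[0] == IPOPT_END)
                        break;
                if (optptr[0] == IPOPT_NOOP) {
                        l--;
                        optptr++;
                        continue;
                }
                if (l < 2)               /* the added guard: no room for a length byte */
                        return -1;
                int optlen = optptr[1];
                if (optlen < 2 || optlen > l)
                        return -1;
                /* ... handle the option body here ... */
                l -= optlen;
                optptr += optlen;
        }
        return 0;
}

int main(void)
{
        uint8_t bad[] = { IPOPT_NOOP, IPOPT_NOOP, IPOPT_NOOP, 0x83 };  /* truncated option */
        printf("%d\n", parse_options(bad, sizeof(bad)));               /* -1, rejected */
        return 0;
}
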
diff --git a/net/ipv4/tcp_offload.c b/net/ipv4/tcp_offload.c
index 4e86c59ec7f7..55046ecd083e 100644
--- a/net/ipv4/tcp_offload.c
+++ b/net/ipv4/tcp_offload.c
@@ -309,7 +309,7 @@ static int tcp4_gro_complete(struct sk_buff *skb, int thoff)
309 309
310 th->check = ~tcp_v4_check(skb->len - thoff, iph->saddr, 310 th->check = ~tcp_v4_check(skb->len - thoff, iph->saddr,
311 iph->daddr, 0); 311 iph->daddr, 0);
312 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4; 312 skb_shinfo(skb)->gso_type |= SKB_GSO_TCPV4;
313 313
314 return tcp_gro_complete(skb); 314 return tcp_gro_complete(skb);
315} 315}
diff --git a/net/ipv6/tcpv6_offload.c b/net/ipv6/tcpv6_offload.c
index 8517d3cd1aed..01b0ff9a0c2c 100644
--- a/net/ipv6/tcpv6_offload.c
+++ b/net/ipv6/tcpv6_offload.c
@@ -73,7 +73,7 @@ static int tcp6_gro_complete(struct sk_buff *skb, int thoff)
73 73
74 th->check = ~tcp_v6_check(skb->len - thoff, &iph->saddr, 74 th->check = ~tcp_v6_check(skb->len - thoff, &iph->saddr,
75 &iph->daddr, 0); 75 &iph->daddr, 0);
76 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6; 76 skb_shinfo(skb)->gso_type |= SKB_GSO_TCPV6;
77 77
78 return tcp_gro_complete(skb); 78 return tcp_gro_complete(skb);
79} 79}
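
Both tcp4_gro_complete() and tcp6_gro_complete() now OR SKB_GSO_TCPV4/V6 into gso_type instead of assigning it, because gre_gro_complete() in the hunk above already set SKB_GSO_GRE on encapsulated packets; a plain assignment would silently discard that bit. A tiny demonstration of why the distinction matters for a bit-flag field (flag values are illustrative, not the kernel's):

#include <stdio.h>

#define SKB_GSO_TCPV4 0x01
#define SKB_GSO_GRE   0x40

int main(void)
{
        unsigned int assign = SKB_GSO_GRE;   /* set earlier by the GRE layer */
        unsigned int or_in  = SKB_GSO_GRE;

        assign  = SKB_GSO_TCPV4;             /* old code: the GRE bit is lost   */
        or_in  |= SKB_GSO_TCPV4;             /* new code: both bits preserved   */

        printf("assign=0x%x or_in=0x%x\n", assign, or_in);   /* 0x1 vs 0x41 */
        return 0;
}
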
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index ab4566cfcbe4..8746ff9a8357 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -35,7 +35,7 @@ int nft_register_afinfo(struct net *net, struct nft_af_info *afi)
35{ 35{
36 INIT_LIST_HEAD(&afi->tables); 36 INIT_LIST_HEAD(&afi->tables);
37 nfnl_lock(NFNL_SUBSYS_NFTABLES); 37 nfnl_lock(NFNL_SUBSYS_NFTABLES);
38 list_add_tail(&afi->list, &net->nft.af_info); 38 list_add_tail_rcu(&afi->list, &net->nft.af_info);
39 nfnl_unlock(NFNL_SUBSYS_NFTABLES); 39 nfnl_unlock(NFNL_SUBSYS_NFTABLES);
40 return 0; 40 return 0;
41} 41}
@@ -51,7 +51,7 @@ EXPORT_SYMBOL_GPL(nft_register_afinfo);
51void nft_unregister_afinfo(struct nft_af_info *afi) 51void nft_unregister_afinfo(struct nft_af_info *afi)
52{ 52{
53 nfnl_lock(NFNL_SUBSYS_NFTABLES); 53 nfnl_lock(NFNL_SUBSYS_NFTABLES);
54 list_del(&afi->list); 54 list_del_rcu(&afi->list);
55 nfnl_unlock(NFNL_SUBSYS_NFTABLES); 55 nfnl_unlock(NFNL_SUBSYS_NFTABLES);
56} 56}
57EXPORT_SYMBOL_GPL(nft_unregister_afinfo); 57EXPORT_SYMBOL_GPL(nft_unregister_afinfo);
@@ -277,11 +277,14 @@ static int nf_tables_dump_tables(struct sk_buff *skb,
277 struct net *net = sock_net(skb->sk); 277 struct net *net = sock_net(skb->sk);
278 int family = nfmsg->nfgen_family; 278 int family = nfmsg->nfgen_family;
279 279
280 list_for_each_entry(afi, &net->nft.af_info, list) { 280 rcu_read_lock();
281 cb->seq = net->nft.base_seq;
282
283 list_for_each_entry_rcu(afi, &net->nft.af_info, list) {
281 if (family != NFPROTO_UNSPEC && family != afi->family) 284 if (family != NFPROTO_UNSPEC && family != afi->family)
282 continue; 285 continue;
283 286
284 list_for_each_entry(table, &afi->tables, list) { 287 list_for_each_entry_rcu(table, &afi->tables, list) {
285 if (idx < s_idx) 288 if (idx < s_idx)
286 goto cont; 289 goto cont;
287 if (idx > s_idx) 290 if (idx > s_idx)
@@ -294,11 +297,14 @@ static int nf_tables_dump_tables(struct sk_buff *skb,
294 NLM_F_MULTI, 297 NLM_F_MULTI,
295 afi->family, table) < 0) 298 afi->family, table) < 0)
296 goto done; 299 goto done;
300
301 nl_dump_check_consistent(cb, nlmsg_hdr(skb));
297cont: 302cont:
298 idx++; 303 idx++;
299 } 304 }
300 } 305 }
301done: 306done:
307 rcu_read_unlock();
302 cb->args[0] = idx; 308 cb->args[0] = idx;
303 return skb->len; 309 return skb->len;
304} 310}
@@ -407,6 +413,9 @@ static int nf_tables_updtable(struct nft_ctx *ctx)
407 if (flags & ~NFT_TABLE_F_DORMANT) 413 if (flags & ~NFT_TABLE_F_DORMANT)
408 return -EINVAL; 414 return -EINVAL;
409 415
416 if (flags == ctx->table->flags)
417 return 0;
418
410 trans = nft_trans_alloc(ctx, NFT_MSG_NEWTABLE, 419 trans = nft_trans_alloc(ctx, NFT_MSG_NEWTABLE,
411 sizeof(struct nft_trans_table)); 420 sizeof(struct nft_trans_table));
412 if (trans == NULL) 421 if (trans == NULL)
@@ -514,7 +523,7 @@ static int nf_tables_newtable(struct sock *nlsk, struct sk_buff *skb,
514 module_put(afi->owner); 523 module_put(afi->owner);
515 return err; 524 return err;
516 } 525 }
517 list_add_tail(&table->list, &afi->tables); 526 list_add_tail_rcu(&table->list, &afi->tables);
518 return 0; 527 return 0;
519} 528}
520 529
@@ -546,7 +555,7 @@ static int nf_tables_deltable(struct sock *nlsk, struct sk_buff *skb,
546 if (err < 0) 555 if (err < 0)
547 return err; 556 return err;
548 557
549 list_del(&table->list); 558 list_del_rcu(&table->list);
550 return 0; 559 return 0;
551} 560}
552 561
@@ -635,13 +644,20 @@ static int nft_dump_stats(struct sk_buff *skb, struct nft_stats __percpu *stats)
635{ 644{
636 struct nft_stats *cpu_stats, total; 645 struct nft_stats *cpu_stats, total;
637 struct nlattr *nest; 646 struct nlattr *nest;
647 unsigned int seq;
648 u64 pkts, bytes;
638 int cpu; 649 int cpu;
639 650
640 memset(&total, 0, sizeof(total)); 651 memset(&total, 0, sizeof(total));
641 for_each_possible_cpu(cpu) { 652 for_each_possible_cpu(cpu) {
642 cpu_stats = per_cpu_ptr(stats, cpu); 653 cpu_stats = per_cpu_ptr(stats, cpu);
643 total.pkts += cpu_stats->pkts; 654 do {
644 total.bytes += cpu_stats->bytes; 655 seq = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
656 pkts = cpu_stats->pkts;
657 bytes = cpu_stats->bytes;
658 } while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, seq));
659 total.pkts += pkts;
660 total.bytes += bytes;
645 } 661 }
646 nest = nla_nest_start(skb, NFTA_CHAIN_COUNTERS); 662 nest = nla_nest_start(skb, NFTA_CHAIN_COUNTERS);
647 if (nest == NULL) 663 if (nest == NULL)
@@ -761,12 +777,15 @@ static int nf_tables_dump_chains(struct sk_buff *skb,
761 struct net *net = sock_net(skb->sk); 777 struct net *net = sock_net(skb->sk);
762 int family = nfmsg->nfgen_family; 778 int family = nfmsg->nfgen_family;
763 779
764 list_for_each_entry(afi, &net->nft.af_info, list) { 780 rcu_read_lock();
781 cb->seq = net->nft.base_seq;
782
783 list_for_each_entry_rcu(afi, &net->nft.af_info, list) {
765 if (family != NFPROTO_UNSPEC && family != afi->family) 784 if (family != NFPROTO_UNSPEC && family != afi->family)
766 continue; 785 continue;
767 786
768 list_for_each_entry(table, &afi->tables, list) { 787 list_for_each_entry_rcu(table, &afi->tables, list) {
769 list_for_each_entry(chain, &table->chains, list) { 788 list_for_each_entry_rcu(chain, &table->chains, list) {
770 if (idx < s_idx) 789 if (idx < s_idx)
771 goto cont; 790 goto cont;
772 if (idx > s_idx) 791 if (idx > s_idx)
@@ -778,17 +797,19 @@ static int nf_tables_dump_chains(struct sk_buff *skb,
778 NLM_F_MULTI, 797 NLM_F_MULTI,
779 afi->family, table, chain) < 0) 798 afi->family, table, chain) < 0)
780 goto done; 799 goto done;
800
801 nl_dump_check_consistent(cb, nlmsg_hdr(skb));
781cont: 802cont:
782 idx++; 803 idx++;
783 } 804 }
784 } 805 }
785 } 806 }
786done: 807done:
808 rcu_read_unlock();
787 cb->args[0] = idx; 809 cb->args[0] = idx;
788 return skb->len; 810 return skb->len;
789} 811}
790 812
791
792static int nf_tables_getchain(struct sock *nlsk, struct sk_buff *skb, 813static int nf_tables_getchain(struct sock *nlsk, struct sk_buff *skb,
793 const struct nlmsghdr *nlh, 814 const struct nlmsghdr *nlh,
794 const struct nlattr * const nla[]) 815 const struct nlattr * const nla[])
@@ -861,7 +882,7 @@ static struct nft_stats __percpu *nft_stats_alloc(const struct nlattr *attr)
861 if (!tb[NFTA_COUNTER_BYTES] || !tb[NFTA_COUNTER_PACKETS]) 882 if (!tb[NFTA_COUNTER_BYTES] || !tb[NFTA_COUNTER_PACKETS])
862 return ERR_PTR(-EINVAL); 883 return ERR_PTR(-EINVAL);
863 884
864 newstats = alloc_percpu(struct nft_stats); 885 newstats = netdev_alloc_pcpu_stats(struct nft_stats);
865 if (newstats == NULL) 886 if (newstats == NULL)
866 return ERR_PTR(-ENOMEM); 887 return ERR_PTR(-ENOMEM);
867 888
@@ -1077,7 +1098,7 @@ static int nf_tables_newchain(struct sock *nlsk, struct sk_buff *skb,
1077 } 1098 }
1078 basechain->stats = stats; 1099 basechain->stats = stats;
1079 } else { 1100 } else {
1080 stats = alloc_percpu(struct nft_stats); 1101 stats = netdev_alloc_pcpu_stats(struct nft_stats);
1081 if (IS_ERR(stats)) { 1102 if (IS_ERR(stats)) {
1082 module_put(type->owner); 1103 module_put(type->owner);
1083 kfree(basechain); 1104 kfree(basechain);
@@ -1130,7 +1151,7 @@ static int nf_tables_newchain(struct sock *nlsk, struct sk_buff *skb,
1130 goto err2; 1151 goto err2;
1131 1152
1132 table->use++; 1153 table->use++;
1133 list_add_tail(&chain->list, &table->chains); 1154 list_add_tail_rcu(&chain->list, &table->chains);
1134 return 0; 1155 return 0;
1135err2: 1156err2:
1136 if (!(table->flags & NFT_TABLE_F_DORMANT) && 1157 if (!(table->flags & NFT_TABLE_F_DORMANT) &&
@@ -1180,7 +1201,7 @@ static int nf_tables_delchain(struct sock *nlsk, struct sk_buff *skb,
1180 return err; 1201 return err;
1181 1202
1182 table->use--; 1203 table->use--;
1183 list_del(&chain->list); 1204 list_del_rcu(&chain->list);
1184 return 0; 1205 return 0;
1185} 1206}
1186 1207
@@ -1199,9 +1220,9 @@ int nft_register_expr(struct nft_expr_type *type)
1199{ 1220{
1200 nfnl_lock(NFNL_SUBSYS_NFTABLES); 1221 nfnl_lock(NFNL_SUBSYS_NFTABLES);
1201 if (type->family == NFPROTO_UNSPEC) 1222 if (type->family == NFPROTO_UNSPEC)
1202 list_add_tail(&type->list, &nf_tables_expressions); 1223 list_add_tail_rcu(&type->list, &nf_tables_expressions);
1203 else 1224 else
1204 list_add(&type->list, &nf_tables_expressions); 1225 list_add_rcu(&type->list, &nf_tables_expressions);
1205 nfnl_unlock(NFNL_SUBSYS_NFTABLES); 1226 nfnl_unlock(NFNL_SUBSYS_NFTABLES);
1206 return 0; 1227 return 0;
1207} 1228}
@@ -1216,7 +1237,7 @@ EXPORT_SYMBOL_GPL(nft_register_expr);
1216void nft_unregister_expr(struct nft_expr_type *type) 1237void nft_unregister_expr(struct nft_expr_type *type)
1217{ 1238{
1218 nfnl_lock(NFNL_SUBSYS_NFTABLES); 1239 nfnl_lock(NFNL_SUBSYS_NFTABLES);
1219 list_del(&type->list); 1240 list_del_rcu(&type->list);
1220 nfnl_unlock(NFNL_SUBSYS_NFTABLES); 1241 nfnl_unlock(NFNL_SUBSYS_NFTABLES);
1221} 1242}
1222EXPORT_SYMBOL_GPL(nft_unregister_expr); 1243EXPORT_SYMBOL_GPL(nft_unregister_expr);
@@ -1549,16 +1570,17 @@ static int nf_tables_dump_rules(struct sk_buff *skb,
1549 unsigned int idx = 0, s_idx = cb->args[0]; 1570 unsigned int idx = 0, s_idx = cb->args[0];
1550 struct net *net = sock_net(skb->sk); 1571 struct net *net = sock_net(skb->sk);
1551 int family = nfmsg->nfgen_family; 1572 int family = nfmsg->nfgen_family;
1552 u8 genctr = ACCESS_ONCE(net->nft.genctr);
1553 u8 gencursor = ACCESS_ONCE(net->nft.gencursor);
1554 1573
1555 list_for_each_entry(afi, &net->nft.af_info, list) { 1574 rcu_read_lock();
1575 cb->seq = net->nft.base_seq;
1576
1577 list_for_each_entry_rcu(afi, &net->nft.af_info, list) {
1556 if (family != NFPROTO_UNSPEC && family != afi->family) 1578 if (family != NFPROTO_UNSPEC && family != afi->family)
1557 continue; 1579 continue;
1558 1580
1559 list_for_each_entry(table, &afi->tables, list) { 1581 list_for_each_entry_rcu(table, &afi->tables, list) {
1560 list_for_each_entry(chain, &table->chains, list) { 1582 list_for_each_entry_rcu(chain, &table->chains, list) {
1561 list_for_each_entry(rule, &chain->rules, list) { 1583 list_for_each_entry_rcu(rule, &chain->rules, list) {
1562 if (!nft_rule_is_active(net, rule)) 1584 if (!nft_rule_is_active(net, rule))
1563 goto cont; 1585 goto cont;
1564 if (idx < s_idx) 1586 if (idx < s_idx)
@@ -1572,6 +1594,8 @@ static int nf_tables_dump_rules(struct sk_buff *skb,
1572 NLM_F_MULTI | NLM_F_APPEND, 1594 NLM_F_MULTI | NLM_F_APPEND,
1573 afi->family, table, chain, rule) < 0) 1595 afi->family, table, chain, rule) < 0)
1574 goto done; 1596 goto done;
1597
1598 nl_dump_check_consistent(cb, nlmsg_hdr(skb));
1575cont: 1599cont:
1576 idx++; 1600 idx++;
1577 } 1601 }
@@ -1579,9 +1603,7 @@ cont:
1579 } 1603 }
1580 } 1604 }
1581done: 1605done:
1582 /* Invalidate this dump, a transition to the new generation happened */ 1606 rcu_read_unlock();
1583 if (gencursor != net->nft.gencursor || genctr != net->nft.genctr)
1584 return -EBUSY;
1585 1607
1586 cb->args[0] = idx; 1608 cb->args[0] = idx;
1587 return skb->len; 1609 return skb->len;
@@ -1932,7 +1954,7 @@ static LIST_HEAD(nf_tables_set_ops);
1932int nft_register_set(struct nft_set_ops *ops) 1954int nft_register_set(struct nft_set_ops *ops)
1933{ 1955{
1934 nfnl_lock(NFNL_SUBSYS_NFTABLES); 1956 nfnl_lock(NFNL_SUBSYS_NFTABLES);
1935 list_add_tail(&ops->list, &nf_tables_set_ops); 1957 list_add_tail_rcu(&ops->list, &nf_tables_set_ops);
1936 nfnl_unlock(NFNL_SUBSYS_NFTABLES); 1958 nfnl_unlock(NFNL_SUBSYS_NFTABLES);
1937 return 0; 1959 return 0;
1938} 1960}
@@ -1941,7 +1963,7 @@ EXPORT_SYMBOL_GPL(nft_register_set);
1941void nft_unregister_set(struct nft_set_ops *ops) 1963void nft_unregister_set(struct nft_set_ops *ops)
1942{ 1964{
1943 nfnl_lock(NFNL_SUBSYS_NFTABLES); 1965 nfnl_lock(NFNL_SUBSYS_NFTABLES);
1944 list_del(&ops->list); 1966 list_del_rcu(&ops->list);
1945 nfnl_unlock(NFNL_SUBSYS_NFTABLES); 1967 nfnl_unlock(NFNL_SUBSYS_NFTABLES);
1946} 1968}
1947EXPORT_SYMBOL_GPL(nft_unregister_set); 1969EXPORT_SYMBOL_GPL(nft_unregister_set);
@@ -2234,7 +2256,10 @@ static int nf_tables_dump_sets_table(struct nft_ctx *ctx, struct sk_buff *skb,
2234 if (cb->args[1]) 2256 if (cb->args[1])
2235 return skb->len; 2257 return skb->len;
2236 2258
2237 list_for_each_entry(set, &ctx->table->sets, list) { 2259 rcu_read_lock();
2260 cb->seq = ctx->net->nft.base_seq;
2261
2262 list_for_each_entry_rcu(set, &ctx->table->sets, list) {
2238 if (idx < s_idx) 2263 if (idx < s_idx)
2239 goto cont; 2264 goto cont;
2240 if (nf_tables_fill_set(skb, ctx, set, NFT_MSG_NEWSET, 2265 if (nf_tables_fill_set(skb, ctx, set, NFT_MSG_NEWSET,
@@ -2242,11 +2267,13 @@ static int nf_tables_dump_sets_table(struct nft_ctx *ctx, struct sk_buff *skb,
2242 cb->args[0] = idx; 2267 cb->args[0] = idx;
2243 goto done; 2268 goto done;
2244 } 2269 }
2270 nl_dump_check_consistent(cb, nlmsg_hdr(skb));
2245cont: 2271cont:
2246 idx++; 2272 idx++;
2247 } 2273 }
2248 cb->args[1] = 1; 2274 cb->args[1] = 1;
2249done: 2275done:
2276 rcu_read_unlock();
2250 return skb->len; 2277 return skb->len;
2251} 2278}
2252 2279
@@ -2260,7 +2287,10 @@ static int nf_tables_dump_sets_family(struct nft_ctx *ctx, struct sk_buff *skb,
2260 if (cb->args[1]) 2287 if (cb->args[1])
2261 return skb->len; 2288 return skb->len;
2262 2289
2263 list_for_each_entry(table, &ctx->afi->tables, list) { 2290 rcu_read_lock();
2291 cb->seq = ctx->net->nft.base_seq;
2292
2293 list_for_each_entry_rcu(table, &ctx->afi->tables, list) {
2264 if (cur_table) { 2294 if (cur_table) {
2265 if (cur_table != table) 2295 if (cur_table != table)
2266 continue; 2296 continue;
@@ -2269,7 +2299,7 @@ static int nf_tables_dump_sets_family(struct nft_ctx *ctx, struct sk_buff *skb,
2269 } 2299 }
2270 ctx->table = table; 2300 ctx->table = table;
2271 idx = 0; 2301 idx = 0;
2272 list_for_each_entry(set, &ctx->table->sets, list) { 2302 list_for_each_entry_rcu(set, &ctx->table->sets, list) {
2273 if (idx < s_idx) 2303 if (idx < s_idx)
2274 goto cont; 2304 goto cont;
2275 if (nf_tables_fill_set(skb, ctx, set, NFT_MSG_NEWSET, 2305 if (nf_tables_fill_set(skb, ctx, set, NFT_MSG_NEWSET,
@@ -2278,12 +2308,14 @@ static int nf_tables_dump_sets_family(struct nft_ctx *ctx, struct sk_buff *skb,
2278 cb->args[2] = (unsigned long) table; 2308 cb->args[2] = (unsigned long) table;
2279 goto done; 2309 goto done;
2280 } 2310 }
2311 nl_dump_check_consistent(cb, nlmsg_hdr(skb));
2281cont: 2312cont:
2282 idx++; 2313 idx++;
2283 } 2314 }
2284 } 2315 }
2285 cb->args[1] = 1; 2316 cb->args[1] = 1;
2286done: 2317done:
2318 rcu_read_unlock();
2287 return skb->len; 2319 return skb->len;
2288} 2320}
2289 2321
@@ -2300,7 +2332,10 @@ static int nf_tables_dump_sets_all(struct nft_ctx *ctx, struct sk_buff *skb,
2300 if (cb->args[1]) 2332 if (cb->args[1])
2301 return skb->len; 2333 return skb->len;
2302 2334
2303 list_for_each_entry(afi, &net->nft.af_info, list) { 2335 rcu_read_lock();
2336 cb->seq = net->nft.base_seq;
2337
2338 list_for_each_entry_rcu(afi, &net->nft.af_info, list) {
2304 if (cur_family) { 2339 if (cur_family) {
2305 if (afi->family != cur_family) 2340 if (afi->family != cur_family)
2306 continue; 2341 continue;
@@ -2308,7 +2343,7 @@ static int nf_tables_dump_sets_all(struct nft_ctx *ctx, struct sk_buff *skb,
2308 cur_family = 0; 2343 cur_family = 0;
2309 } 2344 }
2310 2345
2311 list_for_each_entry(table, &afi->tables, list) { 2346 list_for_each_entry_rcu(table, &afi->tables, list) {
2312 if (cur_table) { 2347 if (cur_table) {
2313 if (cur_table != table) 2348 if (cur_table != table)
2314 continue; 2349 continue;
@@ -2319,7 +2354,7 @@ static int nf_tables_dump_sets_all(struct nft_ctx *ctx, struct sk_buff *skb,
2319 ctx->table = table; 2354 ctx->table = table;
2320 ctx->afi = afi; 2355 ctx->afi = afi;
2321 idx = 0; 2356 idx = 0;
2322 list_for_each_entry(set, &ctx->table->sets, list) { 2357 list_for_each_entry_rcu(set, &ctx->table->sets, list) {
2323 if (idx < s_idx) 2358 if (idx < s_idx)
2324 goto cont; 2359 goto cont;
2325 if (nf_tables_fill_set(skb, ctx, set, 2360 if (nf_tables_fill_set(skb, ctx, set,
@@ -2330,6 +2365,7 @@ static int nf_tables_dump_sets_all(struct nft_ctx *ctx, struct sk_buff *skb,
2330 cb->args[3] = afi->family; 2365 cb->args[3] = afi->family;
2331 goto done; 2366 goto done;
2332 } 2367 }
2368 nl_dump_check_consistent(cb, nlmsg_hdr(skb));
2333cont: 2369cont:
2334 idx++; 2370 idx++;
2335 } 2371 }
@@ -2339,6 +2375,7 @@ cont:
2339 } 2375 }
2340 cb->args[1] = 1; 2376 cb->args[1] = 1;
2341done: 2377done:
2378 rcu_read_unlock();
2342 return skb->len; 2379 return skb->len;
2343} 2380}
2344 2381
@@ -2597,7 +2634,7 @@ static int nf_tables_newset(struct sock *nlsk, struct sk_buff *skb,
2597 if (err < 0) 2634 if (err < 0)
2598 goto err2; 2635 goto err2;
2599 2636
2600 list_add_tail(&set->list, &table->sets); 2637 list_add_tail_rcu(&set->list, &table->sets);
2601 table->use++; 2638 table->use++;
2602 return 0; 2639 return 0;
2603 2640
@@ -2617,7 +2654,7 @@ static void nft_set_destroy(struct nft_set *set)
2617 2654
2618static void nf_tables_set_destroy(const struct nft_ctx *ctx, struct nft_set *set) 2655static void nf_tables_set_destroy(const struct nft_ctx *ctx, struct nft_set *set)
2619{ 2656{
2620 list_del(&set->list); 2657 list_del_rcu(&set->list);
2621 nf_tables_set_notify(ctx, set, NFT_MSG_DELSET, GFP_ATOMIC); 2658 nf_tables_set_notify(ctx, set, NFT_MSG_DELSET, GFP_ATOMIC);
2622 nft_set_destroy(set); 2659 nft_set_destroy(set);
2623} 2660}
@@ -2652,7 +2689,7 @@ static int nf_tables_delset(struct sock *nlsk, struct sk_buff *skb,
2652 if (err < 0) 2689 if (err < 0)
2653 return err; 2690 return err;
2654 2691
2655 list_del(&set->list); 2692 list_del_rcu(&set->list);
2656 ctx.table->use--; 2693 ctx.table->use--;
2657 return 0; 2694 return 0;
2658} 2695}
@@ -2704,14 +2741,14 @@ int nf_tables_bind_set(const struct nft_ctx *ctx, struct nft_set *set,
2704 } 2741 }
2705bind: 2742bind:
2706 binding->chain = ctx->chain; 2743 binding->chain = ctx->chain;
2707 list_add_tail(&binding->list, &set->bindings); 2744 list_add_tail_rcu(&binding->list, &set->bindings);
2708 return 0; 2745 return 0;
2709} 2746}
2710 2747
2711void nf_tables_unbind_set(const struct nft_ctx *ctx, struct nft_set *set, 2748void nf_tables_unbind_set(const struct nft_ctx *ctx, struct nft_set *set,
2712 struct nft_set_binding *binding) 2749 struct nft_set_binding *binding)
2713{ 2750{
2714 list_del(&binding->list); 2751 list_del_rcu(&binding->list);
2715 2752
2716 if (list_empty(&set->bindings) && set->flags & NFT_SET_ANONYMOUS && 2753 if (list_empty(&set->bindings) && set->flags & NFT_SET_ANONYMOUS &&
2717 !(set->flags & NFT_SET_INACTIVE)) 2754 !(set->flags & NFT_SET_INACTIVE))
@@ -3346,7 +3383,7 @@ static int nf_tables_commit(struct sk_buff *skb)
3346 struct nft_set *set; 3383 struct nft_set *set;
3347 3384
3348 /* Bump generation counter, invalidate any dump in progress */ 3385 /* Bump generation counter, invalidate any dump in progress */
3349 net->nft.genctr++; 3386 while (++net->nft.base_seq == 0);
3350 3387
3351 /* A new generation has just started */ 3388 /* A new generation has just started */
3352 net->nft.gencursor = gencursor_next(net); 3389 net->nft.gencursor = gencursor_next(net);
@@ -3491,12 +3528,12 @@ static int nf_tables_abort(struct sk_buff *skb)
3491 } 3528 }
3492 nft_trans_destroy(trans); 3529 nft_trans_destroy(trans);
3493 } else { 3530 } else {
3494 list_del(&trans->ctx.table->list); 3531 list_del_rcu(&trans->ctx.table->list);
3495 } 3532 }
3496 break; 3533 break;
3497 case NFT_MSG_DELTABLE: 3534 case NFT_MSG_DELTABLE:
3498 list_add_tail(&trans->ctx.table->list, 3535 list_add_tail_rcu(&trans->ctx.table->list,
3499 &trans->ctx.afi->tables); 3536 &trans->ctx.afi->tables);
3500 nft_trans_destroy(trans); 3537 nft_trans_destroy(trans);
3501 break; 3538 break;
3502 case NFT_MSG_NEWCHAIN: 3539 case NFT_MSG_NEWCHAIN:
@@ -3507,7 +3544,7 @@ static int nf_tables_abort(struct sk_buff *skb)
3507 nft_trans_destroy(trans); 3544 nft_trans_destroy(trans);
3508 } else { 3545 } else {
3509 trans->ctx.table->use--; 3546 trans->ctx.table->use--;
3510 list_del(&trans->ctx.chain->list); 3547 list_del_rcu(&trans->ctx.chain->list);
3511 if (!(trans->ctx.table->flags & NFT_TABLE_F_DORMANT) && 3548 if (!(trans->ctx.table->flags & NFT_TABLE_F_DORMANT) &&
3512 trans->ctx.chain->flags & NFT_BASE_CHAIN) { 3549 trans->ctx.chain->flags & NFT_BASE_CHAIN) {
3513 nf_unregister_hooks(nft_base_chain(trans->ctx.chain)->ops, 3550 nf_unregister_hooks(nft_base_chain(trans->ctx.chain)->ops,
@@ -3517,8 +3554,8 @@ static int nf_tables_abort(struct sk_buff *skb)
3517 break; 3554 break;
3518 case NFT_MSG_DELCHAIN: 3555 case NFT_MSG_DELCHAIN:
3519 trans->ctx.table->use++; 3556 trans->ctx.table->use++;
3520 list_add_tail(&trans->ctx.chain->list, 3557 list_add_tail_rcu(&trans->ctx.chain->list,
3521 &trans->ctx.table->chains); 3558 &trans->ctx.table->chains);
3522 nft_trans_destroy(trans); 3559 nft_trans_destroy(trans);
3523 break; 3560 break;
3524 case NFT_MSG_NEWRULE: 3561 case NFT_MSG_NEWRULE:
@@ -3532,12 +3569,12 @@ static int nf_tables_abort(struct sk_buff *skb)
3532 break; 3569 break;
3533 case NFT_MSG_NEWSET: 3570 case NFT_MSG_NEWSET:
3534 trans->ctx.table->use--; 3571 trans->ctx.table->use--;
3535 list_del(&nft_trans_set(trans)->list); 3572 list_del_rcu(&nft_trans_set(trans)->list);
3536 break; 3573 break;
3537 case NFT_MSG_DELSET: 3574 case NFT_MSG_DELSET:
3538 trans->ctx.table->use++; 3575 trans->ctx.table->use++;
3539 list_add_tail(&nft_trans_set(trans)->list, 3576 list_add_tail_rcu(&nft_trans_set(trans)->list,
3540 &trans->ctx.table->sets); 3577 &trans->ctx.table->sets);
3541 nft_trans_destroy(trans); 3578 nft_trans_destroy(trans);
3542 break; 3579 break;
3543 case NFT_MSG_NEWSETELEM: 3580 case NFT_MSG_NEWSETELEM:
@@ -3951,6 +3988,7 @@ static int nf_tables_init_net(struct net *net)
3951{ 3988{
3952 INIT_LIST_HEAD(&net->nft.af_info); 3989 INIT_LIST_HEAD(&net->nft.af_info);
3953 INIT_LIST_HEAD(&net->nft.commit_list); 3990 INIT_LIST_HEAD(&net->nft.commit_list);
3991 net->nft.base_seq = 1;
3954 return 0; 3992 return 0;
3955} 3993}
3956 3994
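
The nf_tables_api.c changes convert the netlink dump paths to RCU list walks: every list_add/list_del on tables, chains, rules, sets and bindings becomes the _rcu variant, each dump records net->nft.base_seq in cb->seq, and nl_dump_check_consistent() marks the dump as interrupted if a commit bumped the sequence while it ran (replacing the old genctr/gencursor -EBUSY check in the rule dump). A small sketch of that generation-check idea, with a plain counter standing in for base_seq (hypothetical names, no real netlink involved):

#include <stdio.h>
#include <stdbool.h>

static unsigned int base_seq = 1;            /* net->nft.base_seq, starts at 1 */

static void commit(void)                     /* nf_tables_commit() */
{
        while (++base_seq == 0)              /* never let the counter wrap to 0 */
                ;
}

struct dump_cb { unsigned int seq; bool inconsistent; };

static void dump_start(struct dump_cb *cb)
{
        cb->seq = base_seq;                  /* cb->seq = net->nft.base_seq */
        cb->inconsistent = false;
}

static void dump_check_consistent(struct dump_cb *cb)
{
        if (cb->seq != base_seq)             /* nl_dump_check_consistent() analogue */
                cb->inconsistent = true;     /* userspace should restart the dump */
}

int main(void)
{
        struct dump_cb cb;

        dump_start(&cb);
        dump_check_consistent(&cb);          /* still consistent */
        commit();                            /* a transaction lands mid-dump */
        dump_check_consistent(&cb);
        printf("inconsistent=%d\n", cb.inconsistent);   /* 1 */
        return 0;
}
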
diff --git a/net/netfilter/nf_tables_core.c b/net/netfilter/nf_tables_core.c
index 345acfb1720b..3b90eb2b2c55 100644
--- a/net/netfilter/nf_tables_core.c
+++ b/net/netfilter/nf_tables_core.c
@@ -109,7 +109,7 @@ nft_do_chain(struct nft_pktinfo *pkt, const struct nf_hook_ops *ops)
109 struct nft_data data[NFT_REG_MAX + 1]; 109 struct nft_data data[NFT_REG_MAX + 1];
110 unsigned int stackptr = 0; 110 unsigned int stackptr = 0;
111 struct nft_jumpstack jumpstack[NFT_JUMP_STACK_SIZE]; 111 struct nft_jumpstack jumpstack[NFT_JUMP_STACK_SIZE];
112 struct nft_stats __percpu *stats; 112 struct nft_stats *stats;
113 int rulenum; 113 int rulenum;
114 /* 114 /*
115 * Cache cursor to avoid problems in case that the cursor is updated 115 * Cache cursor to avoid problems in case that the cursor is updated
@@ -205,9 +205,11 @@ next_rule:
205 nft_trace_packet(pkt, basechain, -1, NFT_TRACE_POLICY); 205 nft_trace_packet(pkt, basechain, -1, NFT_TRACE_POLICY);
206 206
207 rcu_read_lock_bh(); 207 rcu_read_lock_bh();
208 stats = rcu_dereference(nft_base_chain(basechain)->stats); 208 stats = this_cpu_ptr(rcu_dereference(nft_base_chain(basechain)->stats));
209 __this_cpu_inc(stats->pkts); 209 u64_stats_update_begin(&stats->syncp);
210 __this_cpu_add(stats->bytes, pkt->skb->len); 210 stats->pkts++;
211 stats->bytes += pkt->skb->len;
212 u64_stats_update_end(&stats->syncp);
211 rcu_read_unlock_bh(); 213 rcu_read_unlock_bh();
212 214
213 return nft_base_chain(basechain)->policy; 215 return nft_base_chain(basechain)->policy;
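
Chain statistics now use netdev_alloc_pcpu_stats() plus u64_stats_update_begin()/end() on the packet path and u64_stats_fetch_begin_irq()/retry_irq() in nft_dump_stats(), so the 64-bit packet/byte counters can be read consistently on 32-bit machines without adding locks to the fast path. A userspace seqcount-style analogue of that read-retry protocol, kept single-threaded just to show the shape (not the kernel API, and without the barriers real concurrency would need):

#include <stdio.h>
#include <stdint.h>

/* Writers make seq odd while updating; readers retry until they see the
 * same even value before and after copying the counters. */
struct stats { unsigned int seq; uint64_t pkts, bytes; };

static void write_begin(struct stats *s) { s->seq++; }   /* now odd: update in progress */
static void write_end(struct stats *s)   { s->seq++; }   /* even again */

static void add_packet(struct stats *s, uint64_t len)
{
        write_begin(s);
        s->pkts++;
        s->bytes += len;
        write_end(s);
}

static void read_stats(const struct stats *s, uint64_t *pkts, uint64_t *bytes)
{
        unsigned int start;

        do {
                start = s->seq;
                *pkts = s->pkts;
                *bytes = s->bytes;
        } while ((start & 1) || start != s->seq);   /* retry if a write raced us */
}

int main(void)
{
        struct stats s = {0};
        uint64_t p, b;

        add_packet(&s, 1500);
        add_packet(&s, 60);
        read_stats(&s, &p, &b);
        printf("pkts=%llu bytes=%llu\n", (unsigned long long)p, (unsigned long long)b);
        return 0;
}
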
diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c
index c39b583ace32..70c0be8d0121 100644
--- a/net/sched/cls_u32.c
+++ b/net/sched/cls_u32.c
@@ -38,6 +38,7 @@
38#include <linux/errno.h> 38#include <linux/errno.h>
39#include <linux/rtnetlink.h> 39#include <linux/rtnetlink.h>
40#include <linux/skbuff.h> 40#include <linux/skbuff.h>
41#include <linux/bitmap.h>
41#include <net/netlink.h> 42#include <net/netlink.h>
42#include <net/act_api.h> 43#include <net/act_api.h>
43#include <net/pkt_cls.h> 44#include <net/pkt_cls.h>
@@ -460,17 +461,25 @@ static int u32_delete(struct tcf_proto *tp, unsigned long arg)
460 return 0; 461 return 0;
461} 462}
462 463
464#define NR_U32_NODE (1<<12)
463static u32 gen_new_kid(struct tc_u_hnode *ht, u32 handle) 465static u32 gen_new_kid(struct tc_u_hnode *ht, u32 handle)
464{ 466{
465 struct tc_u_knode *n; 467 struct tc_u_knode *n;
466 unsigned int i = 0x7FF; 468 unsigned long i;
469 unsigned long *bitmap = kzalloc(BITS_TO_LONGS(NR_U32_NODE) * sizeof(unsigned long),
470 GFP_KERNEL);
471 if (!bitmap)
472 return handle | 0xFFF;
467 473
468 for (n = ht->ht[TC_U32_HASH(handle)]; n; n = n->next) 474 for (n = ht->ht[TC_U32_HASH(handle)]; n; n = n->next)
469 if (i < TC_U32_NODE(n->handle)) 475 set_bit(TC_U32_NODE(n->handle), bitmap);
470 i = TC_U32_NODE(n->handle);
471 i++;
472 476
473 return handle | (i > 0xFFF ? 0xFFF : i); 477 i = find_next_zero_bit(bitmap, NR_U32_NODE, 0x800);
478 if (i >= NR_U32_NODE)
479 i = find_next_zero_bit(bitmap, NR_U32_NODE, 1);
480
481 kfree(bitmap);
482 return handle | (i >= NR_U32_NODE ? 0xFFF : i);
474} 483}
475 484
476static const struct nla_policy u32_policy[TCA_U32_MAX + 1] = { 485static const struct nla_policy u32_policy[TCA_U32_MAX + 1] = {
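
gen_new_kid() used to return the highest node id in use plus one, so handles freed in the middle of the 0x800..0xFFF range were never reused and the allocator could only run forward until it saturated at 0xFFF; the new version marks every in-use node id in a bitmap and picks the first free one with find_next_zero_bit(), preferring ids at or above 0x800 and falling back to the low range. A userspace sketch of that allocation strategy (plain arrays instead of the kernel bitmap helpers):

#include <stdio.h>
#include <string.h>

#define NR_NODE 4096                    /* NR_U32_NODE: 12-bit node id space */

static unsigned char used[NR_NODE];

static int find_free(int from)
{
        for (int i = from; i < NR_NODE; i++)
                if (!used[i])
                        return i;
        return NR_NODE;                 /* nothing free in [from, NR_NODE) */
}

static int gen_new_kid(void)
{
        int i = find_free(0x800);       /* prefer the upper half, like the patch */
        if (i >= NR_NODE)
                i = find_free(1);       /* fall back to the low ids (0 is reserved) */
        return i >= NR_NODE ? 0xFFF : i;
}

int main(void)
{
        /* pretend everything is allocated except one hole at 0x900 */
        memset(used, 1, sizeof(used));
        used[0x900] = 0;
        printf("0x%x\n", gen_new_kid());   /* 0x900: a freed id is reused */
        return 0;
}
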
diff --git a/sound/firewire/bebob/bebob_maudio.c b/sound/firewire/bebob/bebob_maudio.c
index 6af50eb80ea7..70faa3a32526 100644
--- a/sound/firewire/bebob/bebob_maudio.c
+++ b/sound/firewire/bebob/bebob_maudio.c
@@ -379,11 +379,11 @@ static int special_clk_ctl_put(struct snd_kcontrol *kctl,
379 struct special_params *params = bebob->maudio_special_quirk; 379 struct special_params *params = bebob->maudio_special_quirk;
380 int err, id; 380 int err, id;
381 381
382 mutex_lock(&bebob->mutex);
383
384 id = uval->value.enumerated.item[0]; 382 id = uval->value.enumerated.item[0];
385 if (id >= ARRAY_SIZE(special_clk_labels)) 383 if (id >= ARRAY_SIZE(special_clk_labels))
386 return 0; 384 return -EINVAL;
385
386 mutex_lock(&bebob->mutex);
387 387
388 err = avc_maudio_set_special_clk(bebob, id, 388 err = avc_maudio_set_special_clk(bebob, id,
389 params->dig_in_fmt, 389 params->dig_in_fmt,
@@ -391,7 +391,10 @@ static int special_clk_ctl_put(struct snd_kcontrol *kctl,
391 params->clk_lock); 391 params->clk_lock);
392 mutex_unlock(&bebob->mutex); 392 mutex_unlock(&bebob->mutex);
393 393
394 return err >= 0; 394 if (err >= 0)
395 err = 1;
396
397 return err;
395} 398}
396static struct snd_kcontrol_new special_clk_ctl = { 399static struct snd_kcontrol_new special_clk_ctl = {
397 .name = "Clock Source", 400 .name = "Clock Source",
@@ -434,8 +437,8 @@ static struct snd_kcontrol_new special_sync_ctl = {
434 .get = special_sync_ctl_get, 437 .get = special_sync_ctl_get,
435}; 438};
436 439
437/* Digital interface control for special firmware */ 440/* Digital input interface control for special firmware */
438static char *const special_dig_iface_labels[] = { 441static char *const special_dig_in_iface_labels[] = {
439 "S/PDIF Optical", "S/PDIF Coaxial", "ADAT Optical" 442 "S/PDIF Optical", "S/PDIF Coaxial", "ADAT Optical"
440}; 443};
441static int special_dig_in_iface_ctl_info(struct snd_kcontrol *kctl, 444static int special_dig_in_iface_ctl_info(struct snd_kcontrol *kctl,
@@ -443,13 +446,13 @@ static int special_dig_in_iface_ctl_info(struct snd_kcontrol *kctl,
443{ 446{
444 einf->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED; 447 einf->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
445 einf->count = 1; 448 einf->count = 1;
446 einf->value.enumerated.items = ARRAY_SIZE(special_dig_iface_labels); 449 einf->value.enumerated.items = ARRAY_SIZE(special_dig_in_iface_labels);
447 450
448 if (einf->value.enumerated.item >= einf->value.enumerated.items) 451 if (einf->value.enumerated.item >= einf->value.enumerated.items)
449 einf->value.enumerated.item = einf->value.enumerated.items - 1; 452 einf->value.enumerated.item = einf->value.enumerated.items - 1;
450 453
451 strcpy(einf->value.enumerated.name, 454 strcpy(einf->value.enumerated.name,
452 special_dig_iface_labels[einf->value.enumerated.item]); 455 special_dig_in_iface_labels[einf->value.enumerated.item]);
453 456
454 return 0; 457 return 0;
455} 458}
@@ -491,26 +494,36 @@ static int special_dig_in_iface_ctl_set(struct snd_kcontrol *kctl,
491 unsigned int id, dig_in_fmt, dig_in_iface; 494 unsigned int id, dig_in_fmt, dig_in_iface;
492 int err; 495 int err;
493 496
494 mutex_lock(&bebob->mutex);
495
496 id = uval->value.enumerated.item[0]; 497 id = uval->value.enumerated.item[0];
498 if (id >= ARRAY_SIZE(special_dig_in_iface_labels))
499 return -EINVAL;
497 500
498 /* decode user value */ 501 /* decode user value */
499 dig_in_fmt = (id >> 1) & 0x01; 502 dig_in_fmt = (id >> 1) & 0x01;
500 dig_in_iface = id & 0x01; 503 dig_in_iface = id & 0x01;
501 504
505 mutex_lock(&bebob->mutex);
506
502 err = avc_maudio_set_special_clk(bebob, 507 err = avc_maudio_set_special_clk(bebob,
503 params->clk_src, 508 params->clk_src,
504 dig_in_fmt, 509 dig_in_fmt,
505 params->dig_out_fmt, 510 params->dig_out_fmt,
506 params->clk_lock); 511 params->clk_lock);
507 if ((err < 0) || (params->dig_in_fmt > 0)) /* ADAT */ 512 if (err < 0)
513 goto end;
514
515 /* For ADAT, optical interface is only available. */
516 if (params->dig_in_fmt > 0) {
517 err = 1;
508 goto end; 518 goto end;
519 }
509 520
521 /* For S/PDIF, optical/coaxial interfaces are selectable. */
510 err = avc_audio_set_selector(bebob->unit, 0x00, 0x04, dig_in_iface); 522 err = avc_audio_set_selector(bebob->unit, 0x00, 0x04, dig_in_iface);
511 if (err < 0) 523 if (err < 0)
512 dev_err(&bebob->unit->device, 524 dev_err(&bebob->unit->device,
513 "fail to set digital input interface: %d\n", err); 525 "fail to set digital input interface: %d\n", err);
526 err = 1;
514end: 527end:
515 special_stream_formation_set(bebob); 528 special_stream_formation_set(bebob);
516 mutex_unlock(&bebob->mutex); 529 mutex_unlock(&bebob->mutex);
@@ -525,18 +538,22 @@ static struct snd_kcontrol_new special_dig_in_iface_ctl = {
525 .put = special_dig_in_iface_ctl_set 538 .put = special_dig_in_iface_ctl_set
526}; 539};
527 540
541/* Digital output interface control for special firmware */
542static char *const special_dig_out_iface_labels[] = {
543 "S/PDIF Optical and Coaxial", "ADAT Optical"
544};
528static int special_dig_out_iface_ctl_info(struct snd_kcontrol *kctl, 545static int special_dig_out_iface_ctl_info(struct snd_kcontrol *kctl,
529 struct snd_ctl_elem_info *einf) 546 struct snd_ctl_elem_info *einf)
530{ 547{
531 einf->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED; 548 einf->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
532 einf->count = 1; 549 einf->count = 1;
533 einf->value.enumerated.items = ARRAY_SIZE(special_dig_iface_labels) - 1; 550 einf->value.enumerated.items = ARRAY_SIZE(special_dig_out_iface_labels);
534 551
535 if (einf->value.enumerated.item >= einf->value.enumerated.items) 552 if (einf->value.enumerated.item >= einf->value.enumerated.items)
536 einf->value.enumerated.item = einf->value.enumerated.items - 1; 553 einf->value.enumerated.item = einf->value.enumerated.items - 1;
537 554
538 strcpy(einf->value.enumerated.name, 555 strcpy(einf->value.enumerated.name,
539 special_dig_iface_labels[einf->value.enumerated.item + 1]); 556 special_dig_out_iface_labels[einf->value.enumerated.item]);
540 557
541 return 0; 558 return 0;
542} 559}
@@ -558,16 +575,20 @@ static int special_dig_out_iface_ctl_set(struct snd_kcontrol *kctl,
558 unsigned int id; 575 unsigned int id;
559 int err; 576 int err;
560 577
561 mutex_lock(&bebob->mutex);
562
563 id = uval->value.enumerated.item[0]; 578 id = uval->value.enumerated.item[0];
579 if (id >= ARRAY_SIZE(special_dig_out_iface_labels))
580 return -EINVAL;
581
582 mutex_lock(&bebob->mutex);
564 583
565 err = avc_maudio_set_special_clk(bebob, 584 err = avc_maudio_set_special_clk(bebob,
566 params->clk_src, 585 params->clk_src,
567 params->dig_in_fmt, 586 params->dig_in_fmt,
568 id, params->clk_lock); 587 id, params->clk_lock);
569 if (err >= 0) 588 if (err >= 0) {
570 special_stream_formation_set(bebob); 589 special_stream_formation_set(bebob);
590 err = 1;
591 }
571 592
572 mutex_unlock(&bebob->mutex); 593 mutex_unlock(&bebob->mutex);
573 return err; 594 return err;
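
The bebob_maudio hunks fix three related problems in these ALSA .put callbacks: the enumerated item is validated before bebob->mutex is taken (previously an out-of-range value returned while still holding the mutex), invalid values now return -EINVAL instead of 0, and the callbacks return 1 on a successful change while propagating negative error codes, as the control interface expects. A skeleton of that validate-then-lock pattern (hypothetical driver, pthread mutex standing in for bebob->mutex):

#include <errno.h>
#include <pthread.h>
#include <stdio.h>

#define N_ITEMS 3
static pthread_mutex_t dev_mutex = PTHREAD_MUTEX_INITIALIZER;

static int hw_set_clock(unsigned int id)   /* stands in for avc_maudio_set_special_clk() */
{
        (void)id;
        return 0;                          /* pretend the device accepted the value */
}

/* ALSA-style put: <0 error, 0 unchanged, 1 value changed. */
static int clk_ctl_put(unsigned int id)
{
        int err;

        if (id >= N_ITEMS)                 /* validate before locking */
                return -EINVAL;

        pthread_mutex_lock(&dev_mutex);
        err = hw_set_clock(id);
        pthread_mutex_unlock(&dev_mutex);  /* never return with the mutex held */

        if (err >= 0)
                err = 1;                   /* tell the control layer the value changed */
        return err;
}

int main(void)
{
        printf("%d %d\n", clk_ctl_put(1), clk_ctl_put(7));   /* 1 -22 */
        return 0;
}
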