-rw-r--r--  Documentation/input/event-codes.txt | 13
-rw-r--r--  MAINTAINERS | 16
-rw-r--r--  arch/arm64/mm/init.c | 17
-rw-r--r--  arch/parisc/include/uapi/asm/signal.h | 2
-rw-r--r--  arch/parisc/mm/init.c | 1
-rw-r--r--  arch/powerpc/include/asm/cputable.h | 1
-rw-r--r--  arch/powerpc/include/asm/kvm_book3s_64.h | 19
-rw-r--r--  arch/powerpc/include/asm/mmu-hash64.h | 3
-rw-r--r--  arch/powerpc/include/asm/ppc_asm.h | 2
-rw-r--r--  arch/powerpc/kernel/cputable.c | 20
-rw-r--r--  arch/powerpc/kvm/book3s_64_mmu_hv.c | 2
-rw-r--r--  arch/powerpc/kvm/book3s_hv_rm_mmu.c | 7
-rw-r--r--  arch/powerpc/kvm/book3s_hv_rmhandlers.S | 2
-rw-r--r--  arch/powerpc/kvm/book3s_interrupts.S | 4
-rw-r--r--  arch/powerpc/kvm/book3s_rmhandlers.S | 6
-rw-r--r--  arch/powerpc/kvm/book3s_rtas.c | 65
-rw-r--r--  arch/powerpc/kvm/e500_mmu_host.c | 3
-rw-r--r--  arch/powerpc/lib/mem_64.S | 2
-rw-r--r--  arch/powerpc/lib/sstep.c | 10
-rw-r--r--  arch/powerpc/platforms/pseries/dlpar.c | 1
-rw-r--r--  arch/powerpc/platforms/pseries/reconfig.c | 1
-rw-r--r--  arch/s390/include/asm/switch_to.h | 4
-rw-r--r--  arch/s390/kernel/head.S | 6
-rw-r--r--  arch/s390/kernel/ptrace.c | 12
-rw-r--r--  arch/s390/pci/pci.c | 49
-rw-r--r--  arch/sh/Makefile | 3
-rw-r--r--  arch/sparc/include/uapi/asm/unistd.h | 3
-rw-r--r--  arch/sparc/kernel/sys32.S | 1
-rw-r--r--  arch/sparc/kernel/systbls_32.S | 1
-rw-r--r--  arch/sparc/kernel/systbls_64.S | 2
-rw-r--r--  arch/x86/kvm/x86.c | 12
-rw-r--r--  arch/xtensa/kernel/vectors.S | 158
-rw-r--r--  arch/xtensa/kernel/vmlinux.lds.S | 4
-rw-r--r--  arch/xtensa/mm/init.c | 2
-rw-r--r--  block/blk-cgroup.c | 7
-rw-r--r--  block/blk-tag.c | 33
-rw-r--r--  block/compat_ioctl.c | 1
-rw-r--r--  drivers/ata/ahci.c | 1
-rw-r--r--  drivers/ata/libata-core.c | 12
-rw-r--r--  drivers/ata/libata-eh.c | 9
-rw-r--r--  drivers/ata/pata_ep93xx.c | 2
-rw-r--r--  drivers/block/drbd/drbd_nl.c | 6
-rw-r--r--  drivers/block/zram/zram_drv.c | 22
-rw-r--r--  drivers/gpio/gpio-rcar.c | 1
-rw-r--r--  drivers/ide/Kconfig | 5
-rw-r--r--  drivers/ide/ide-probe.c | 8
-rw-r--r--  drivers/input/input.c | 6
-rw-r--r--  drivers/input/keyboard/st-keyscan.c | 2
-rw-r--r--  drivers/input/misc/sirfsoc-onkey.c | 2
-rw-r--r--  drivers/input/mouse/synaptics.c | 5
-rw-r--r--  drivers/input/serio/i8042-x86ia64io.h | 7
-rw-r--r--  drivers/input/tablet/wacom_wac.c | 28
-rw-r--r--  drivers/input/touchscreen/ti_am335x_tsc.c | 5
-rw-r--r--  drivers/isdn/i4l/isdn_ppp.c | 20
-rw-r--r--  drivers/media/dvb-frontends/si2168.c | 16
-rw-r--r--  drivers/media/dvb-frontends/si2168_priv.h | 2
-rw-r--r--  drivers/media/dvb-frontends/tda10071.c | 12
-rw-r--r--  drivers/media/dvb-frontends/tda10071_priv.h | 1
-rw-r--r--  drivers/media/pci/saa7134/saa7134-empress.c | 2
-rw-r--r--  drivers/media/platform/davinci/vpif_capture.c | 1
-rw-r--r--  drivers/media/platform/davinci/vpif_display.c | 1
-rw-r--r--  drivers/media/tuners/si2157.c | 2
-rw-r--r--  drivers/media/usb/dvb-usb-v2/af9035.c | 40
-rw-r--r--  drivers/media/usb/gspca/pac7302.c | 1
-rw-r--r--  drivers/media/usb/hdpvr/hdpvr-video.c | 6
-rw-r--r--  drivers/media/v4l2-core/v4l2-dv-timings.c | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_cq.c | 7
-rw-r--r--  drivers/net/ethernet/realtek/r8169.c | 2
-rw-r--r--  drivers/net/ethernet/sun/sunvnet.c | 20
-rw-r--r--  drivers/net/ppp/ppp_generic.c | 22
-rw-r--r--  drivers/net/usb/huawei_cdc_ncm.c | 3
-rw-r--r--  drivers/net/usb/qmi_wwan.c | 2
-rw-r--r--  drivers/net/wan/x25_asy.c | 6
-rw-r--r--  drivers/net/xen-netback/netback.c | 86
-rw-r--r--  drivers/pinctrl/pinctrl-st.c | 2
-rw-r--r--  drivers/s390/char/raw3270.c | 1
-rw-r--r--  drivers/s390/crypto/ap_bus.c | 9
-rw-r--r--  drivers/staging/media/omap4iss/Kconfig | 2
-rw-r--r--  fs/coredump.c | 2
-rw-r--r--  fs/fuse/inode.c | 5
-rw-r--r--  fs/nfsd/nfs4xdr.c | 4
-rw-r--r--  fs/xattr.c | 2
-rw-r--r--  include/linux/libata.h | 1
-rw-r--r--  include/linux/pagemap.h | 12
-rw-r--r--  include/net/netfilter/nf_tables.h | 6
-rw-r--r--  include/net/netns/nftables.h | 2
-rw-r--r--  include/uapi/linux/fuse.h | 3
-rw-r--r--  kernel/trace/trace.c | 2
-rw-r--r--  kernel/trace/trace_clock.c | 9
-rw-r--r--  mm/hugetlb.c | 1
-rw-r--r--  mm/memory-failure.c | 4
-rw-r--r--  mm/memory.c | 3
-rw-r--r--  mm/rmap.c | 10
-rw-r--r--  mm/shmem.c | 102
-rw-r--r--  mm/slab_common.c | 2
-rw-r--r--  mm/truncate.c | 11
-rw-r--r--  net/batman-adv/bridge_loop_avoidance.c | 44
-rw-r--r--  net/batman-adv/soft-interface.c | 60
-rw-r--r--  net/batman-adv/translation-table.c | 26
-rw-r--r--  net/batman-adv/types.h | 2
-rw-r--r--  net/core/dev.c | 2
-rw-r--r--  net/dns_resolver/dns_query.c | 2
-rw-r--r--  net/ipv4/af_inet.c | 3
-rw-r--r--  net/ipv4/gre_offload.c | 3
-rw-r--r--  net/ipv4/ip_options.c | 4
-rw-r--r--  net/ipv4/tcp_offload.c | 2
-rw-r--r--  net/ipv6/tcpv6_offload.c | 2
-rw-r--r--  net/netfilter/nf_tables_api.c | 140
-rw-r--r--  net/netfilter/nf_tables_core.c | 10
-rw-r--r--  net/sched/cls_u32.c | 19
110 files changed, 941 insertions, 445 deletions
diff --git a/Documentation/input/event-codes.txt b/Documentation/input/event-codes.txt
index f1ea2c69648d..c587a966413e 100644
--- a/Documentation/input/event-codes.txt
+++ b/Documentation/input/event-codes.txt
@@ -281,6 +281,19 @@ gestures can normally be extracted from it.
 If INPUT_PROP_SEMI_MT is not set, the device is assumed to be a true MT
 device.
 
+INPUT_PROP_TOPBUTTONPAD:
+-----------------------
+Some laptops, most notably the Lenovo *40 series provide a trackstick
+device but do not have physical buttons associated with the trackstick
+device. Instead, the top area of the touchpad is marked to show
+visual/haptic areas for left, middle, right buttons intended to be used
+with the trackstick.
+
+If INPUT_PROP_TOPBUTTONPAD is set, userspace should emulate buttons
+accordingly. This property does not affect kernel behavior.
+The kernel does not provide button emulation for such devices but treats
+them as any other INPUT_PROP_BUTTONPAD device.
+
 Guidelines:
 ==========
 The guidelines below ensure proper single-touch and multi-finger functionality.
diff --git a/MAINTAINERS b/MAINTAINERS
index 61a8f486306b..86efa7e213c2 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -6956,6 +6956,12 @@ L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:	Maintained
 F:	drivers/pinctrl/pinctrl-at91.c
 
+PIN CONTROLLER - RENESAS
+M:	Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+L:	linux-sh@vger.kernel.org
+S:	Maintained
+F:	drivers/pinctrl/sh-pfc/
+
 PIN CONTROLLER - SAMSUNG
 M:	Tomasz Figa <t.figa@samsung.com>
 M:	Thomas Abraham <thomas.abraham@linaro.org>
@@ -8019,6 +8025,16 @@ F:	drivers/ata/
 F:	include/linux/ata.h
 F:	include/linux/libata.h
 
+SERIAL ATA AHCI PLATFORM devices support
+M:	Hans de Goede <hdegoede@redhat.com>
+M:	Tejun Heo <tj@kernel.org>
+L:	linux-ide@vger.kernel.org
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/tj/libata.git
+S:	Supported
+F:	drivers/ata/ahci_platform.c
+F:	drivers/ata/libahci_platform.c
+F:	include/linux/ahci_platform.h
+
 SERVER ENGINES 10Gbps iSCSI - BladeEngine 2 DRIVER
 M:	Jayamohan Kallickal <jayamohan.kallickal@emulex.com>
 L:	linux-scsi@vger.kernel.org
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index f43db8a69262..e90c5426fe14 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -60,6 +60,17 @@ static int __init early_initrd(char *p)
 early_param("initrd", early_initrd);
 #endif
 
+/*
+ * Return the maximum physical address for ZONE_DMA (DMA_BIT_MASK(32)). It
+ * currently assumes that for memory starting above 4G, 32-bit devices will
+ * use a DMA offset.
+ */
+static phys_addr_t max_zone_dma_phys(void)
+{
+	phys_addr_t offset = memblock_start_of_DRAM() & GENMASK_ULL(63, 32);
+	return min(offset + (1ULL << 32), memblock_end_of_DRAM());
+}
+
 static void __init zone_sizes_init(unsigned long min, unsigned long max)
 {
 	struct memblock_region *reg;
@@ -70,9 +81,7 @@ static void __init zone_sizes_init(unsigned long min, unsigned long max)
 
 	/* 4GB maximum for 32-bit only capable devices */
 	if (IS_ENABLED(CONFIG_ZONE_DMA)) {
-		unsigned long max_dma_phys =
-			(unsigned long)(dma_to_phys(NULL, DMA_BIT_MASK(32)) + 1);
-		max_dma = max(min, min(max, max_dma_phys >> PAGE_SHIFT));
+		max_dma = PFN_DOWN(max_zone_dma_phys());
 		zone_size[ZONE_DMA] = max_dma - min;
 	}
 	zone_size[ZONE_NORMAL] = max - max_dma;
@@ -146,7 +155,7 @@ void __init arm64_memblock_init(void)
 
 	/* 4GB maximum for 32-bit only capable devices */
 	if (IS_ENABLED(CONFIG_ZONE_DMA))
-		dma_phys_limit = dma_to_phys(NULL, DMA_BIT_MASK(32)) + 1;
+		dma_phys_limit = max_zone_dma_phys();
 	dma_contiguous_reserve(dma_phys_limit);
 
 	memblock_allow_resize();
diff --git a/arch/parisc/include/uapi/asm/signal.h b/arch/parisc/include/uapi/asm/signal.h
index a2fa297196bc..f5645d6a89f2 100644
--- a/arch/parisc/include/uapi/asm/signal.h
+++ b/arch/parisc/include/uapi/asm/signal.h
@@ -69,8 +69,6 @@
 #define SA_NOMASK	SA_NODEFER
 #define SA_ONESHOT	SA_RESETHAND
 
-#define SA_RESTORER	0x04000000 /* obsolete -- ignored */
-
 #define MINSIGSTKSZ	2048
 #define SIGSTKSZ	8192
 
diff --git a/arch/parisc/mm/init.c b/arch/parisc/mm/init.c
index ae085ad0fba0..0bef864264c0 100644
--- a/arch/parisc/mm/init.c
+++ b/arch/parisc/mm/init.c
@@ -728,7 +728,6 @@ static void __init pagetable_init(void)
 #endif
 
 	empty_zero_page = alloc_bootmem_pages(PAGE_SIZE);
-	memset(empty_zero_page, 0, PAGE_SIZE);
 }
 
 static void __init gateway_init(void)
diff --git a/arch/powerpc/include/asm/cputable.h b/arch/powerpc/include/asm/cputable.h
index bc2347774f0a..0fdd7eece6d9 100644
--- a/arch/powerpc/include/asm/cputable.h
+++ b/arch/powerpc/include/asm/cputable.h
@@ -447,6 +447,7 @@ extern const char *powerpc_base_platform;
 	    CPU_FTR_DBELL | CPU_FTR_HAS_PPR | CPU_FTR_DAWR | \
 	    CPU_FTR_ARCH_207S | CPU_FTR_TM_COMP)
 #define CPU_FTRS_POWER8E (CPU_FTRS_POWER8 | CPU_FTR_PMAO_BUG)
+#define CPU_FTRS_POWER8_DD1 (CPU_FTRS_POWER8 & ~CPU_FTR_DBELL)
 #define CPU_FTRS_CELL	(CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \
 	    CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \
 	    CPU_FTR_ALTIVEC_COMP | CPU_FTR_MMCRA | CPU_FTR_SMT | \
diff --git a/arch/powerpc/include/asm/kvm_book3s_64.h b/arch/powerpc/include/asm/kvm_book3s_64.h
index fddb72b48ce9..d645428a65a4 100644
--- a/arch/powerpc/include/asm/kvm_book3s_64.h
+++ b/arch/powerpc/include/asm/kvm_book3s_64.h
@@ -198,8 +198,10 @@ static inline unsigned long compute_tlbie_rb(unsigned long v, unsigned long r,
 	return rb;
 }
 
-static inline unsigned long hpte_page_size(unsigned long h, unsigned long l)
+static inline unsigned long __hpte_page_size(unsigned long h, unsigned long l,
+					     bool is_base_size)
 {
+
 	int size, a_psize;
 	/* Look at the 8 bit LP value */
 	unsigned int lp = (l >> LP_SHIFT) & ((1 << LP_BITS) - 1);
@@ -214,14 +216,27 @@ static inline unsigned long hpte_page_size(unsigned long h, unsigned long l)
 				continue;
 
 			a_psize = __hpte_actual_psize(lp, size);
-			if (a_psize != -1)
+			if (a_psize != -1) {
+				if (is_base_size)
+					return 1ul << mmu_psize_defs[size].shift;
 				return 1ul << mmu_psize_defs[a_psize].shift;
+			}
 		}
 
 	}
 	return 0;
 }
 
+static inline unsigned long hpte_page_size(unsigned long h, unsigned long l)
+{
+	return __hpte_page_size(h, l, 0);
+}
+
+static inline unsigned long hpte_base_page_size(unsigned long h, unsigned long l)
+{
+	return __hpte_page_size(h, l, 1);
+}
+
 static inline unsigned long hpte_rpn(unsigned long ptel, unsigned long psize)
 {
 	return ((ptel & HPTE_R_RPN) & ~(psize - 1)) >> PAGE_SHIFT;
diff --git a/arch/powerpc/include/asm/mmu-hash64.h b/arch/powerpc/include/asm/mmu-hash64.h
index 807014dde821..c2b4dcf23d03 100644
--- a/arch/powerpc/include/asm/mmu-hash64.h
+++ b/arch/powerpc/include/asm/mmu-hash64.h
@@ -22,6 +22,7 @@
  */
 #include <asm/pgtable-ppc64.h>
 #include <asm/bug.h>
+#include <asm/processor.h>
 
 /*
  * Segment table
@@ -496,7 +497,7 @@ extern void slb_set_size(u16 size);
  */
 struct subpage_prot_table {
 	unsigned long maxaddr;	/* only addresses < this are protected */
-	unsigned int **protptrs[2];
+	unsigned int **protptrs[(TASK_SIZE_USER64 >> 43)];
 	unsigned int *low_prot[4];
 };
 
diff --git a/arch/powerpc/include/asm/ppc_asm.h b/arch/powerpc/include/asm/ppc_asm.h
index 9ea266eae33e..7e4612528546 100644
--- a/arch/powerpc/include/asm/ppc_asm.h
+++ b/arch/powerpc/include/asm/ppc_asm.h
@@ -277,6 +277,8 @@ n:
 	.globl n;	\
 n:
 
+#define _GLOBAL_TOC(name) _GLOBAL(name)
+
 #define _KPROBE(n)	\
 	.section ".kprobes.text","a";	\
 	.globl	n;	\
diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c
index 965291b4c2fa..0c157642c2a1 100644
--- a/arch/powerpc/kernel/cputable.c
+++ b/arch/powerpc/kernel/cputable.c
@@ -527,6 +527,26 @@ static struct cpu_spec __initdata cpu_specs[] = {
 		.machine_check_early	= __machine_check_early_realmode_p8,
 		.platform		= "power8",
 	},
+	{	/* Power8 DD1: Does not support doorbell IPIs */
+		.pvr_mask		= 0xffffff00,
+		.pvr_value		= 0x004d0100,
+		.cpu_name		= "POWER8 (raw)",
+		.cpu_features		= CPU_FTRS_POWER8_DD1,
+		.cpu_user_features	= COMMON_USER_POWER8,
+		.cpu_user_features2	= COMMON_USER2_POWER8,
+		.mmu_features		= MMU_FTRS_POWER8,
+		.icache_bsize		= 128,
+		.dcache_bsize		= 128,
+		.num_pmcs		= 6,
+		.pmc_type		= PPC_PMC_IBM,
+		.oprofile_cpu_type	= "ppc64/power8",
+		.oprofile_type		= PPC_OPROFILE_INVALID,
+		.cpu_setup		= __setup_cpu_power8,
+		.cpu_restore		= __restore_cpu_power8,
+		.flush_tlb		= __flush_tlb_power8,
+		.machine_check_early	= __machine_check_early_realmode_p8,
+		.platform		= "power8",
+	},
 	{	/* Power8 */
 		.pvr_mask		= 0xffff0000,
 		.pvr_value		= 0x004d0000,
diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c
index 80561074078d..68468d695f12 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_hv.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c
@@ -1562,7 +1562,7 @@ static ssize_t kvm_htab_write(struct file *file, const char __user *buf,
 			goto out;
 		}
 		if (!rma_setup && is_vrma_hpte(v)) {
-			unsigned long psize = hpte_page_size(v, r);
+			unsigned long psize = hpte_base_page_size(v, r);
 			unsigned long senc = slb_pgsize_encoding(psize);
 			unsigned long lpcr;
 
diff --git a/arch/powerpc/kvm/book3s_hv_rm_mmu.c b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
index 6e6224318c36..5a24d3c2b6b8 100644
--- a/arch/powerpc/kvm/book3s_hv_rm_mmu.c
+++ b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
@@ -814,13 +814,10 @@ long kvmppc_hv_find_lock_hpte(struct kvm *kvm, gva_t eaddr, unsigned long slb_v,
 		r = hpte[i+1];
 
 		/*
-		 * Check the HPTE again, including large page size
-		 * Since we don't currently allow any MPSS (mixed
-		 * page-size segment) page sizes, it is sufficient
-		 * to check against the actual page size.
+		 * Check the HPTE again, including base page size
 		 */
 		if ((v & valid) && (v & mask) == val &&
-		    hpte_page_size(v, r) == (1ul << pshift))
+		    hpte_base_page_size(v, r) == (1ul << pshift))
 			/* Return with the HPTE still locked */
 			return (hash << 3) + (i >> 1);
 
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
index 868347ef09fd..558a67df8126 100644
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -48,7 +48,7 @@
  *
  * LR = return address to continue at after eventually re-enabling MMU
  */
-_GLOBAL(kvmppc_hv_entry_trampoline)
+_GLOBAL_TOC(kvmppc_hv_entry_trampoline)
 	mflr	r0
 	std	r0, PPC_LR_STKOFF(r1)
 	stdu	r1, -112(r1)
diff --git a/arch/powerpc/kvm/book3s_interrupts.S b/arch/powerpc/kvm/book3s_interrupts.S
index e2c29e381dc7..d044b8b7c69d 100644
--- a/arch/powerpc/kvm/book3s_interrupts.S
+++ b/arch/powerpc/kvm/book3s_interrupts.S
@@ -25,7 +25,11 @@
 #include <asm/exception-64s.h>
 
 #if defined(CONFIG_PPC_BOOK3S_64)
+#if defined(_CALL_ELF) && _CALL_ELF == 2
+#define FUNC(name) name
+#else
 #define FUNC(name) GLUE(.,name)
+#endif
 #define GET_SHADOW_VCPU(reg) addi reg, r13, PACA_SVCPU
 
 #elif defined(CONFIG_PPC_BOOK3S_32)
diff --git a/arch/powerpc/kvm/book3s_rmhandlers.S b/arch/powerpc/kvm/book3s_rmhandlers.S
index 9eec675220e6..16c4d88ba27d 100644
--- a/arch/powerpc/kvm/book3s_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_rmhandlers.S
@@ -36,7 +36,11 @@
 
 #if defined(CONFIG_PPC_BOOK3S_64)
 
+#if defined(_CALL_ELF) && _CALL_ELF == 2
+#define FUNC(name) name
+#else
 #define FUNC(name) GLUE(.,name)
+#endif
 
 #elif defined(CONFIG_PPC_BOOK3S_32)
 
@@ -146,7 +150,7 @@ kvmppc_handler_skip_ins:
  * On entry, r4 contains the guest shadow MSR
  * MSR.EE has to be 0 when calling this function
  */
-_GLOBAL(kvmppc_entry_trampoline)
+_GLOBAL_TOC(kvmppc_entry_trampoline)
 	mfmsr	r5
 	LOAD_REG_ADDR(r7, kvmppc_handler_trampoline_enter)
 	toreal(r7)
diff --git a/arch/powerpc/kvm/book3s_rtas.c b/arch/powerpc/kvm/book3s_rtas.c
index edb14ba992b3..ef27fbd5d9c5 100644
--- a/arch/powerpc/kvm/book3s_rtas.c
+++ b/arch/powerpc/kvm/book3s_rtas.c
@@ -23,20 +23,20 @@ static void kvm_rtas_set_xive(struct kvm_vcpu *vcpu, struct rtas_args *args)
 	u32 irq, server, priority;
 	int rc;
 
-	if (args->nargs != 3 || args->nret != 1) {
+	if (be32_to_cpu(args->nargs) != 3 || be32_to_cpu(args->nret) != 1) {
 		rc = -3;
 		goto out;
 	}
 
-	irq = args->args[0];
-	server = args->args[1];
-	priority = args->args[2];
+	irq = be32_to_cpu(args->args[0]);
+	server = be32_to_cpu(args->args[1]);
+	priority = be32_to_cpu(args->args[2]);
 
 	rc = kvmppc_xics_set_xive(vcpu->kvm, irq, server, priority);
 	if (rc)
 		rc = -3;
 out:
-	args->rets[0] = rc;
+	args->rets[0] = cpu_to_be32(rc);
 }
 
 static void kvm_rtas_get_xive(struct kvm_vcpu *vcpu, struct rtas_args *args)
@@ -44,12 +44,12 @@ static void kvm_rtas_get_xive(struct kvm_vcpu *vcpu, struct rtas_args *args)
 	u32 irq, server, priority;
 	int rc;
 
-	if (args->nargs != 1 || args->nret != 3) {
+	if (be32_to_cpu(args->nargs) != 1 || be32_to_cpu(args->nret) != 3) {
 		rc = -3;
 		goto out;
 	}
 
-	irq = args->args[0];
+	irq = be32_to_cpu(args->args[0]);
 
 	server = priority = 0;
 	rc = kvmppc_xics_get_xive(vcpu->kvm, irq, &server, &priority);
@@ -58,10 +58,10 @@ static void kvm_rtas_get_xive(struct kvm_vcpu *vcpu, struct rtas_args *args)
 		goto out;
 	}
 
-	args->rets[1] = server;
-	args->rets[2] = priority;
+	args->rets[1] = cpu_to_be32(server);
+	args->rets[2] = cpu_to_be32(priority);
 out:
-	args->rets[0] = rc;
+	args->rets[0] = cpu_to_be32(rc);
 }
 
 static void kvm_rtas_int_off(struct kvm_vcpu *vcpu, struct rtas_args *args)
@@ -69,18 +69,18 @@ static void kvm_rtas_int_off(struct kvm_vcpu *vcpu, struct rtas_args *args)
 	u32 irq;
 	int rc;
 
-	if (args->nargs != 1 || args->nret != 1) {
+	if (be32_to_cpu(args->nargs) != 1 || be32_to_cpu(args->nret) != 1) {
 		rc = -3;
 		goto out;
 	}
 
-	irq = args->args[0];
+	irq = be32_to_cpu(args->args[0]);
 
 	rc = kvmppc_xics_int_off(vcpu->kvm, irq);
 	if (rc)
 		rc = -3;
 out:
-	args->rets[0] = rc;
+	args->rets[0] = cpu_to_be32(rc);
 }
 
 static void kvm_rtas_int_on(struct kvm_vcpu *vcpu, struct rtas_args *args)
@@ -88,18 +88,18 @@ static void kvm_rtas_int_on(struct kvm_vcpu *vcpu, struct rtas_args *args)
 	u32 irq;
 	int rc;
 
-	if (args->nargs != 1 || args->nret != 1) {
+	if (be32_to_cpu(args->nargs) != 1 || be32_to_cpu(args->nret) != 1) {
 		rc = -3;
 		goto out;
 	}
 
-	irq = args->args[0];
+	irq = be32_to_cpu(args->args[0]);
 
 	rc = kvmppc_xics_int_on(vcpu->kvm, irq);
 	if (rc)
 		rc = -3;
 out:
-	args->rets[0] = rc;
+	args->rets[0] = cpu_to_be32(rc);
 }
 #endif /* CONFIG_KVM_XICS */
 
@@ -205,32 +205,6 @@ int kvm_vm_ioctl_rtas_define_token(struct kvm *kvm, void __user *argp)
 	return rc;
 }
 
-static void kvmppc_rtas_swap_endian_in(struct rtas_args *args)
-{
-#ifdef __LITTLE_ENDIAN__
-	int i;
-
-	args->token = be32_to_cpu(args->token);
-	args->nargs = be32_to_cpu(args->nargs);
-	args->nret = be32_to_cpu(args->nret);
-	for (i = 0; i < args->nargs; i++)
-		args->args[i] = be32_to_cpu(args->args[i]);
-#endif
-}
-
-static void kvmppc_rtas_swap_endian_out(struct rtas_args *args)
-{
-#ifdef __LITTLE_ENDIAN__
-	int i;
-
-	for (i = 0; i < args->nret; i++)
-		args->args[i] = cpu_to_be32(args->args[i]);
-	args->token = cpu_to_be32(args->token);
-	args->nargs = cpu_to_be32(args->nargs);
-	args->nret = cpu_to_be32(args->nret);
-#endif
-}
-
 int kvmppc_rtas_hcall(struct kvm_vcpu *vcpu)
 {
 	struct rtas_token_definition *d;
@@ -249,8 +223,6 @@ int kvmppc_rtas_hcall(struct kvm_vcpu *vcpu)
 	if (rc)
 		goto fail;
 
-	kvmppc_rtas_swap_endian_in(&args);
-
 	/*
 	 * args->rets is a pointer into args->args. Now that we've
 	 * copied args we need to fix it up to point into our copy,
@@ -258,13 +230,13 @@ int kvmppc_rtas_hcall(struct kvm_vcpu *vcpu)
 	 * value so we can restore it on the way out.
 	 */
 	orig_rets = args.rets;
-	args.rets = &args.args[args.nargs];
+	args.rets = &args.args[be32_to_cpu(args.nargs)];
 
 	mutex_lock(&vcpu->kvm->lock);
 
 	rc = -ENOENT;
 	list_for_each_entry(d, &vcpu->kvm->arch.rtas_tokens, list) {
-		if (d->token == args.token) {
+		if (d->token == be32_to_cpu(args.token)) {
 			d->handler->handler(vcpu, &args);
 			rc = 0;
 			break;
@@ -275,7 +247,6 @@ int kvmppc_rtas_hcall(struct kvm_vcpu *vcpu)
 
 	if (rc == 0) {
 		args.rets = orig_rets;
-		kvmppc_rtas_swap_endian_out(&args);
 		rc = kvm_write_guest(vcpu->kvm, args_phys, &args, sizeof(args));
 		if (rc)
 			goto fail;
diff --git a/arch/powerpc/kvm/e500_mmu_host.c b/arch/powerpc/kvm/e500_mmu_host.c
index dd2cc03f406f..86903d3f5a03 100644
--- a/arch/powerpc/kvm/e500_mmu_host.c
+++ b/arch/powerpc/kvm/e500_mmu_host.c
@@ -473,7 +473,8 @@ static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
 		if (printk_ratelimit())
 			pr_err("%s: pte not present: gfn %lx, pfn %lx\n",
 				__func__, (long)gfn, pfn);
-		return -EINVAL;
+		ret = -EINVAL;
+		goto out;
 	}
 	kvmppc_e500_ref_setup(ref, gtlbe, pfn, wimg);
 
diff --git a/arch/powerpc/lib/mem_64.S b/arch/powerpc/lib/mem_64.S
index 0738f96befbf..43435c6892fb 100644
--- a/arch/powerpc/lib/mem_64.S
+++ b/arch/powerpc/lib/mem_64.S
@@ -77,7 +77,7 @@ _GLOBAL(memset)
 	stb	r4,0(r6)
 	blr
 
-_GLOBAL(memmove)
+_GLOBAL_TOC(memmove)
 	cmplw	0,r3,r4
 	bgt	backwards_memcpy
 	b	memcpy
diff --git a/arch/powerpc/lib/sstep.c b/arch/powerpc/lib/sstep.c
index 412dd46dd0b7..5c09f365c842 100644
--- a/arch/powerpc/lib/sstep.c
+++ b/arch/powerpc/lib/sstep.c
@@ -1198,7 +1198,7 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr)
 		sh = regs->gpr[rb] & 0x3f;
 		ival = (signed int) regs->gpr[rd];
 		regs->gpr[ra] = ival >> (sh < 32 ? sh : 31);
-		if (ival < 0 && (sh >= 32 || (ival & ((1 << sh) - 1)) != 0))
+		if (ival < 0 && (sh >= 32 || (ival & ((1ul << sh) - 1)) != 0))
 			regs->xer |= XER_CA;
 		else
 			regs->xer &= ~XER_CA;
@@ -1208,7 +1208,7 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr)
 		sh = rb;
 		ival = (signed int) regs->gpr[rd];
 		regs->gpr[ra] = ival >> sh;
-		if (ival < 0 && (ival & ((1 << sh) - 1)) != 0)
+		if (ival < 0 && (ival & ((1ul << sh) - 1)) != 0)
 			regs->xer |= XER_CA;
 		else
 			regs->xer &= ~XER_CA;
@@ -1216,7 +1216,7 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr)
 
 #ifdef __powerpc64__
 	case 27:	/* sld */
-		sh = regs->gpr[rd] & 0x7f;
+		sh = regs->gpr[rb] & 0x7f;
 		if (sh < 64)
 			regs->gpr[ra] = regs->gpr[rd] << sh;
 		else
@@ -1235,7 +1235,7 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr)
 		sh = regs->gpr[rb] & 0x7f;
 		ival = (signed long int) regs->gpr[rd];
 		regs->gpr[ra] = ival >> (sh < 64 ? sh : 63);
-		if (ival < 0 && (sh >= 64 || (ival & ((1 << sh) - 1)) != 0))
+		if (ival < 0 && (sh >= 64 || (ival & ((1ul << sh) - 1)) != 0))
 			regs->xer |= XER_CA;
 		else
 			regs->xer &= ~XER_CA;
@@ -1246,7 +1246,7 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr)
 		sh = rb | ((instr & 2) << 4);
 		ival = (signed long int) regs->gpr[rd];
 		regs->gpr[ra] = ival >> sh;
-		if (ival < 0 && (ival & ((1 << sh) - 1)) != 0)
+		if (ival < 0 && (ival & ((1ul << sh) - 1)) != 0)
 			regs->xer |= XER_CA;
 		else
 			regs->xer &= ~XER_CA;
diff --git a/arch/powerpc/platforms/pseries/dlpar.c b/arch/powerpc/platforms/pseries/dlpar.c
index 022b38e6a80b..2d0b4d68a40a 100644
--- a/arch/powerpc/platforms/pseries/dlpar.c
+++ b/arch/powerpc/platforms/pseries/dlpar.c
@@ -86,6 +86,7 @@ static struct device_node *dlpar_parse_cc_node(struct cc_workarea *ccwa,
 	}
 
 	of_node_set_flag(dn, OF_DYNAMIC);
+	of_node_init(dn);
 
 	return dn;
 }
diff --git a/arch/powerpc/platforms/pseries/reconfig.c b/arch/powerpc/platforms/pseries/reconfig.c
index 0435bb65d0aa..1c0a60d98867 100644
--- a/arch/powerpc/platforms/pseries/reconfig.c
+++ b/arch/powerpc/platforms/pseries/reconfig.c
@@ -69,6 +69,7 @@ static int pSeries_reconfig_add_node(const char *path, struct property *proplist
 
 	np->properties = proplist;
 	of_node_set_flag(np, OF_DYNAMIC);
+	of_node_init(np);
 
 	np->parent = derive_parent(path);
 	if (IS_ERR(np->parent)) {
diff --git a/arch/s390/include/asm/switch_to.h b/arch/s390/include/asm/switch_to.h
index df38c70cd59e..18ea9e3f8142 100644
--- a/arch/s390/include/asm/switch_to.h
+++ b/arch/s390/include/asm/switch_to.h
@@ -51,8 +51,8 @@ static inline int restore_fp_ctl(u32 *fpc)
 		return 0;
 
 	asm volatile(
-		"0:	lfpc    %1\n"
-		"	la	%0,0\n"
+		"	lfpc    %1\n"
+		"0:	la	%0,0\n"
 		"1:\n"
 		EX_TABLE(0b,1b)
 		: "=d" (rc) : "Q" (*fpc), "0" (-EINVAL));
diff --git a/arch/s390/kernel/head.S b/arch/s390/kernel/head.S
index 7ba7d6784510..e88d35d74950 100644
--- a/arch/s390/kernel/head.S
+++ b/arch/s390/kernel/head.S
@@ -437,11 +437,11 @@ ENTRY(startup_kdump)
 
 #if defined(CONFIG_64BIT)
 #if defined(CONFIG_MARCH_ZEC12)
-	.long 3, 0xc100efea, 0xf46ce800, 0x00400000
+	.long 3, 0xc100eff2, 0xf46ce800, 0x00400000
 #elif defined(CONFIG_MARCH_Z196)
-	.long 2, 0xc100efea, 0xf46c0000
+	.long 2, 0xc100eff2, 0xf46c0000
 #elif defined(CONFIG_MARCH_Z10)
-	.long 2, 0xc100efea, 0xf0680000
+	.long 2, 0xc100eff2, 0xf0680000
 #elif defined(CONFIG_MARCH_Z9_109)
 	.long 1, 0xc100efc2
 #elif defined(CONFIG_MARCH_Z990)
diff --git a/arch/s390/kernel/ptrace.c b/arch/s390/kernel/ptrace.c
index 2d716734b5b1..5dc7ad9e2fbf 100644
--- a/arch/s390/kernel/ptrace.c
+++ b/arch/s390/kernel/ptrace.c
@@ -334,9 +334,14 @@ static int __poke_user(struct task_struct *child, addr_t addr, addr_t data)
 		unsigned long mask = PSW_MASK_USER;
 
 		mask |= is_ri_task(child) ? PSW_MASK_RI : 0;
-		if ((data & ~mask) != PSW_USER_BITS)
+		if ((data ^ PSW_USER_BITS) & ~mask)
+			/* Invalid psw mask. */
+			return -EINVAL;
+		if ((data & PSW_MASK_ASC) == PSW_ASC_HOME)
+			/* Invalid address-space-control bits */
 			return -EINVAL;
 		if ((data & PSW_MASK_EA) && !(data & PSW_MASK_BA))
+			/* Invalid addressing mode bits */
 			return -EINVAL;
 	}
 	*(addr_t *)((addr_t) &task_pt_regs(child)->psw + addr) = data;
@@ -672,9 +677,12 @@ static int __poke_user_compat(struct task_struct *child,
 
 		mask |= is_ri_task(child) ? PSW32_MASK_RI : 0;
 		/* Build a 64 bit psw mask from 31 bit mask. */
-		if ((tmp & ~mask) != PSW32_USER_BITS)
+		if ((tmp ^ PSW32_USER_BITS) & ~mask)
 			/* Invalid psw mask. */
 			return -EINVAL;
+		if ((data & PSW32_MASK_ASC) == PSW32_ASC_HOME)
+			/* Invalid address-space-control bits */
+			return -EINVAL;
 		regs->psw.mask = (regs->psw.mask & ~PSW_MASK_USER) |
 			(regs->psw.mask & PSW_MASK_BA) |
 			(__u64)(tmp & mask) << 32;
diff --git a/arch/s390/pci/pci.c b/arch/s390/pci/pci.c
index 9ddc51eeb8d6..30de42730b2f 100644
--- a/arch/s390/pci/pci.c
+++ b/arch/s390/pci/pci.c
@@ -48,13 +48,10 @@
 static LIST_HEAD(zpci_list);
 static DEFINE_SPINLOCK(zpci_list_lock);
 
-static void zpci_enable_irq(struct irq_data *data);
-static void zpci_disable_irq(struct irq_data *data);
-
 static struct irq_chip zpci_irq_chip = {
 	.name = "zPCI",
-	.irq_unmask = zpci_enable_irq,
-	.irq_mask = zpci_disable_irq,
+	.irq_unmask = unmask_msi_irq,
+	.irq_mask = mask_msi_irq,
 };
 
 static DECLARE_BITMAP(zpci_domain, ZPCI_NR_DEVICES);
@@ -244,43 +241,6 @@ static int zpci_cfg_store(struct zpci_dev *zdev, int offset, u32 val, u8 len)
 	return rc;
 }
 
-static int zpci_msi_set_mask_bits(struct msi_desc *msi, u32 mask, u32 flag)
-{
-	int offset, pos;
-	u32 mask_bits;
-
-	if (msi->msi_attrib.is_msix) {
-		offset = msi->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE +
-			PCI_MSIX_ENTRY_VECTOR_CTRL;
-		msi->masked = readl(msi->mask_base + offset);
-		writel(flag, msi->mask_base + offset);
-	} else if (msi->msi_attrib.maskbit) {
-		pos = (long) msi->mask_base;
-		pci_read_config_dword(msi->dev, pos, &mask_bits);
-		mask_bits &= ~(mask);
-		mask_bits |= flag & mask;
-		pci_write_config_dword(msi->dev, pos, mask_bits);
-	} else
-		return 0;
-
-	msi->msi_attrib.maskbit = !!flag;
-	return 1;
-}
-
-static void zpci_enable_irq(struct irq_data *data)
-{
-	struct msi_desc *msi = irq_get_msi_desc(data->irq);
-
-	zpci_msi_set_mask_bits(msi, 1, 0);
-}
-
-static void zpci_disable_irq(struct irq_data *data)
-{
-	struct msi_desc *msi = irq_get_msi_desc(data->irq);
-
-	zpci_msi_set_mask_bits(msi, 1, 1);
-}
-
 void pcibios_fixup_bus(struct pci_bus *bus)
 {
 }
@@ -487,7 +447,10 @@ void arch_teardown_msi_irqs(struct pci_dev *pdev)
 
 	/* Release MSI interrupts */
 	list_for_each_entry(msi, &pdev->msi_list, list) {
-		zpci_msi_set_mask_bits(msi, 1, 1);
+		if (msi->msi_attrib.is_msix)
+			default_msix_mask_irq(msi, 1);
+		else
+			default_msi_mask_irq(msi, 1, 1);
 		irq_set_msi_desc(msi->irq, NULL);
 		irq_free_desc(msi->irq);
 		msi->msg.address_lo = 0;
diff --git a/arch/sh/Makefile b/arch/sh/Makefile
index d4d16e4be07c..bf5b3f5f4962 100644
--- a/arch/sh/Makefile
+++ b/arch/sh/Makefile
@@ -32,7 +32,8 @@ endif
 
 cflags-$(CONFIG_CPU_SH2)	:= $(call cc-option,-m2,)
 cflags-$(CONFIG_CPU_SH2A)	+= $(call cc-option,-m2a,) \
-				   $(call cc-option,-m2a-nofpu,)
+				   $(call cc-option,-m2a-nofpu,) \
+				   $(call cc-option,-m4-nofpu,)
 cflags-$(CONFIG_CPU_SH3)	:= $(call cc-option,-m3,)
 cflags-$(CONFIG_CPU_SH4)	:= $(call cc-option,-m4,) \
 	$(call cc-option,-mno-implicit-fp,-m4-nofpu)
diff --git a/arch/sparc/include/uapi/asm/unistd.h b/arch/sparc/include/uapi/asm/unistd.h
index b73274fb961a..42f2bca1d338 100644
--- a/arch/sparc/include/uapi/asm/unistd.h
+++ b/arch/sparc/include/uapi/asm/unistd.h
@@ -410,8 +410,9 @@
 #define __NR_finit_module	342
 #define __NR_sched_setattr	343
 #define __NR_sched_getattr	344
+#define __NR_renameat2		345
 
-#define NR_syscalls		345
+#define NR_syscalls		346
 
 /* Bitmask values returned from kern_features system call. */
 #define KERN_FEATURE_MIXED_MODE_STACK	0x00000001
diff --git a/arch/sparc/kernel/sys32.S b/arch/sparc/kernel/sys32.S
index d066eb18650c..f834224208ed 100644
--- a/arch/sparc/kernel/sys32.S
+++ b/arch/sparc/kernel/sys32.S
@@ -48,6 +48,7 @@ SIGN1(sys32_futex, compat_sys_futex, %o1)
 SIGN1(sys32_recvfrom, compat_sys_recvfrom, %o0)
 SIGN1(sys32_recvmsg, compat_sys_recvmsg, %o0)
 SIGN1(sys32_sendmsg, compat_sys_sendmsg, %o0)
+SIGN2(sys32_renameat2, sys_renameat2, %o0, %o2)
 
 	.globl		sys32_mmap2
 sys32_mmap2:
diff --git a/arch/sparc/kernel/systbls_32.S b/arch/sparc/kernel/systbls_32.S
index 151ace8766cc..85fe9b1087cd 100644
--- a/arch/sparc/kernel/systbls_32.S
+++ b/arch/sparc/kernel/systbls_32.S
@@ -86,3 +86,4 @@ sys_call_table:
 /*330*/	.long sys_fanotify_mark, sys_prlimit64, sys_name_to_handle_at, sys_open_by_handle_at, sys_clock_adjtime
 /*335*/	.long sys_syncfs, sys_sendmmsg, sys_setns, sys_process_vm_readv, sys_process_vm_writev
 /*340*/	.long sys_ni_syscall, sys_kcmp, sys_finit_module, sys_sched_setattr, sys_sched_getattr
+/*345*/	.long sys_renameat2
diff --git a/arch/sparc/kernel/systbls_64.S b/arch/sparc/kernel/systbls_64.S
index 4bd4e2bb26cf..33ecba2826ea 100644
--- a/arch/sparc/kernel/systbls_64.S
+++ b/arch/sparc/kernel/systbls_64.S
@@ -87,6 +87,7 @@ sys_call_table32:
 /*330*/	.word compat_sys_fanotify_mark, sys_prlimit64, sys_name_to_handle_at, compat_sys_open_by_handle_at, compat_sys_clock_adjtime
 	.word sys_syncfs, compat_sys_sendmmsg, sys_setns, compat_sys_process_vm_readv, compat_sys_process_vm_writev
 /*340*/	.word sys_kern_features, sys_kcmp, sys_finit_module, sys_sched_setattr, sys_sched_getattr
+	.word sys32_renameat2
 
 #endif /* CONFIG_COMPAT */
 
@@ -165,3 +166,4 @@ sys_call_table:
 /*330*/	.word sys_fanotify_mark, sys_prlimit64, sys_name_to_handle_at, sys_open_by_handle_at, sys_clock_adjtime
 	.word sys_syncfs, sys_sendmmsg, sys_setns, sys_process_vm_readv, sys_process_vm_writev
 /*340*/	.word sys_kern_features, sys_kcmp, sys_finit_module, sys_sched_setattr, sys_sched_getattr
+	.word sys_renameat2
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index f6449334ec45..ef432f891d30 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -5887,6 +5887,18 @@ static int inject_pending_event(struct kvm_vcpu *vcpu, bool req_int_win)
 			kvm_x86_ops->set_nmi(vcpu);
 		}
 	} else if (kvm_cpu_has_injectable_intr(vcpu)) {
+		/*
+		 * Because interrupts can be injected asynchronously, we are
+		 * calling check_nested_events again here to avoid a race condition.
+		 * See https://lkml.org/lkml/2014/7/2/60 for discussion about this
+		 * proposal and current concerns. Perhaps we should be setting
+		 * KVM_REQ_EVENT only on certain events and not unconditionally?
+		 */
+		if (is_guest_mode(vcpu) && kvm_x86_ops->check_nested_events) {
+			r = kvm_x86_ops->check_nested_events(vcpu, req_int_win);
+			if (r != 0)
+				return r;
+		}
 		if (kvm_x86_ops->interrupt_allowed(vcpu)) {
 			kvm_queue_interrupt(vcpu, kvm_cpu_get_interrupt(vcpu),
 					    false);
diff --git a/arch/xtensa/kernel/vectors.S b/arch/xtensa/kernel/vectors.S
index f9e1ec346e35..8453e6e39895 100644
--- a/arch/xtensa/kernel/vectors.S
+++ b/arch/xtensa/kernel/vectors.S
@@ -376,38 +376,42 @@ _DoubleExceptionVector_WindowOverflow:
 	beqz	a2, 1f		# if at start of vector, don't restore
 
 	addi	a0, a0, -128
-	bbsi	a0, 8, 1f	# don't restore except for overflow 8 and 12
-	bbsi	a0, 7, 2f
+	bbsi.l	a0, 8, 1f	# don't restore except for overflow 8 and 12
+
+	/*
+	 * This fixup handler is for the extremely unlikely case where the
+	 * overflow handler's reference thru a0 gets a hardware TLB refill
+	 * that bumps out the (distinct, aliasing) TLB entry that mapped its
+	 * prior references thru a9/a13, and where our reference now thru
+	 * a9/a13 gets a 2nd-level miss exception (not hardware TLB refill).
+	 */
+	movi	a2, window_overflow_restore_a0_fixup
+	s32i	a2, a3, EXC_TABLE_FIXUP
+	l32i	a2, a3, EXC_TABLE_DOUBLE_SAVE
+	xsr	a3, excsave1
+
+	bbsi.l	a0, 7, 2f
 
 	/*
 	 * Restore a0 as saved by _WindowOverflow8().
-	 *
-	 * FIXME: we really need a fixup handler for this L32E,
-	 * for the extremely unlikely case where the overflow handler's
-	 * reference thru a0 gets a hardware TLB refill that bumps out
-	 * the (distinct, aliasing) TLB entry that mapped its prior
-	 * references thru a9, and where our reference now thru a9
-	 * gets a 2nd-level miss exception (not hardware TLB refill).
 	 */
 
-	l32e	a2, a9, -16
-	wsr	a2, depc	# replace the saved a0
-	j	1f
+	l32e	a0, a9, -16
+	wsr	a0, depc	# replace the saved a0
+	j	3f
 
 2:
 	/*
 	 * Restore a0 as saved by _WindowOverflow12().
-	 *
-	 * FIXME: we really need a fixup handler for this L32E,
-	 * for the extremely unlikely case where the overflow handler's
-	 * reference thru a0 gets a hardware TLB refill that bumps out
-	 * the (distinct, aliasing) TLB entry that mapped its prior
-	 * references thru a13, and where our reference now thru a13
-	 * gets a 2nd-level miss exception (not hardware TLB refill).
 	 */
 
-	l32e	a2, a13, -16
-	wsr	a2, depc	# replace the saved a0
+	l32e	a0, a13, -16
+	wsr	a0, depc	# replace the saved a0
+3:
+	xsr	a3, excsave1
+	movi	a0, 0
+	s32i	a0, a3, EXC_TABLE_FIXUP
+	s32i	a2, a3, EXC_TABLE_DOUBLE_SAVE
 1:
 	/*
 	 * Restore WindowBase while leaving all address registers restored.
@@ -449,6 +453,7 @@ _DoubleExceptionVector_WindowOverflow:
 
 	s32i	a0, a2, PT_DEPC
 
+_DoubleExceptionVector_handle_exception:
 	addx4	a0, a0, a3
 	l32i	a0, a0, EXC_TABLE_FAST_USER
 	xsr	a3, excsave1
@@ -464,11 +469,120 @@ _DoubleExceptionVector_WindowOverflow:
 	rotw	-3
 	j	1b
 
-	.end literal_prefix
 
 ENDPROC(_DoubleExceptionVector)
 
 /*
+ * Fixup handler for TLB miss in double exception handler for window owerflow.
+ * We get here with windowbase set to the window that was being spilled and
+ * a0 trashed. a0 bit 7 determines if this is a call8 (bit clear) or call12
+ * (bit set) window.
+ *
+ * We do the following here:
+ * - go to the original window retaining a0 value;
+ * - set up exception stack to return back to appropriate a0 restore code
+ *   (we'll need to rotate window back and there's no place to save this
+ *    information, use different return address for that);
+ * - handle the exception;
+ * - go to the window that was being spilled;
+ * - set up window_overflow_restore_a0_fixup as a fixup routine;
+ * - reload a0;
+ * - restore the original window;
+ * - reset the default fixup routine;
+ * - return to user. By the time we get to this fixup handler all information
+ *   about the conditions of the original double exception that happened in
+ *   the window overflow handler is lost, so we just return to userspace to
+ *   retry overflow from start.
+ *
+ * a0: value of depc, original value in depc
+ * a2: trashed, original value in EXC_TABLE_DOUBLE_SAVE
+ * a3: exctable, original value in excsave1
+ */
+
+ENTRY(window_overflow_restore_a0_fixup)
+
+	rsr	a0, ps
+	extui	a0, a0, PS_OWB_SHIFT, PS_OWB_WIDTH
+	rsr	a2, windowbase
+	sub	a0, a2, a0
+	extui	a0, a0, 0, 3
+	l32i	a2, a3, EXC_TABLE_DOUBLE_SAVE
+	xsr	a3, excsave1
+
+	_beqi	a0, 1, .Lhandle_1
+	_beqi	a0, 3, .Lhandle_3
+
+	.macro	overflow_fixup_handle_exception_pane n
+
+	rsr	a0, depc
+	rotw	-\n
+
+	xsr	a3, excsave1
+	wsr	a2, depc
+	l32i	a2, a3, EXC_TABLE_KSTK
+	s32i	a0, a2, PT_AREG0
+
+	movi	a0, .Lrestore_\n
+	s32i	a0, a2, PT_DEPC
+	rsr	a0, exccause
+	j	_DoubleExceptionVector_handle_exception
+
+	.endm
+
+	overflow_fixup_handle_exception_pane 2
+.Lhandle_1:
+	overflow_fixup_handle_exception_pane 1
+.Lhandle_3:
+	overflow_fixup_handle_exception_pane 3
+
+	.macro	overflow_fixup_restore_a0_pane n
+
+	rotw	\n
+	/* Need to preserve a0 value here to be able to handle exception
+	 * that may occur on a0 reload from stack. It may occur because
+	 * TLB miss handler may not be atomic and pointer to page table
+	 * may be lost before we get here. There are no free registers,
+	 * so we need to use EXC_TABLE_DOUBLE_SAVE area.
+	 */
+	xsr	a3, excsave1
+	s32i	a2, a3, EXC_TABLE_DOUBLE_SAVE
+	movi	a2, window_overflow_restore_a0_fixup
+	s32i	a2, a3, EXC_TABLE_FIXUP
+	l32i	a2, a3, EXC_TABLE_DOUBLE_SAVE
+	xsr	a3, excsave1
+	bbsi.l	a0, 7, 1f
+	l32e	a0, a9, -16
+	j	2f
+1:
+	l32e	a0, a13, -16
+2:
+	rotw	-\n
+
+	.endm
+
+.Lrestore_2:
+	overflow_fixup_restore_a0_pane 2
+
+.Lset_default_fixup:
+	xsr	a3, excsave1
+	s32i	a2, a3, EXC_TABLE_DOUBLE_SAVE
+	movi	a2, 0
+	s32i	a2, a3, EXC_TABLE_FIXUP
+	l32i	a2, a3, EXC_TABLE_DOUBLE_SAVE
+	xsr	a3, excsave1
+	rfe
+
+.Lrestore_1:
+	overflow_fixup_restore_a0_pane 1
+	j	.Lset_default_fixup
+.Lrestore_3:
+	overflow_fixup_restore_a0_pane 3
+	j	.Lset_default_fixup
+
+ENDPROC(window_overflow_restore_a0_fixup)
+
+	.end literal_prefix
+/*
  * Debug interrupt vector
 *
 * There is not much space here, so simply jump to another handler.
diff --git a/arch/xtensa/kernel/vmlinux.lds.S b/arch/xtensa/kernel/vmlinux.lds.S
index ee32c0085dff..d16db6df86f8 100644
--- a/arch/xtensa/kernel/vmlinux.lds.S
+++ b/arch/xtensa/kernel/vmlinux.lds.S
@@ -269,13 +269,13 @@ SECTIONS
 		  .UserExceptionVector.literal)
 	SECTION_VECTOR (_DoubleExceptionVector_literal,
 		  .DoubleExceptionVector.literal,
-		  DOUBLEEXC_VECTOR_VADDR - 16,
+		  DOUBLEEXC_VECTOR_VADDR - 40,
 		  SIZEOF(.UserExceptionVector.text),
 		  .UserExceptionVector.text)
 	SECTION_VECTOR (_DoubleExceptionVector_text,
 		  .DoubleExceptionVector.text,
 		  DOUBLEEXC_VECTOR_VADDR,
-		  32,
+		  40,
 		  .DoubleExceptionVector.literal)
 
 	. = (LOADADDR( .DoubleExceptionVector.text ) + SIZEOF( .DoubleExceptionVector.text ) + 3) & ~ 3;
diff --git a/arch/xtensa/mm/init.c b/arch/xtensa/mm/init.c
index 4224256bb215..77ed20209ca5 100644
--- a/arch/xtensa/mm/init.c
+++ b/arch/xtensa/mm/init.c
@@ -191,7 +191,7 @@ int __init mem_reserve(unsigned long start, unsigned long end, int must_exist)
 		return -EINVAL;
 	}
 
-	if (it && start - it->start < bank_sz) {
+	if (it && start - it->start <= bank_sz) {
 		if (start == it->start) {
 			if (end - it->start < bank_sz) {
 				it->start = end;
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index b9f4cc494ece..28d227c5ca77 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -872,6 +872,13 @@ void blkcg_drain_queue(struct request_queue *q)
 {
 	lockdep_assert_held(q->queue_lock);
 
+	/*
+	 * @q could be exiting and already have destroyed all blkgs as
+	 * indicated by NULL root_blkg. If so, don't confuse policies.
+	 */
+	if (!q->root_blkg)
+		return;
+
 	blk_throtl_drain(q);
 }
 
diff --git a/block/blk-tag.c b/block/blk-tag.c
index 3f33d8672268..a185b86741e5 100644
--- a/block/blk-tag.c
+++ b/block/blk-tag.c
@@ -27,18 +27,15 @@ struct request *blk_queue_find_tag(struct request_queue *q, int tag)
 EXPORT_SYMBOL(blk_queue_find_tag);
 
 /**
- * __blk_free_tags - release a given set of tag maintenance info
+ * blk_free_tags - release a given set of tag maintenance info
  * @bqt:	the tag map to free
 *
- * Tries to free the specified @bqt. Returns true if it was
- * actually freed and false if there are still references using it
+ * Drop the reference count on @bqt and frees it when the last reference
+ * is dropped.
 */
-static int __blk_free_tags(struct blk_queue_tag *bqt)
+void blk_free_tags(struct blk_queue_tag *bqt)
 {
-	int retval;
-
-	retval = atomic_dec_and_test(&bqt->refcnt);
-	if (retval) {
+	if (atomic_dec_and_test(&bqt->refcnt)) {
 		BUG_ON(find_first_bit(bqt->tag_map, bqt->max_depth) <
 			bqt->max_depth);
 
@@ -50,9 +47,8 @@ static int __blk_free_tags(struct blk_queue_tag *bqt)
 
 		kfree(bqt);
 	}
-
-	return retval;
 }
+EXPORT_SYMBOL(blk_free_tags);
 
 /**
 * __blk_queue_free_tags - release tag maintenance info
@@ -69,28 +65,13 @@ void __blk_queue_free_tags(struct request_queue *q)
 	if (!bqt)
 		return;
 
-	__blk_free_tags(bqt);
+	blk_free_tags(bqt);
 
 	q->queue_tags = NULL;
 	queue_flag_clear_unlocked(QUEUE_FLAG_QUEUED, q);
 }
 
 /**
- * blk_free_tags - release a given set of tag maintenance info
- * @bqt:	the tag map to free
- *
- * For externally managed @bqt frees the map. Callers of this
- * function must guarantee to have released all the queues that
- * might have been using this tag map.
- */
-void blk_free_tags(struct blk_queue_tag *bqt)
-{
-	if (unlikely(!__blk_free_tags(bqt)))
-		BUG();
-}
-EXPORT_SYMBOL(blk_free_tags);
-
-/**
 * blk_queue_free_tags - release tag maintenance info
 * @q:  the request queue for the device
 *
diff --git a/block/compat_ioctl.c b/block/compat_ioctl.c
index fbd5a67cb773..a0926a6094b2 100644
--- a/block/compat_ioctl.c
+++ b/block/compat_ioctl.c
@@ -690,6 +690,7 @@ long compat_blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg)
 	case BLKROSET:
 	case BLKDISCARD:
 	case BLKSECDISCARD:
+	case BLKZEROOUT:
 	/*
 	 * the ones below are implemented in blkdev_locked_ioctl,
 	 * but we call blkdev_ioctl, which gets the lock for us
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index dae5607e1115..4cd52a4541a9 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -456,6 +456,7 @@ static const struct pci_device_id ahci_pci_tbl[] = {
 
 	/* Promise */
 	{ PCI_VDEVICE(PROMISE, 0x3f20), board_ahci },	/* PDC42819 */
+	{ PCI_VDEVICE(PROMISE, 0x3781), board_ahci },   /* FastTrak TX8660 ahci-mode */
 
 	/* Asmedia */
 	{ PCI_VDEVICE(ASMEDIA, 0x0601), board_ahci },	/* ASM1060 */
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 18d97d5c7d90..677c0c1b03bd 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -4787,6 +4787,10 @@ void swap_buf_le16(u16 *buf, unsigned int buf_words)
4787 * ata_qc_new - Request an available ATA command, for queueing 4787 * ata_qc_new - Request an available ATA command, for queueing
4788 * @ap: target port 4788 * @ap: target port
4789 * 4789 *
4790 * Some ATA host controllers may implement a queue depth which is less
4791 * than ATA_MAX_QUEUE. So we shouldn't allocate a tag which is beyond
4792 * the hardware limitation.
4793 *
4790 * LOCKING: 4794 * LOCKING:
4791 * None. 4795 * None.
4792 */ 4796 */
@@ -4794,14 +4798,15 @@ void swap_buf_le16(u16 *buf, unsigned int buf_words)
4794static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap) 4798static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap)
4795{ 4799{
4796 struct ata_queued_cmd *qc = NULL; 4800 struct ata_queued_cmd *qc = NULL;
4801 unsigned int max_queue = ap->host->n_tags;
4797 unsigned int i, tag; 4802 unsigned int i, tag;
4798 4803
4799 /* no command while frozen */ 4804 /* no command while frozen */
4800 if (unlikely(ap->pflags & ATA_PFLAG_FROZEN)) 4805 if (unlikely(ap->pflags & ATA_PFLAG_FROZEN))
4801 return NULL; 4806 return NULL;
4802 4807
4803 for (i = 0; i < ATA_MAX_QUEUE; i++) { 4808 for (i = 0, tag = ap->last_tag + 1; i < max_queue; i++, tag++) {
4804 tag = (i + ap->last_tag + 1) % ATA_MAX_QUEUE; 4809 tag = tag < max_queue ? tag : 0;
4805 4810
4806 /* the last tag is reserved for internal command. */ 4811 /* the last tag is reserved for internal command. */
4807 if (tag == ATA_TAG_INTERNAL) 4812 if (tag == ATA_TAG_INTERNAL)
@@ -6088,6 +6093,7 @@ void ata_host_init(struct ata_host *host, struct device *dev,
6088{ 6093{
6089 spin_lock_init(&host->lock); 6094 spin_lock_init(&host->lock);
6090 mutex_init(&host->eh_mutex); 6095 mutex_init(&host->eh_mutex);
6096 host->n_tags = ATA_MAX_QUEUE - 1;
6091 host->dev = dev; 6097 host->dev = dev;
6092 host->ops = ops; 6098 host->ops = ops;
6093} 6099}
@@ -6169,6 +6175,8 @@ int ata_host_register(struct ata_host *host, struct scsi_host_template *sht)
6169{ 6175{
6170 int i, rc; 6176 int i, rc;
6171 6177
6178 host->n_tags = clamp(sht->can_queue, 1, ATA_MAX_QUEUE - 1);
6179
6172 /* host must have been started */ 6180 /* host must have been started */
6173 if (!(host->flags & ATA_HOST_STARTED)) { 6181 if (!(host->flags & ATA_HOST_STARTED)) {
6174 dev_err(host->dev, "BUG: trying to register unstarted host\n"); 6182 dev_err(host->dev, "BUG: trying to register unstarted host\n");
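The libata-core.c hunks above clamp tag allocation to host->n_tags (taken from the SCSI host template's can_queue, capped at ATA_MAX_QUEUE - 1) instead of always cycling through ATA_MAX_QUEUE, so controllers with a smaller queue depth never get handed an out-of-range tag. A rough sketch of that clamped round-robin scan as ordinary C, with the ATA-internal reserved tag left out and all names invented:

struct tag_alloc {
        unsigned int n_tags;    /* clamped queue depth, at most the word size here */
        unsigned int last_tag;  /* where the previous search stopped */
        unsigned long in_use;   /* one bit per tag */
};

/* Round-robin search starting just after the last allocated tag; -1 when full. */
static int tag_alloc(struct tag_alloc *ta)
{
        unsigned int i, tag;

        for (i = 0, tag = ta->last_tag + 1; i < ta->n_tags; i++, tag++) {
                tag = tag < ta->n_tags ? tag : 0;       /* wrap around */
                if (!(ta->in_use & (1UL << tag))) {
                        ta->in_use |= 1UL << tag;
                        ta->last_tag = tag;
                        return (int)tag;
                }
        }
        return -1;
}

In the hunk itself the bound comes from clamp(sht->can_queue, 1, ATA_MAX_QUEUE - 1), so the scan length always matches what the controller advertised.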
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
index 6760fc4e85b8..dad83df555c4 100644
--- a/drivers/ata/libata-eh.c
+++ b/drivers/ata/libata-eh.c
@@ -1811,7 +1811,7 @@ static unsigned int ata_eh_analyze_tf(struct ata_queued_cmd *qc,
1811 case ATA_DEV_ATA: 1811 case ATA_DEV_ATA:
1812 if (err & ATA_ICRC) 1812 if (err & ATA_ICRC)
1813 qc->err_mask |= AC_ERR_ATA_BUS; 1813 qc->err_mask |= AC_ERR_ATA_BUS;
1814 if (err & ATA_UNC) 1814 if (err & (ATA_UNC | ATA_AMNF))
1815 qc->err_mask |= AC_ERR_MEDIA; 1815 qc->err_mask |= AC_ERR_MEDIA;
1816 if (err & ATA_IDNF) 1816 if (err & ATA_IDNF)
1817 qc->err_mask |= AC_ERR_INVALID; 1817 qc->err_mask |= AC_ERR_INVALID;
@@ -2556,11 +2556,12 @@ static void ata_eh_link_report(struct ata_link *link)
2556 } 2556 }
2557 2557
2558 if (cmd->command != ATA_CMD_PACKET && 2558 if (cmd->command != ATA_CMD_PACKET &&
2559 (res->feature & (ATA_ICRC | ATA_UNC | ATA_IDNF | 2559 (res->feature & (ATA_ICRC | ATA_UNC | ATA_AMNF |
2560 ATA_ABORTED))) 2560 ATA_IDNF | ATA_ABORTED)))
2561 ata_dev_err(qc->dev, "error: { %s%s%s%s}\n", 2561 ata_dev_err(qc->dev, "error: { %s%s%s%s%s}\n",
2562 res->feature & ATA_ICRC ? "ICRC " : "", 2562 res->feature & ATA_ICRC ? "ICRC " : "",
2563 res->feature & ATA_UNC ? "UNC " : "", 2563 res->feature & ATA_UNC ? "UNC " : "",
2564 res->feature & ATA_AMNF ? "AMNF " : "",
2564 res->feature & ATA_IDNF ? "IDNF " : "", 2565 res->feature & ATA_IDNF ? "IDNF " : "",
2565 res->feature & ATA_ABORTED ? "ABRT " : ""); 2566 res->feature & ATA_ABORTED ? "ABRT " : "");
2566#endif 2567#endif
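The libata-eh.c hunks above add ATA_AMNF (address mark not found) to the error-register bits that are reported as media errors and printed in the EH log. The bit-to-flag translation being extended is just this kind of mapping (constants and mask names below are placeholders, not libata's):

#include <stdio.h>

#define ERR_ICRC        (1 << 7)
#define ERR_UNC         (1 << 6)
#define ERR_AMNF        (1 << 0)

#define MASK_BUS        0x1
#define MASK_MEDIA      0x2

/* Translate raw error-register bits into a driver-level error mask. */
static unsigned int decode_err(unsigned int err)
{
        unsigned int mask = 0;

        if (err & ERR_ICRC)
                mask |= MASK_BUS;
        if (err & (ERR_UNC | ERR_AMNF))         /* both count as media-level failures */
                mask |= MASK_MEDIA;
        return mask;
}

int main(void)
{
        printf("%#x\n", decode_err(ERR_AMNF));  /* prints 0x2 */
        return 0;
}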
diff --git a/drivers/ata/pata_ep93xx.c b/drivers/ata/pata_ep93xx.c
index 6ad5c072ce34..4d37c5415fc7 100644
--- a/drivers/ata/pata_ep93xx.c
+++ b/drivers/ata/pata_ep93xx.c
@@ -915,7 +915,7 @@ static int ep93xx_pata_probe(struct platform_device *pdev)
915 struct ep93xx_pata_data *drv_data; 915 struct ep93xx_pata_data *drv_data;
916 struct ata_host *host; 916 struct ata_host *host;
917 struct ata_port *ap; 917 struct ata_port *ap;
918 unsigned int irq; 918 int irq;
919 struct resource *mem_res; 919 struct resource *mem_res;
920 void __iomem *ide_base; 920 void __iomem *ide_base;
921 int err; 921 int err;
diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
index 1b35c45c92b7..3f2e16738080 100644
--- a/drivers/block/drbd/drbd_nl.c
+++ b/drivers/block/drbd/drbd_nl.c
@@ -544,6 +544,12 @@ void conn_try_outdate_peer_async(struct drbd_connection *connection)
544 struct task_struct *opa; 544 struct task_struct *opa;
545 545
546 kref_get(&connection->kref); 546 kref_get(&connection->kref);
547 /* We may just have force_sig()'ed this thread
548 * to get it out of some blocking network function.
549 * Clear signals; otherwise kthread_run(), which internally uses
550 * wait_on_completion_killable(), will mistake our pending signal
551 * for a new fatal signal and fail. */
552 flush_signals(current);
547 opa = kthread_run(_try_outdate_peer_async, connection, "drbd_async_h"); 553 opa = kthread_run(_try_outdate_peer_async, connection, "drbd_async_h");
548 if (IS_ERR(opa)) { 554 if (IS_ERR(opa)) {
549 drbd_err(connection, "out of mem, failed to invoke fence-peer helper\n"); 555 drbd_err(connection, "out of mem, failed to invoke fence-peer helper\n");
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index 089e72cd37be..36e54be402df 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -622,11 +622,18 @@ static void zram_reset_device(struct zram *zram, bool reset_capacity)
622 memset(&zram->stats, 0, sizeof(zram->stats)); 622 memset(&zram->stats, 0, sizeof(zram->stats));
623 623
624 zram->disksize = 0; 624 zram->disksize = 0;
625 if (reset_capacity) { 625 if (reset_capacity)
626 set_capacity(zram->disk, 0); 626 set_capacity(zram->disk, 0);
627 revalidate_disk(zram->disk); 627
628 }
629 up_write(&zram->init_lock); 628 up_write(&zram->init_lock);
629
630 /*
631 * Revalidate disk out of the init_lock to avoid lockdep splat.
632 * It's okay because disk's capacity is protected by init_lock
633 * so that revalidate_disk always sees up-to-date capacity.
634 */
635 if (reset_capacity)
636 revalidate_disk(zram->disk);
630} 637}
631 638
632static ssize_t disksize_store(struct device *dev, 639static ssize_t disksize_store(struct device *dev,
@@ -666,8 +673,15 @@ static ssize_t disksize_store(struct device *dev,
666 zram->comp = comp; 673 zram->comp = comp;
667 zram->disksize = disksize; 674 zram->disksize = disksize;
668 set_capacity(zram->disk, zram->disksize >> SECTOR_SHIFT); 675 set_capacity(zram->disk, zram->disksize >> SECTOR_SHIFT);
669 revalidate_disk(zram->disk);
670 up_write(&zram->init_lock); 676 up_write(&zram->init_lock);
677
678 /*
679 * Revalidate disk out of the init_lock to avoid lockdep splat.
680 * It's okay because disk's capacity is protected by init_lock
681 * so that revalidate_disk always sees up-to-date capacity.
682 */
683 revalidate_disk(zram->disk);
684
671 return len; 685 return len;
672 686
673out_destroy_comp: 687out_destroy_comp:
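Both zram hunks above move revalidate_disk() outside init_lock: the capacity update stays under the lock, but the call that takes other locks internally now happens only after unlocking, which removes the lock-order inversion lockdep complained about. The general shape, as a userspace sketch (the pthread names are stand-ins, not the kernel primitives):

#include <pthread.h>

static pthread_mutex_t init_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned long long capacity;

/* Stand-in for revalidate_disk(): imagine it takes other locks internally. */
static void revalidate(void)
{
}

static void set_disksize(unsigned long long new_size)
{
        pthread_mutex_lock(&init_lock);
        capacity = new_size;            /* the capacity itself stays lock-protected */
        pthread_mutex_unlock(&init_lock);

        /*
         * Called after unlocking: later readers still see the capacity set
         * above, and no lock-order cycle with revalidate()'s own locks exists.
         */
        revalidate();
}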
diff --git a/drivers/gpio/gpio-rcar.c b/drivers/gpio/gpio-rcar.c
index 0c9f803fc1ac..b6ae89ea8811 100644
--- a/drivers/gpio/gpio-rcar.c
+++ b/drivers/gpio/gpio-rcar.c
@@ -284,6 +284,7 @@ static int gpio_rcar_irq_domain_map(struct irq_domain *h, unsigned int irq,
284 284
285static struct irq_domain_ops gpio_rcar_irq_domain_ops = { 285static struct irq_domain_ops gpio_rcar_irq_domain_ops = {
286 .map = gpio_rcar_irq_domain_map, 286 .map = gpio_rcar_irq_domain_map,
287 .xlate = irq_domain_xlate_twocell,
287}; 288};
288 289
289struct gpio_rcar_info { 290struct gpio_rcar_info {
diff --git a/drivers/ide/Kconfig b/drivers/ide/Kconfig
index 8fb46aab2d87..a04c49f2a011 100644
--- a/drivers/ide/Kconfig
+++ b/drivers/ide/Kconfig
@@ -416,6 +416,7 @@ config BLK_DEV_CY82C693
416 416
417config BLK_DEV_CS5520 417config BLK_DEV_CS5520
418 tristate "Cyrix CS5510/20 MediaGX chipset support (VERY EXPERIMENTAL)" 418 tristate "Cyrix CS5510/20 MediaGX chipset support (VERY EXPERIMENTAL)"
419 depends on X86_32 || COMPILE_TEST
419 select BLK_DEV_IDEDMA_PCI 420 select BLK_DEV_IDEDMA_PCI
420 help 421 help
421 Include support for PIO tuning and virtual DMA on the Cyrix MediaGX 422 Include support for PIO tuning and virtual DMA on the Cyrix MediaGX
@@ -426,6 +427,7 @@ config BLK_DEV_CS5520
426 427
427config BLK_DEV_CS5530 428config BLK_DEV_CS5530
428 tristate "Cyrix/National Semiconductor CS5530 MediaGX chipset support" 429 tristate "Cyrix/National Semiconductor CS5530 MediaGX chipset support"
430 depends on X86_32 || COMPILE_TEST
429 select BLK_DEV_IDEDMA_PCI 431 select BLK_DEV_IDEDMA_PCI
430 help 432 help
431 Include support for UDMA on the Cyrix MediaGX 5530 chipset. This 433 Include support for UDMA on the Cyrix MediaGX 5530 chipset. This
@@ -435,7 +437,7 @@ config BLK_DEV_CS5530
435 437
436config BLK_DEV_CS5535 438config BLK_DEV_CS5535
437 tristate "AMD CS5535 chipset support" 439 tristate "AMD CS5535 chipset support"
438 depends on X86 && !X86_64 440 depends on X86_32
439 select BLK_DEV_IDEDMA_PCI 441 select BLK_DEV_IDEDMA_PCI
440 help 442 help
441 Include support for UDMA on the NSC/AMD CS5535 companion chipset. 443 Include support for UDMA on the NSC/AMD CS5535 companion chipset.
@@ -486,6 +488,7 @@ config BLK_DEV_JMICRON
486 488
487config BLK_DEV_SC1200 489config BLK_DEV_SC1200
488 tristate "National SCx200 chipset support" 490 tristate "National SCx200 chipset support"
491 depends on X86_32 || COMPILE_TEST
489 select BLK_DEV_IDEDMA_PCI 492 select BLK_DEV_IDEDMA_PCI
490 help 493 help
491 This driver adds support for the on-board IDE controller on the 494 This driver adds support for the on-board IDE controller on the
diff --git a/drivers/ide/ide-probe.c b/drivers/ide/ide-probe.c
index 2a744a91370e..a3d3b1733c49 100644
--- a/drivers/ide/ide-probe.c
+++ b/drivers/ide/ide-probe.c
@@ -853,8 +853,9 @@ static int init_irq (ide_hwif_t *hwif)
853 if (irq_handler == NULL) 853 if (irq_handler == NULL)
854 irq_handler = ide_intr; 854 irq_handler = ide_intr;
855 855
856 if (request_irq(hwif->irq, irq_handler, sa, hwif->name, hwif)) 856 if (!host->get_lock)
857 goto out_up; 857 if (request_irq(hwif->irq, irq_handler, sa, hwif->name, hwif))
858 goto out_up;
858 859
859#if !defined(__mc68000__) 860#if !defined(__mc68000__)
860 printk(KERN_INFO "%s at 0x%03lx-0x%03lx,0x%03lx on irq %d", hwif->name, 861 printk(KERN_INFO "%s at 0x%03lx-0x%03lx,0x%03lx on irq %d", hwif->name,
@@ -1533,7 +1534,8 @@ static void ide_unregister(ide_hwif_t *hwif)
1533 1534
1534 ide_proc_unregister_port(hwif); 1535 ide_proc_unregister_port(hwif);
1535 1536
1536 free_irq(hwif->irq, hwif); 1537 if (!hwif->host->get_lock)
1538 free_irq(hwif->irq, hwif);
1537 1539
1538 device_unregister(hwif->portdev); 1540 device_unregister(hwif->portdev);
1539 device_unregister(&hwif->gendev); 1541 device_unregister(&hwif->gendev);
diff --git a/drivers/input/input.c b/drivers/input/input.c
index 1c4c0db05550..29ca0bb4f561 100644
--- a/drivers/input/input.c
+++ b/drivers/input/input.c
@@ -257,9 +257,10 @@ static int input_handle_abs_event(struct input_dev *dev,
257} 257}
258 258
259static int input_get_disposition(struct input_dev *dev, 259static int input_get_disposition(struct input_dev *dev,
260 unsigned int type, unsigned int code, int value) 260 unsigned int type, unsigned int code, int *pval)
261{ 261{
262 int disposition = INPUT_IGNORE_EVENT; 262 int disposition = INPUT_IGNORE_EVENT;
263 int value = *pval;
263 264
264 switch (type) { 265 switch (type) {
265 266
@@ -357,6 +358,7 @@ static int input_get_disposition(struct input_dev *dev,
357 break; 358 break;
358 } 359 }
359 360
361 *pval = value;
360 return disposition; 362 return disposition;
361} 363}
362 364
@@ -365,7 +367,7 @@ static void input_handle_event(struct input_dev *dev,
365{ 367{
366 int disposition; 368 int disposition;
367 369
368 disposition = input_get_disposition(dev, type, code, value); 370 disposition = input_get_disposition(dev, type, code, &value);
369 371
370 if ((disposition & INPUT_PASS_TO_DEVICE) && dev->event) 372 if ((disposition & INPUT_PASS_TO_DEVICE) && dev->event)
371 dev->event(dev, type, code, value); 373 dev->event(dev, type, code, value);
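The input.c change above passes the event value to input_get_disposition() by pointer, so the disposition logic can adjust the value and the caller then forwards the adjusted copy to the device and handlers. A compact sketch of that in/out-parameter shape, with the event type and the adjustment rule invented purely for illustration:

#include <stdio.h>

#define IGNORE_EVENT     0x0
#define PASS_TO_HANDLERS 0x1

/* Decide what to do with an event and possibly rewrite its value in place. */
static int get_disposition(int type, int code, int *pval)
{
        int value = *pval;
        int disposition = IGNORE_EVENT;

        if (type == 1) {                /* pretend type 1 is a key event */
                if (value > 1)          /* normalize anything exotic to a plain press */
                        value = 1;
                disposition = PASS_TO_HANDLERS;
        }

        *pval = value;                  /* the caller forwards the adjusted value */
        return disposition;
}

int main(void)
{
        int value = 2;

        if (get_disposition(1, 30, &value) & PASS_TO_HANDLERS)
                printf("deliver value=%d\n", value);    /* prints value=1 */
        return 0;
}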
diff --git a/drivers/input/keyboard/st-keyscan.c b/drivers/input/keyboard/st-keyscan.c
index 758b48731415..de7be4f03d91 100644
--- a/drivers/input/keyboard/st-keyscan.c
+++ b/drivers/input/keyboard/st-keyscan.c
@@ -215,6 +215,7 @@ static int keyscan_probe(struct platform_device *pdev)
215 return 0; 215 return 0;
216} 216}
217 217
218#ifdef CONFIG_PM_SLEEP
218static int keyscan_suspend(struct device *dev) 219static int keyscan_suspend(struct device *dev)
219{ 220{
220 struct platform_device *pdev = to_platform_device(dev); 221 struct platform_device *pdev = to_platform_device(dev);
@@ -249,6 +250,7 @@ static int keyscan_resume(struct device *dev)
249 mutex_unlock(&input->mutex); 250 mutex_unlock(&input->mutex);
250 return retval; 251 return retval;
251} 252}
253#endif
252 254
253static SIMPLE_DEV_PM_OPS(keyscan_dev_pm_ops, keyscan_suspend, keyscan_resume); 255static SIMPLE_DEV_PM_OPS(keyscan_dev_pm_ops, keyscan_suspend, keyscan_resume);
254 256
diff --git a/drivers/input/misc/sirfsoc-onkey.c b/drivers/input/misc/sirfsoc-onkey.c
index e4104f9b2e6d..fed5102e1802 100644
--- a/drivers/input/misc/sirfsoc-onkey.c
+++ b/drivers/input/misc/sirfsoc-onkey.c
@@ -213,7 +213,7 @@ static struct platform_driver sirfsoc_pwrc_driver = {
213 213
214module_platform_driver(sirfsoc_pwrc_driver); 214module_platform_driver(sirfsoc_pwrc_driver);
215 215
216MODULE_LICENSE("GPLv2"); 216MODULE_LICENSE("GPL v2");
217MODULE_AUTHOR("Binghua Duan <Binghua.Duan@csr.com>, Xianglong Du <Xianglong.Du@csr.com>"); 217MODULE_AUTHOR("Binghua Duan <Binghua.Duan@csr.com>, Xianglong Du <Xianglong.Du@csr.com>");
218MODULE_DESCRIPTION("CSR Prima2 PWRC Driver"); 218MODULE_DESCRIPTION("CSR Prima2 PWRC Driver");
219MODULE_ALIAS("platform:sirfsoc-pwrc"); 219MODULE_ALIAS("platform:sirfsoc-pwrc");
diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
index ec772d962f06..ef9e0b8a9aa7 100644
--- a/drivers/input/mouse/synaptics.c
+++ b/drivers/input/mouse/synaptics.c
@@ -132,7 +132,8 @@ static const struct min_max_quirk min_max_pnpid_table[] = {
132 1232, 5710, 1156, 4696 132 1232, 5710, 1156, 4696
133 }, 133 },
134 { 134 {
135 (const char * const []){"LEN0034", "LEN0036", "LEN2004", NULL}, 135 (const char * const []){"LEN0034", "LEN0036", "LEN2002",
136 "LEN2004", NULL},
136 1024, 5112, 2024, 4832 137 1024, 5112, 2024, 4832
137 }, 138 },
138 { 139 {
@@ -168,7 +169,7 @@ static const char * const topbuttonpad_pnp_ids[] = {
168 "LEN0049", 169 "LEN0049",
169 "LEN2000", 170 "LEN2000",
170 "LEN2001", /* Edge E431 */ 171 "LEN2001", /* Edge E431 */
171 "LEN2002", 172 "LEN2002", /* Edge E531 */
172 "LEN2003", 173 "LEN2003",
173 "LEN2004", /* L440 */ 174 "LEN2004", /* L440 */
174 "LEN2005", 175 "LEN2005",
diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h
index 381b20d4c561..136b7b204f56 100644
--- a/drivers/input/serio/i8042-x86ia64io.h
+++ b/drivers/input/serio/i8042-x86ia64io.h
@@ -402,6 +402,13 @@ static const struct dmi_system_id __initconst i8042_dmi_nomux_table[] = {
402 }, 402 },
403 }, 403 },
404 { 404 {
405 /* Acer Aspire 5710 */
406 .matches = {
407 DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
408 DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5710"),
409 },
410 },
411 {
405 /* Gericom Bellagio */ 412 /* Gericom Bellagio */
406 .matches = { 413 .matches = {
407 DMI_MATCH(DMI_SYS_VENDOR, "Gericom"), 414 DMI_MATCH(DMI_SYS_VENDOR, "Gericom"),
diff --git a/drivers/input/tablet/wacom_wac.c b/drivers/input/tablet/wacom_wac.c
index 977d05cd9e2e..e73cf2c71f35 100644
--- a/drivers/input/tablet/wacom_wac.c
+++ b/drivers/input/tablet/wacom_wac.c
@@ -1217,9 +1217,9 @@ static void wacom_bpt3_touch_msg(struct wacom_wac *wacom, unsigned char *data)
1217 * a=(pi*r^2)/C. 1217 * a=(pi*r^2)/C.
1218 */ 1218 */
1219 int a = data[5]; 1219 int a = data[5];
1220 int x_res = input_abs_get_res(input, ABS_X); 1220 int x_res = input_abs_get_res(input, ABS_MT_POSITION_X);
1221 int y_res = input_abs_get_res(input, ABS_Y); 1221 int y_res = input_abs_get_res(input, ABS_MT_POSITION_Y);
1222 width = 2 * int_sqrt(a * WACOM_CONTACT_AREA_SCALE); 1222 width = 2 * int_sqrt(a * WACOM_CONTACT_AREA_SCALE);
1223 height = width * y_res / x_res; 1223 height = width * y_res / x_res;
1224 } 1224 }
1225 1225
@@ -1587,7 +1587,7 @@ static void wacom_abs_set_axis(struct input_dev *input_dev,
1587 input_abs_set_res(input_dev, ABS_X, features->x_resolution); 1587 input_abs_set_res(input_dev, ABS_X, features->x_resolution);
1588 input_abs_set_res(input_dev, ABS_Y, features->y_resolution); 1588 input_abs_set_res(input_dev, ABS_Y, features->y_resolution);
1589 } else { 1589 } else {
1590 if (features->touch_max <= 2) { 1590 if (features->touch_max == 1) {
1591 input_set_abs_params(input_dev, ABS_X, 0, 1591 input_set_abs_params(input_dev, ABS_X, 0,
1592 features->x_max, features->x_fuzz, 0); 1592 features->x_max, features->x_fuzz, 0);
1593 input_set_abs_params(input_dev, ABS_Y, 0, 1593 input_set_abs_params(input_dev, ABS_Y, 0,
@@ -1815,14 +1815,8 @@ int wacom_setup_input_capabilities(struct input_dev *input_dev,
1815 case MTTPC: 1815 case MTTPC:
1816 case MTTPC_B: 1816 case MTTPC_B:
1817 case TABLETPC2FG: 1817 case TABLETPC2FG:
1818 if (features->device_type == BTN_TOOL_FINGER) { 1818 if (features->device_type == BTN_TOOL_FINGER && features->touch_max > 1)
1819 unsigned int flags = INPUT_MT_DIRECT; 1819 input_mt_init_slots(input_dev, features->touch_max, INPUT_MT_DIRECT);
1820
1821 if (wacom_wac->features.type == TABLETPC2FG)
1822 flags = 0;
1823
1824 input_mt_init_slots(input_dev, features->touch_max, flags);
1825 }
1826 /* fall through */ 1820 /* fall through */
1827 1821
1828 case TABLETPC: 1822 case TABLETPC:
@@ -1883,10 +1877,6 @@ int wacom_setup_input_capabilities(struct input_dev *input_dev,
1883 __set_bit(BTN_RIGHT, input_dev->keybit); 1877 __set_bit(BTN_RIGHT, input_dev->keybit);
1884 1878
1885 if (features->touch_max) { 1879 if (features->touch_max) {
1886 /* touch interface */
1887 unsigned int flags = INPUT_MT_POINTER;
1888
1889 __set_bit(INPUT_PROP_POINTER, input_dev->propbit);
1890 if (features->pktlen == WACOM_PKGLEN_BBTOUCH3) { 1880 if (features->pktlen == WACOM_PKGLEN_BBTOUCH3) {
1891 input_set_abs_params(input_dev, 1881 input_set_abs_params(input_dev,
1892 ABS_MT_TOUCH_MAJOR, 1882 ABS_MT_TOUCH_MAJOR,
@@ -1894,12 +1884,8 @@ int wacom_setup_input_capabilities(struct input_dev *input_dev,
1894 input_set_abs_params(input_dev, 1884 input_set_abs_params(input_dev,
1895 ABS_MT_TOUCH_MINOR, 1885 ABS_MT_TOUCH_MINOR,
1896 0, features->y_max, 0, 0); 1886 0, features->y_max, 0, 0);
1897 } else {
1898 __set_bit(BTN_TOOL_FINGER, input_dev->keybit);
1899 __set_bit(BTN_TOOL_DOUBLETAP, input_dev->keybit);
1900 flags = 0;
1901 } 1887 }
1902 input_mt_init_slots(input_dev, features->touch_max, flags); 1888 input_mt_init_slots(input_dev, features->touch_max, INPUT_MT_POINTER);
1903 } else { 1889 } else {
1904 /* buttons/keys only interface */ 1890 /* buttons/keys only interface */
1905 __clear_bit(ABS_X, input_dev->absbit); 1891 __clear_bit(ABS_X, input_dev->absbit);
diff --git a/drivers/input/touchscreen/ti_am335x_tsc.c b/drivers/input/touchscreen/ti_am335x_tsc.c
index 4e793a17361f..2ce649520fe0 100644
--- a/drivers/input/touchscreen/ti_am335x_tsc.c
+++ b/drivers/input/touchscreen/ti_am335x_tsc.c
@@ -359,9 +359,12 @@ static int titsc_parse_dt(struct platform_device *pdev,
359 */ 359 */
360 err = of_property_read_u32(node, "ti,coordinate-readouts", 360 err = of_property_read_u32(node, "ti,coordinate-readouts",
361 &ts_dev->coordinate_readouts); 361 &ts_dev->coordinate_readouts);
362 if (err < 0) 362 if (err < 0) {
363 dev_warn(&pdev->dev, "please use 'ti,coordinate-readouts' instead\n");
363 err = of_property_read_u32(node, "ti,coordiante-readouts", 364 err = of_property_read_u32(node, "ti,coordiante-readouts",
364 &ts_dev->coordinate_readouts); 365 &ts_dev->coordinate_readouts);
366 }
367
365 if (err < 0) 368 if (err < 0)
366 return err; 369 return err;
367 370
diff --git a/drivers/isdn/i4l/isdn_ppp.c b/drivers/isdn/i4l/isdn_ppp.c
index a333b7f798d1..62f0688d45a5 100644
--- a/drivers/isdn/i4l/isdn_ppp.c
+++ b/drivers/isdn/i4l/isdn_ppp.c
@@ -638,9 +638,15 @@ isdn_ppp_ioctl(int min, struct file *file, unsigned int cmd, unsigned long arg)
638 fprog.len = len; 638 fprog.len = len;
639 fprog.filter = code; 639 fprog.filter = code;
640 640
641 if (is->pass_filter) 641 if (is->pass_filter) {
642 sk_unattached_filter_destroy(is->pass_filter); 642 sk_unattached_filter_destroy(is->pass_filter);
643 err = sk_unattached_filter_create(&is->pass_filter, &fprog); 643 is->pass_filter = NULL;
644 }
645 if (fprog.filter != NULL)
646 err = sk_unattached_filter_create(&is->pass_filter,
647 &fprog);
648 else
649 err = 0;
644 kfree(code); 650 kfree(code);
645 651
646 return err; 652 return err;
@@ -657,9 +663,15 @@ isdn_ppp_ioctl(int min, struct file *file, unsigned int cmd, unsigned long arg)
657 fprog.len = len; 663 fprog.len = len;
658 fprog.filter = code; 664 fprog.filter = code;
659 665
660 if (is->active_filter) 666 if (is->active_filter) {
661 sk_unattached_filter_destroy(is->active_filter); 667 sk_unattached_filter_destroy(is->active_filter);
662 err = sk_unattached_filter_create(&is->active_filter, &fprog); 668 is->active_filter = NULL;
669 }
670 if (fprog.filter != NULL)
671 err = sk_unattached_filter_create(&is->active_filter,
672 &fprog);
673 else
674 err = 0;
663 kfree(code); 675 kfree(code);
664 676
665 return err; 677 return err;
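Both isdn_ppp.c hunks (and the matching ppp_generic.c hunks further down) switch to the same discipline: destroy the old filter and clear the pointer first, then create a replacement only if a non-empty program was supplied, so an empty program simply removes the filter instead of feeding a zero-length program to the creation call. Stripped of the kernel's sk_unattached_filter API, the replace-or-clear idiom looks roughly like this (types and helpers are invented):

#include <stdlib.h>
#include <string.h>

struct filter { size_t len; int *prog; };

static void filter_destroy(struct filter *f) { free(f->prog); free(f); }

static struct filter *filter_create(const int *prog, size_t len)
{
        struct filter *f = malloc(sizeof(*f));

        if (!f)
                return NULL;
        f->prog = malloc(len * sizeof(*prog));
        if (!f->prog) { free(f); return NULL; }
        memcpy(f->prog, prog, len * sizeof(*prog));
        f->len = len;
        return f;
}

/* Replace *slot with a filter built from prog, or clear it when prog is empty. */
static int filter_replace(struct filter **slot, const int *prog, size_t len)
{
        if (*slot) {                    /* drop the old filter unconditionally */
                filter_destroy(*slot);
                *slot = NULL;
        }
        if (!prog || !len)              /* empty program just clears the slot */
                return 0;
        *slot = filter_create(prog, len);
        return *slot ? 0 : -1;
}

Returning success for the empty case mirrors what the hunks do when fprog.filter is NULL.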
diff --git a/drivers/media/dvb-frontends/si2168.c b/drivers/media/dvb-frontends/si2168.c
index 8637d2ed7623..2e3cdcfa0a67 100644
--- a/drivers/media/dvb-frontends/si2168.c
+++ b/drivers/media/dvb-frontends/si2168.c
@@ -60,7 +60,7 @@ static int si2168_cmd_execute(struct si2168 *s, struct si2168_cmd *cmd)
60 jiffies_to_msecs(jiffies) - 60 jiffies_to_msecs(jiffies) -
61 (jiffies_to_msecs(timeout) - TIMEOUT)); 61 (jiffies_to_msecs(timeout) - TIMEOUT));
62 62
63 if (!(cmd->args[0] >> 7) & 0x01) { 63 if (!((cmd->args[0] >> 7) & 0x01)) {
64 ret = -ETIMEDOUT; 64 ret = -ETIMEDOUT;
65 goto err_mutex_unlock; 65 goto err_mutex_unlock;
66 } 66 }
@@ -485,20 +485,6 @@ static int si2168_init(struct dvb_frontend *fe)
485 if (ret) 485 if (ret)
486 goto err; 486 goto err;
487 487
488 cmd.args[0] = 0x05;
489 cmd.args[1] = 0x00;
490 cmd.args[2] = 0xaa;
491 cmd.args[3] = 0x4d;
492 cmd.args[4] = 0x56;
493 cmd.args[5] = 0x40;
494 cmd.args[6] = 0x00;
495 cmd.args[7] = 0x00;
496 cmd.wlen = 8;
497 cmd.rlen = 1;
498 ret = si2168_cmd_execute(s, &cmd);
499 if (ret)
500 goto err;
501
502 /* cold state - try to download firmware */ 488 /* cold state - try to download firmware */
503 dev_info(&s->client->dev, "%s: found a '%s' in cold state\n", 489 dev_info(&s->client->dev, "%s: found a '%s' in cold state\n",
504 KBUILD_MODNAME, si2168_ops.info.name); 490 KBUILD_MODNAME, si2168_ops.info.name);
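The small si2168.c fix above (and the identical one in si2157.c below) adds the parentheses the bit test was missing: `!` binds tighter than `&`, so the old expression negated the shifted byte before masking. For this particular shift of an 8-bit status the two forms happen to agree, but the pattern is a classic precedence trap, as a tiny standalone check shows:

#include <assert.h>

int main(void)
{
        unsigned char x = 0x20;

        /* '!' binds tighter than '&': the left form negates first, then masks. */
        assert((!(x >> 4) & 0x01) == 0);        /* !(2) & 1  -> 0 */
        assert((!((x >> 4) & 0x01)) == 1);      /* !(2 & 1)  -> 1 */
        return 0;
}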
diff --git a/drivers/media/dvb-frontends/si2168_priv.h b/drivers/media/dvb-frontends/si2168_priv.h
index 2a343e896f40..53f7f06ae343 100644
--- a/drivers/media/dvb-frontends/si2168_priv.h
+++ b/drivers/media/dvb-frontends/si2168_priv.h
@@ -22,7 +22,7 @@
22#include <linux/firmware.h> 22#include <linux/firmware.h>
23#include <linux/i2c-mux.h> 23#include <linux/i2c-mux.h>
24 24
25#define SI2168_FIRMWARE "dvb-demod-si2168-01.fw" 25#define SI2168_FIRMWARE "dvb-demod-si2168-02.fw"
26 26
27/* state struct */ 27/* state struct */
28struct si2168 { 28struct si2168 {
diff --git a/drivers/media/dvb-frontends/tda10071.c b/drivers/media/dvb-frontends/tda10071.c
index 522fe00f5eee..9619be5d4827 100644
--- a/drivers/media/dvb-frontends/tda10071.c
+++ b/drivers/media/dvb-frontends/tda10071.c
@@ -668,6 +668,7 @@ static int tda10071_set_frontend(struct dvb_frontend *fe)
668 struct dtv_frontend_properties *c = &fe->dtv_property_cache; 668 struct dtv_frontend_properties *c = &fe->dtv_property_cache;
669 int ret, i; 669 int ret, i;
670 u8 mode, rolloff, pilot, inversion, div; 670 u8 mode, rolloff, pilot, inversion, div;
671 fe_modulation_t modulation;
671 672
672 dev_dbg(&priv->i2c->dev, 673 dev_dbg(&priv->i2c->dev,
673 "%s: delivery_system=%d modulation=%d frequency=%d symbol_rate=%d inversion=%d pilot=%d rolloff=%d\n", 674 "%s: delivery_system=%d modulation=%d frequency=%d symbol_rate=%d inversion=%d pilot=%d rolloff=%d\n",
@@ -702,10 +703,13 @@ static int tda10071_set_frontend(struct dvb_frontend *fe)
702 703
703 switch (c->delivery_system) { 704 switch (c->delivery_system) {
704 case SYS_DVBS: 705 case SYS_DVBS:
706 modulation = QPSK;
705 rolloff = 0; 707 rolloff = 0;
706 pilot = 2; 708 pilot = 2;
707 break; 709 break;
708 case SYS_DVBS2: 710 case SYS_DVBS2:
711 modulation = c->modulation;
712
709 switch (c->rolloff) { 713 switch (c->rolloff) {
710 case ROLLOFF_20: 714 case ROLLOFF_20:
711 rolloff = 2; 715 rolloff = 2;
@@ -750,7 +754,7 @@ static int tda10071_set_frontend(struct dvb_frontend *fe)
750 754
751 for (i = 0, mode = 0xff; i < ARRAY_SIZE(TDA10071_MODCOD); i++) { 755 for (i = 0, mode = 0xff; i < ARRAY_SIZE(TDA10071_MODCOD); i++) {
752 if (c->delivery_system == TDA10071_MODCOD[i].delivery_system && 756 if (c->delivery_system == TDA10071_MODCOD[i].delivery_system &&
753 c->modulation == TDA10071_MODCOD[i].modulation && 757 modulation == TDA10071_MODCOD[i].modulation &&
754 c->fec_inner == TDA10071_MODCOD[i].fec) { 758 c->fec_inner == TDA10071_MODCOD[i].fec) {
755 mode = TDA10071_MODCOD[i].val; 759 mode = TDA10071_MODCOD[i].val;
756 dev_dbg(&priv->i2c->dev, "%s: mode found=%02x\n", 760 dev_dbg(&priv->i2c->dev, "%s: mode found=%02x\n",
@@ -834,10 +838,10 @@ static int tda10071_get_frontend(struct dvb_frontend *fe)
834 838
835 switch ((buf[1] >> 0) & 0x01) { 839 switch ((buf[1] >> 0) & 0x01) {
836 case 0: 840 case 0:
837 c->inversion = INVERSION_OFF; 841 c->inversion = INVERSION_ON;
838 break; 842 break;
839 case 1: 843 case 1:
840 c->inversion = INVERSION_ON; 844 c->inversion = INVERSION_OFF;
841 break; 845 break;
842 } 846 }
843 847
@@ -856,7 +860,7 @@ static int tda10071_get_frontend(struct dvb_frontend *fe)
856 if (ret) 860 if (ret)
857 goto error; 861 goto error;
858 862
859 c->symbol_rate = (buf[0] << 16) | (buf[1] << 8) | (buf[2] << 0); 863 c->symbol_rate = ((buf[0] << 16) | (buf[1] << 8) | (buf[2] << 0)) * 1000;
860 864
861 return ret; 865 return ret;
862error: 866error:
diff --git a/drivers/media/dvb-frontends/tda10071_priv.h b/drivers/media/dvb-frontends/tda10071_priv.h
index 4baf14bfb65a..420486192736 100644
--- a/drivers/media/dvb-frontends/tda10071_priv.h
+++ b/drivers/media/dvb-frontends/tda10071_priv.h
@@ -55,6 +55,7 @@ static struct tda10071_modcod {
55 { SYS_DVBS2, QPSK, FEC_8_9, 0x0a }, 55 { SYS_DVBS2, QPSK, FEC_8_9, 0x0a },
56 { SYS_DVBS2, QPSK, FEC_9_10, 0x0b }, 56 { SYS_DVBS2, QPSK, FEC_9_10, 0x0b },
57 /* 8PSK */ 57 /* 8PSK */
58 { SYS_DVBS2, PSK_8, FEC_AUTO, 0x00 },
58 { SYS_DVBS2, PSK_8, FEC_3_5, 0x0c }, 59 { SYS_DVBS2, PSK_8, FEC_3_5, 0x0c },
59 { SYS_DVBS2, PSK_8, FEC_2_3, 0x0d }, 60 { SYS_DVBS2, PSK_8, FEC_2_3, 0x0d },
60 { SYS_DVBS2, PSK_8, FEC_3_4, 0x0e }, 61 { SYS_DVBS2, PSK_8, FEC_3_4, 0x0e },
diff --git a/drivers/media/pci/saa7134/saa7134-empress.c b/drivers/media/pci/saa7134/saa7134-empress.c
index e65c760e4e8b..0006d6bf8c18 100644
--- a/drivers/media/pci/saa7134/saa7134-empress.c
+++ b/drivers/media/pci/saa7134/saa7134-empress.c
@@ -179,7 +179,7 @@ static const struct v4l2_file_operations ts_fops =
179 .read = vb2_fop_read, 179 .read = vb2_fop_read,
180 .poll = vb2_fop_poll, 180 .poll = vb2_fop_poll,
181 .mmap = vb2_fop_mmap, 181 .mmap = vb2_fop_mmap,
182 .ioctl = video_ioctl2, 182 .unlocked_ioctl = video_ioctl2,
183}; 183};
184 184
185static const struct v4l2_ioctl_ops ts_ioctl_ops = { 185static const struct v4l2_ioctl_ops ts_ioctl_ops = {
diff --git a/drivers/media/platform/davinci/vpif_capture.c b/drivers/media/platform/davinci/vpif_capture.c
index a7ed16497903..1e4ec697fb10 100644
--- a/drivers/media/platform/davinci/vpif_capture.c
+++ b/drivers/media/platform/davinci/vpif_capture.c
@@ -269,6 +269,7 @@ err:
269 list_del(&buf->list); 269 list_del(&buf->list);
270 vb2_buffer_done(&buf->vb, VB2_BUF_STATE_QUEUED); 270 vb2_buffer_done(&buf->vb, VB2_BUF_STATE_QUEUED);
271 } 271 }
272 spin_unlock_irqrestore(&common->irqlock, flags);
272 273
273 return ret; 274 return ret;
274} 275}
diff --git a/drivers/media/platform/davinci/vpif_display.c b/drivers/media/platform/davinci/vpif_display.c
index 5bb085b19bcb..b431b58f39e3 100644
--- a/drivers/media/platform/davinci/vpif_display.c
+++ b/drivers/media/platform/davinci/vpif_display.c
@@ -233,6 +233,7 @@ err:
233 list_del(&buf->list); 233 list_del(&buf->list);
234 vb2_buffer_done(&buf->vb, VB2_BUF_STATE_QUEUED); 234 vb2_buffer_done(&buf->vb, VB2_BUF_STATE_QUEUED);
235 } 235 }
236 spin_unlock_irqrestore(&common->irqlock, flags);
236 237
237 return ret; 238 return ret;
238} 239}
diff --git a/drivers/media/tuners/si2157.c b/drivers/media/tuners/si2157.c
index 271a752cee54..fa4cc7b880aa 100644
--- a/drivers/media/tuners/si2157.c
+++ b/drivers/media/tuners/si2157.c
@@ -57,7 +57,7 @@ static int si2157_cmd_execute(struct si2157 *s, struct si2157_cmd *cmd)
57 jiffies_to_msecs(jiffies) - 57 jiffies_to_msecs(jiffies) -
58 (jiffies_to_msecs(timeout) - TIMEOUT)); 58 (jiffies_to_msecs(timeout) - TIMEOUT));
59 59
60 if (!(buf[0] >> 7) & 0x01) { 60 if (!((buf[0] >> 7) & 0x01)) {
61 ret = -ETIMEDOUT; 61 ret = -ETIMEDOUT;
62 goto err_mutex_unlock; 62 goto err_mutex_unlock;
63 } else { 63 } else {
diff --git a/drivers/media/usb/dvb-usb-v2/af9035.c b/drivers/media/usb/dvb-usb-v2/af9035.c
index 021e4d35e4d7..7b9b75f60774 100644
--- a/drivers/media/usb/dvb-usb-v2/af9035.c
+++ b/drivers/media/usb/dvb-usb-v2/af9035.c
@@ -704,15 +704,41 @@ static int af9035_read_config(struct dvb_usb_device *d)
704 if (ret < 0) 704 if (ret < 0)
705 goto err; 705 goto err;
706 706
707 if (tmp == 0x00) 707 dev_dbg(&d->udev->dev, "%s: [%d]tuner=%02x\n",
708 dev_dbg(&d->udev->dev, 708 __func__, i, tmp);
709 "%s: [%d]tuner not set, using default\n", 709
710 __func__, i); 710 /* tuner sanity check */
711 else 711 if (state->chip_type == 0x9135) {
712 if (state->chip_version == 0x02) {
713 /* IT9135 BX (v2) */
714 switch (tmp) {
715 case AF9033_TUNER_IT9135_60:
716 case AF9033_TUNER_IT9135_61:
717 case AF9033_TUNER_IT9135_62:
718 state->af9033_config[i].tuner = tmp;
719 break;
720 }
721 } else {
722 /* IT9135 AX (v1) */
723 switch (tmp) {
724 case AF9033_TUNER_IT9135_38:
725 case AF9033_TUNER_IT9135_51:
726 case AF9033_TUNER_IT9135_52:
727 state->af9033_config[i].tuner = tmp;
728 break;
729 }
730 }
731 } else {
732 /* AF9035 */
712 state->af9033_config[i].tuner = tmp; 733 state->af9033_config[i].tuner = tmp;
734 }
713 735
714 dev_dbg(&d->udev->dev, "%s: [%d]tuner=%02x\n", 736 if (state->af9033_config[i].tuner != tmp) {
715 __func__, i, state->af9033_config[i].tuner); 737 dev_info(&d->udev->dev,
738 "%s: [%d] overriding tuner from %02x to %02x\n",
739 KBUILD_MODNAME, i, tmp,
740 state->af9033_config[i].tuner);
741 }
716 742
717 switch (state->af9033_config[i].tuner) { 743 switch (state->af9033_config[i].tuner) {
718 case AF9033_TUNER_TUA9001: 744 case AF9033_TUNER_TUA9001:
diff --git a/drivers/media/usb/gspca/pac7302.c b/drivers/media/usb/gspca/pac7302.c
index 2fd1c5e31a0f..339adce7c7a5 100644
--- a/drivers/media/usb/gspca/pac7302.c
+++ b/drivers/media/usb/gspca/pac7302.c
@@ -928,6 +928,7 @@ static const struct usb_device_id device_table[] = {
928 {USB_DEVICE(0x093a, 0x2620)}, 928 {USB_DEVICE(0x093a, 0x2620)},
929 {USB_DEVICE(0x093a, 0x2621)}, 929 {USB_DEVICE(0x093a, 0x2621)},
930 {USB_DEVICE(0x093a, 0x2622), .driver_info = FL_VFLIP}, 930 {USB_DEVICE(0x093a, 0x2622), .driver_info = FL_VFLIP},
931 {USB_DEVICE(0x093a, 0x2623), .driver_info = FL_VFLIP},
931 {USB_DEVICE(0x093a, 0x2624), .driver_info = FL_VFLIP}, 932 {USB_DEVICE(0x093a, 0x2624), .driver_info = FL_VFLIP},
932 {USB_DEVICE(0x093a, 0x2625)}, 933 {USB_DEVICE(0x093a, 0x2625)},
933 {USB_DEVICE(0x093a, 0x2626)}, 934 {USB_DEVICE(0x093a, 0x2626)},
diff --git a/drivers/media/usb/hdpvr/hdpvr-video.c b/drivers/media/usb/hdpvr/hdpvr-video.c
index 0500c4175d5f..6bce01a674f9 100644
--- a/drivers/media/usb/hdpvr/hdpvr-video.c
+++ b/drivers/media/usb/hdpvr/hdpvr-video.c
@@ -82,7 +82,7 @@ static void hdpvr_read_bulk_callback(struct urb *urb)
82} 82}
83 83
84/*=========================================================================*/ 84/*=========================================================================*/
85/* bufffer bits */ 85/* buffer bits */
86 86
87/* function expects dev->io_mutex to be hold by caller */ 87/* function expects dev->io_mutex to be hold by caller */
88int hdpvr_cancel_queue(struct hdpvr_device *dev) 88int hdpvr_cancel_queue(struct hdpvr_device *dev)
@@ -926,7 +926,7 @@ static int hdpvr_s_ctrl(struct v4l2_ctrl *ctrl)
926 case V4L2_CID_MPEG_AUDIO_ENCODING: 926 case V4L2_CID_MPEG_AUDIO_ENCODING:
927 if (dev->flags & HDPVR_FLAG_AC3_CAP) { 927 if (dev->flags & HDPVR_FLAG_AC3_CAP) {
928 opt->audio_codec = ctrl->val; 928 opt->audio_codec = ctrl->val;
929 return hdpvr_set_audio(dev, opt->audio_input, 929 return hdpvr_set_audio(dev, opt->audio_input + 1,
930 opt->audio_codec); 930 opt->audio_codec);
931 } 931 }
932 return 0; 932 return 0;
@@ -1198,7 +1198,7 @@ int hdpvr_register_videodev(struct hdpvr_device *dev, struct device *parent,
1198 v4l2_ctrl_new_std_menu(hdl, &hdpvr_ctrl_ops, 1198 v4l2_ctrl_new_std_menu(hdl, &hdpvr_ctrl_ops,
1199 V4L2_CID_MPEG_AUDIO_ENCODING, 1199 V4L2_CID_MPEG_AUDIO_ENCODING,
1200 ac3 ? V4L2_MPEG_AUDIO_ENCODING_AC3 : V4L2_MPEG_AUDIO_ENCODING_AAC, 1200 ac3 ? V4L2_MPEG_AUDIO_ENCODING_AC3 : V4L2_MPEG_AUDIO_ENCODING_AAC,
1201 0x7, V4L2_MPEG_AUDIO_ENCODING_AAC); 1201 0x7, ac3 ? dev->options.audio_codec : V4L2_MPEG_AUDIO_ENCODING_AAC);
1202 v4l2_ctrl_new_std_menu(hdl, &hdpvr_ctrl_ops, 1202 v4l2_ctrl_new_std_menu(hdl, &hdpvr_ctrl_ops,
1203 V4L2_CID_MPEG_VIDEO_ENCODING, 1203 V4L2_CID_MPEG_VIDEO_ENCODING,
1204 V4L2_MPEG_VIDEO_ENCODING_MPEG_4_AVC, 0x3, 1204 V4L2_MPEG_VIDEO_ENCODING_MPEG_4_AVC, 0x3,
diff --git a/drivers/media/v4l2-core/v4l2-dv-timings.c b/drivers/media/v4l2-core/v4l2-dv-timings.c
index 4ae54caadd03..ce1c9f5d9dee 100644
--- a/drivers/media/v4l2-core/v4l2-dv-timings.c
+++ b/drivers/media/v4l2-core/v4l2-dv-timings.c
@@ -610,10 +610,10 @@ struct v4l2_fract v4l2_calc_aspect_ratio(u8 hor_landscape, u8 vert_portrait)
610 aspect.denominator = 9; 610 aspect.denominator = 9;
611 } else if (ratio == 34) { 611 } else if (ratio == 34) {
612 aspect.numerator = 4; 612 aspect.numerator = 4;
613 aspect.numerator = 3; 613 aspect.denominator = 3;
614 } else if (ratio == 68) { 614 } else if (ratio == 68) {
615 aspect.numerator = 15; 615 aspect.numerator = 15;
616 aspect.numerator = 9; 616 aspect.denominator = 9;
617 } else { 617 } else {
618 aspect.numerator = hor_landscape + 99; 618 aspect.numerator = hor_landscape + 99;
619 aspect.denominator = 100; 619 aspect.denominator = 100;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_cq.c b/drivers/net/ethernet/mellanox/mlx4/en_cq.c
index 14c00048bbec..82322b1c8411 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_cq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_cq.c
@@ -129,14 +129,15 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
129 name); 129 name);
130 } 130 }
131 131
132 cq->irq_desc =
133 irq_to_desc(mlx4_eq_get_irq(mdev->dev,
134 cq->vector));
135 } 132 }
136 } else { 133 } else {
137 cq->vector = (cq->ring + 1 + priv->port) % 134 cq->vector = (cq->ring + 1 + priv->port) %
138 mdev->dev->caps.num_comp_vectors; 135 mdev->dev->caps.num_comp_vectors;
139 } 136 }
137
138 cq->irq_desc =
139 irq_to_desc(mlx4_eq_get_irq(mdev->dev,
140 cq->vector));
140 } else { 141 } else {
141 /* For TX we use the same irq per 142 /* For TX we use the same irq per
142 ring we assigned for the RX */ 143 ring we assigned for the RX */
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index 06bdc31a828d..61623e9af574 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -4240,6 +4240,8 @@ static void rtl_init_rxcfg(struct rtl8169_private *tp)
4240 RTL_W32(RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST); 4240 RTL_W32(RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST);
4241 break; 4241 break;
4242 case RTL_GIGA_MAC_VER_40: 4242 case RTL_GIGA_MAC_VER_40:
4243 RTL_W32(RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST | RX_EARLY_OFF);
4244 break;
4243 case RTL_GIGA_MAC_VER_41: 4245 case RTL_GIGA_MAC_VER_41:
4244 case RTL_GIGA_MAC_VER_42: 4246 case RTL_GIGA_MAC_VER_42:
4245 case RTL_GIGA_MAC_VER_43: 4247 case RTL_GIGA_MAC_VER_43:
diff --git a/drivers/net/ethernet/sun/sunvnet.c b/drivers/net/ethernet/sun/sunvnet.c
index 1c24a8f368bd..fd411d6e19a2 100644
--- a/drivers/net/ethernet/sun/sunvnet.c
+++ b/drivers/net/ethernet/sun/sunvnet.c
@@ -1083,6 +1083,24 @@ static struct vnet *vnet_find_or_create(const u64 *local_mac)
1083 return vp; 1083 return vp;
1084} 1084}
1085 1085
1086static void vnet_cleanup(void)
1087{
1088 struct vnet *vp;
1089 struct net_device *dev;
1090
1091 mutex_lock(&vnet_list_mutex);
1092 while (!list_empty(&vnet_list)) {
1093 vp = list_first_entry(&vnet_list, struct vnet, list);
1094 list_del(&vp->list);
1095 dev = vp->dev;
1096 /* vio_unregister_driver() should have cleaned up port_list */
1097 BUG_ON(!list_empty(&vp->port_list));
1098 unregister_netdev(dev);
1099 free_netdev(dev);
1100 }
1101 mutex_unlock(&vnet_list_mutex);
1102}
1103
1086static const char *local_mac_prop = "local-mac-address"; 1104static const char *local_mac_prop = "local-mac-address";
1087 1105
1088static struct vnet *vnet_find_parent(struct mdesc_handle *hp, 1106static struct vnet *vnet_find_parent(struct mdesc_handle *hp,
@@ -1240,7 +1258,6 @@ static int vnet_port_remove(struct vio_dev *vdev)
1240 1258
1241 kfree(port); 1259 kfree(port);
1242 1260
1243 unregister_netdev(vp->dev);
1244 } 1261 }
1245 return 0; 1262 return 0;
1246} 1263}
@@ -1268,6 +1285,7 @@ static int __init vnet_init(void)
1268static void __exit vnet_exit(void) 1285static void __exit vnet_exit(void)
1269{ 1286{
1270 vio_unregister_driver(&vnet_port_driver); 1287 vio_unregister_driver(&vnet_port_driver);
1288 vnet_cleanup();
1271} 1289}
1272 1290
1273module_init(vnet_init); 1291module_init(vnet_init);
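The sunvnet.c hunks stop unregistering the net device from the per-port remove path and instead add vnet_cleanup(), which drains the global vnet list under its mutex at module exit and unregisters/frees each device exactly once. The teardown pattern, reduced to a plain linked list and a pthread mutex (both invented stand-ins for the kernel structures):

#include <pthread.h>
#include <stdlib.h>

struct vnet {
        struct vnet *next;
        void *dev;                      /* whatever resource the entry owns */
};

static struct vnet *vnet_list;
static pthread_mutex_t vnet_list_mutex = PTHREAD_MUTEX_INITIALIZER;

static void release_dev(void *dev) { free(dev); }

/* Drain the list at shutdown: each entry is removed and released exactly once. */
static void vnet_cleanup(void)
{
        pthread_mutex_lock(&vnet_list_mutex);
        while (vnet_list) {
                struct vnet *vp = vnet_list;

                vnet_list = vp->next;
                release_dev(vp->dev);
                free(vp);
        }
        pthread_mutex_unlock(&vnet_list_mutex);
}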
diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
index e2f20f807de8..d5b77ef3a210 100644
--- a/drivers/net/ppp/ppp_generic.c
+++ b/drivers/net/ppp/ppp_generic.c
@@ -757,10 +757,15 @@ static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
757 }; 757 };
758 758
759 ppp_lock(ppp); 759 ppp_lock(ppp);
760 if (ppp->pass_filter) 760 if (ppp->pass_filter) {
761 sk_unattached_filter_destroy(ppp->pass_filter); 761 sk_unattached_filter_destroy(ppp->pass_filter);
762 err = sk_unattached_filter_create(&ppp->pass_filter, 762 ppp->pass_filter = NULL;
763 &fprog); 763 }
764 if (fprog.filter != NULL)
765 err = sk_unattached_filter_create(&ppp->pass_filter,
766 &fprog);
767 else
768 err = 0;
764 kfree(code); 769 kfree(code);
765 ppp_unlock(ppp); 770 ppp_unlock(ppp);
766 } 771 }
@@ -778,10 +783,15 @@ static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
778 }; 783 };
779 784
780 ppp_lock(ppp); 785 ppp_lock(ppp);
781 if (ppp->active_filter) 786 if (ppp->active_filter) {
782 sk_unattached_filter_destroy(ppp->active_filter); 787 sk_unattached_filter_destroy(ppp->active_filter);
783 err = sk_unattached_filter_create(&ppp->active_filter, 788 ppp->active_filter = NULL;
784 &fprog); 789 }
790 if (fprog.filter != NULL)
791 err = sk_unattached_filter_create(&ppp->active_filter,
792 &fprog);
793 else
794 err = 0;
785 kfree(code); 795 kfree(code);
786 ppp_unlock(ppp); 796 ppp_unlock(ppp);
787 } 797 }
diff --git a/drivers/net/usb/huawei_cdc_ncm.c b/drivers/net/usb/huawei_cdc_ncm.c
index 5d95a13dbe2a..735f7dadb9a0 100644
--- a/drivers/net/usb/huawei_cdc_ncm.c
+++ b/drivers/net/usb/huawei_cdc_ncm.c
@@ -194,6 +194,9 @@ static const struct usb_device_id huawei_cdc_ncm_devs[] = {
194 { USB_VENDOR_AND_INTERFACE_INFO(0x12d1, 0xff, 0x02, 0x76), 194 { USB_VENDOR_AND_INTERFACE_INFO(0x12d1, 0xff, 0x02, 0x76),
195 .driver_info = (unsigned long)&huawei_cdc_ncm_info, 195 .driver_info = (unsigned long)&huawei_cdc_ncm_info,
196 }, 196 },
197 { USB_VENDOR_AND_INTERFACE_INFO(0x12d1, 0xff, 0x03, 0x16),
198 .driver_info = (unsigned long)&huawei_cdc_ncm_info,
199 },
197 200
198 /* Terminating entry */ 201 /* Terminating entry */
199 { 202 {
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index c4638c67f6b9..22756db53dca 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -667,6 +667,7 @@ static const struct usb_device_id products[] = {
667 {QMI_FIXED_INTF(0x05c6, 0x9084, 4)}, 667 {QMI_FIXED_INTF(0x05c6, 0x9084, 4)},
668 {QMI_FIXED_INTF(0x05c6, 0x920d, 0)}, 668 {QMI_FIXED_INTF(0x05c6, 0x920d, 0)},
669 {QMI_FIXED_INTF(0x05c6, 0x920d, 5)}, 669 {QMI_FIXED_INTF(0x05c6, 0x920d, 5)},
670 {QMI_FIXED_INTF(0x0846, 0x68a2, 8)},
670 {QMI_FIXED_INTF(0x12d1, 0x140c, 1)}, /* Huawei E173 */ 671 {QMI_FIXED_INTF(0x12d1, 0x140c, 1)}, /* Huawei E173 */
671 {QMI_FIXED_INTF(0x12d1, 0x14ac, 1)}, /* Huawei E1820 */ 672 {QMI_FIXED_INTF(0x12d1, 0x14ac, 1)}, /* Huawei E1820 */
672 {QMI_FIXED_INTF(0x16d8, 0x6003, 0)}, /* CMOTech 6003 */ 673 {QMI_FIXED_INTF(0x16d8, 0x6003, 0)}, /* CMOTech 6003 */
@@ -757,6 +758,7 @@ static const struct usb_device_id products[] = {
757 {QMI_FIXED_INTF(0x1199, 0x9054, 8)}, /* Sierra Wireless Modem */ 758 {QMI_FIXED_INTF(0x1199, 0x9054, 8)}, /* Sierra Wireless Modem */
758 {QMI_FIXED_INTF(0x1199, 0x9055, 8)}, /* Netgear AirCard 341U */ 759 {QMI_FIXED_INTF(0x1199, 0x9055, 8)}, /* Netgear AirCard 341U */
759 {QMI_FIXED_INTF(0x1199, 0x9056, 8)}, /* Sierra Wireless Modem */ 760 {QMI_FIXED_INTF(0x1199, 0x9056, 8)}, /* Sierra Wireless Modem */
761 {QMI_FIXED_INTF(0x1199, 0x9057, 8)},
760 {QMI_FIXED_INTF(0x1199, 0x9061, 8)}, /* Sierra Wireless Modem */ 762 {QMI_FIXED_INTF(0x1199, 0x9061, 8)}, /* Sierra Wireless Modem */
761 {QMI_FIXED_INTF(0x1bbb, 0x011e, 4)}, /* Telekom Speedstick LTE II (Alcatel One Touch L100V LTE) */ 763 {QMI_FIXED_INTF(0x1bbb, 0x011e, 4)}, /* Telekom Speedstick LTE II (Alcatel One Touch L100V LTE) */
762 {QMI_FIXED_INTF(0x1bbb, 0x0203, 2)}, /* Alcatel L800MA */ 764 {QMI_FIXED_INTF(0x1bbb, 0x0203, 2)}, /* Alcatel L800MA */
diff --git a/drivers/net/wan/x25_asy.c b/drivers/net/wan/x25_asy.c
index 5895f1978691..fa9fdfa128c1 100644
--- a/drivers/net/wan/x25_asy.c
+++ b/drivers/net/wan/x25_asy.c
@@ -122,8 +122,12 @@ static int x25_asy_change_mtu(struct net_device *dev, int newmtu)
122{ 122{
123 struct x25_asy *sl = netdev_priv(dev); 123 struct x25_asy *sl = netdev_priv(dev);
124 unsigned char *xbuff, *rbuff; 124 unsigned char *xbuff, *rbuff;
125 int len = 2 * newmtu; 125 int len;
126 126
127 if (newmtu > 65534)
128 return -EINVAL;
129
130 len = 2 * newmtu;
127 xbuff = kmalloc(len + 4, GFP_ATOMIC); 131 xbuff = kmalloc(len + 4, GFP_ATOMIC);
128 rbuff = kmalloc(len + 4, GFP_ATOMIC); 132 rbuff = kmalloc(len + 4, GFP_ATOMIC);
129 133
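The x25_asy.c change validates the requested MTU before doing any arithmetic with it: values above 65534 are rejected up front, so `2 * newmtu` can no longer produce a nonsensical or overflowing buffer length. A small validate-then-size sketch (the helper and its names are illustrative only):

#include <errno.h>
#include <stdlib.h>

#define MTU_MAX 65534

/* Allocate a transmit buffer sized for the new MTU, or fail cleanly. */
static void *alloc_xmit_buf(int newmtu, size_t *out_len)
{
        size_t len;

        if (newmtu <= 0 || newmtu > MTU_MAX) {
                errno = EINVAL;                 /* reject before doing any sizing math */
                return NULL;
        }
        len = 2 * (size_t)newmtu;               /* safe: bounded by 2 * 65534 */
        *out_len = len;
        return malloc(len + 4);
}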
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index 1844a47636b6..c65b636bcab9 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -1030,14 +1030,21 @@ static int xenvif_tx_check_gop(struct xenvif_queue *queue,
1030{ 1030{
1031 struct gnttab_map_grant_ref *gop_map = *gopp_map; 1031 struct gnttab_map_grant_ref *gop_map = *gopp_map;
1032 u16 pending_idx = XENVIF_TX_CB(skb)->pending_idx; 1032 u16 pending_idx = XENVIF_TX_CB(skb)->pending_idx;
1033 /* This always points to the shinfo of the skb being checked, which
1034 * could be either the first or the one on the frag_list
1035 */
1033 struct skb_shared_info *shinfo = skb_shinfo(skb); 1036 struct skb_shared_info *shinfo = skb_shinfo(skb);
1037 /* If this is non-NULL, we are currently checking the frag_list skb, and
1038 * this points to the shinfo of the first one
1039 */
1040 struct skb_shared_info *first_shinfo = NULL;
1034 int nr_frags = shinfo->nr_frags; 1041 int nr_frags = shinfo->nr_frags;
1042 const bool sharedslot = nr_frags &&
1043 frag_get_pending_idx(&shinfo->frags[0]) == pending_idx;
1035 int i, err; 1044 int i, err;
1036 struct sk_buff *first_skb = NULL;
1037 1045
1038 /* Check status of header. */ 1046 /* Check status of header. */
1039 err = (*gopp_copy)->status; 1047 err = (*gopp_copy)->status;
1040 (*gopp_copy)++;
1041 if (unlikely(err)) { 1048 if (unlikely(err)) {
1042 if (net_ratelimit()) 1049 if (net_ratelimit())
1043 netdev_dbg(queue->vif->dev, 1050 netdev_dbg(queue->vif->dev,
@@ -1045,8 +1052,12 @@ static int xenvif_tx_check_gop(struct xenvif_queue *queue,
1045 (*gopp_copy)->status, 1052 (*gopp_copy)->status,
1046 pending_idx, 1053 pending_idx,
1047 (*gopp_copy)->source.u.ref); 1054 (*gopp_copy)->source.u.ref);
1048 xenvif_idx_release(queue, pending_idx, XEN_NETIF_RSP_ERROR); 1055 /* The first frag might still have this slot mapped */
1056 if (!sharedslot)
1057 xenvif_idx_release(queue, pending_idx,
1058 XEN_NETIF_RSP_ERROR);
1049 } 1059 }
1060 (*gopp_copy)++;
1050 1061
1051check_frags: 1062check_frags:
1052 for (i = 0; i < nr_frags; i++, gop_map++) { 1063 for (i = 0; i < nr_frags; i++, gop_map++) {
@@ -1062,8 +1073,19 @@ check_frags:
1062 pending_idx, 1073 pending_idx,
1063 gop_map->handle); 1074 gop_map->handle);
1064 /* Had a previous error? Invalidate this fragment. */ 1075 /* Had a previous error? Invalidate this fragment. */
1065 if (unlikely(err)) 1076 if (unlikely(err)) {
1066 xenvif_idx_unmap(queue, pending_idx); 1077 xenvif_idx_unmap(queue, pending_idx);
1078 /* If the mapping of the first frag was OK, but
1079 * the header's copy failed, and they are
1080 * sharing a slot, send an error
1081 */
1082 if (i == 0 && sharedslot)
1083 xenvif_idx_release(queue, pending_idx,
1084 XEN_NETIF_RSP_ERROR);
1085 else
1086 xenvif_idx_release(queue, pending_idx,
1087 XEN_NETIF_RSP_OKAY);
1088 }
1067 continue; 1089 continue;
1068 } 1090 }
1069 1091
@@ -1075,42 +1097,53 @@ check_frags:
1075 gop_map->status, 1097 gop_map->status,
1076 pending_idx, 1098 pending_idx,
1077 gop_map->ref); 1099 gop_map->ref);
1100
1078 xenvif_idx_release(queue, pending_idx, XEN_NETIF_RSP_ERROR); 1101 xenvif_idx_release(queue, pending_idx, XEN_NETIF_RSP_ERROR);
1079 1102
1080 /* Not the first error? Preceding frags already invalidated. */ 1103 /* Not the first error? Preceding frags already invalidated. */
1081 if (err) 1104 if (err)
1082 continue; 1105 continue;
1083 /* First error: invalidate preceding fragments. */ 1106
1107 /* First error: if the header haven't shared a slot with the
1108 * first frag, release it as well.
1109 */
1110 if (!sharedslot)
1111 xenvif_idx_release(queue,
1112 XENVIF_TX_CB(skb)->pending_idx,
1113 XEN_NETIF_RSP_OKAY);
1114
1115 /* Invalidate preceding fragments of this skb. */
1084 for (j = 0; j < i; j++) { 1116 for (j = 0; j < i; j++) {
1085 pending_idx = frag_get_pending_idx(&shinfo->frags[j]); 1117 pending_idx = frag_get_pending_idx(&shinfo->frags[j]);
1086 xenvif_idx_unmap(queue, pending_idx); 1118 xenvif_idx_unmap(queue, pending_idx);
1119 xenvif_idx_release(queue, pending_idx,
1120 XEN_NETIF_RSP_OKAY);
1121 }
1122
1123 /* And if we found the error while checking the frag_list, unmap
1124 * the first skb's frags
1125 */
1126 if (first_shinfo) {
1127 for (j = 0; j < first_shinfo->nr_frags; j++) {
1128 pending_idx = frag_get_pending_idx(&first_shinfo->frags[j]);
1129 xenvif_idx_unmap(queue, pending_idx);
1130 xenvif_idx_release(queue, pending_idx,
1131 XEN_NETIF_RSP_OKAY);
1132 }
1087 } 1133 }
1088 1134
1089 /* Remember the error: invalidate all subsequent fragments. */ 1135 /* Remember the error: invalidate all subsequent fragments. */
1090 err = newerr; 1136 err = newerr;
1091 } 1137 }
1092 1138
1093 if (skb_has_frag_list(skb)) { 1139 if (skb_has_frag_list(skb) && !first_shinfo) {
1094 first_skb = skb; 1140 first_shinfo = skb_shinfo(skb);
1095 skb = shinfo->frag_list; 1141 shinfo = skb_shinfo(skb_shinfo(skb)->frag_list);
1096 shinfo = skb_shinfo(skb);
1097 nr_frags = shinfo->nr_frags; 1142 nr_frags = shinfo->nr_frags;
1098 1143
1099 goto check_frags; 1144 goto check_frags;
1100 } 1145 }
1101 1146
1102 /* There was a mapping error in the frag_list skb. We have to unmap
1103 * the first skb's frags
1104 */
1105 if (first_skb && err) {
1106 int j;
1107 shinfo = skb_shinfo(first_skb);
1108 for (j = 0; j < shinfo->nr_frags; j++) {
1109 pending_idx = frag_get_pending_idx(&shinfo->frags[j]);
1110 xenvif_idx_unmap(queue, pending_idx);
1111 }
1112 }
1113
1114 *gopp_map = gop_map; 1147 *gopp_map = gop_map;
1115 return err; 1148 return err;
1116} 1149}
@@ -1518,7 +1551,16 @@ static int xenvif_tx_submit(struct xenvif_queue *queue)
1518 1551
1519 /* Check the remap error code. */ 1552 /* Check the remap error code. */
1520 if (unlikely(xenvif_tx_check_gop(queue, skb, &gop_map, &gop_copy))) { 1553 if (unlikely(xenvif_tx_check_gop(queue, skb, &gop_map, &gop_copy))) {
1554 /* If there was an error, xenvif_tx_check_gop is
1555 * expected to release all the frags which were mapped,
1556 * so kfree_skb shouldn't do it again
1557 */
1521 skb_shinfo(skb)->nr_frags = 0; 1558 skb_shinfo(skb)->nr_frags = 0;
1559 if (skb_has_frag_list(skb)) {
1560 struct sk_buff *nskb =
1561 skb_shinfo(skb)->frag_list;
1562 skb_shinfo(nskb)->nr_frags = 0;
1563 }
1522 kfree_skb(skb); 1564 kfree_skb(skb);
1523 continue; 1565 continue;
1524 } 1566 }
@@ -1822,8 +1864,6 @@ void xenvif_idx_unmap(struct xenvif_queue *queue, u16 pending_idx)
1822 tx_unmap_op.status); 1864 tx_unmap_op.status);
1823 BUG(); 1865 BUG();
1824 } 1866 }
1825
1826 xenvif_idx_release(queue, pending_idx, XEN_NETIF_RSP_OKAY);
1827} 1867}
1828 1868
1829static inline int rx_work_todo(struct xenvif_queue *queue) 1869static inline int rx_work_todo(struct xenvif_queue *queue)
diff --git a/drivers/pinctrl/pinctrl-st.c b/drivers/pinctrl/pinctrl-st.c
index 1bd6363bc95e..9f43916637ca 100644
--- a/drivers/pinctrl/pinctrl-st.c
+++ b/drivers/pinctrl/pinctrl-st.c
@@ -1431,7 +1431,7 @@ static void st_gpio_irqmux_handler(unsigned irq, struct irq_desc *desc)
1431 1431
1432 status = readl(info->irqmux_base); 1432 status = readl(info->irqmux_base);
1433 1433
1434 for_each_set_bit(n, &status, ST_GPIO_PINS_PER_BANK) 1434 for_each_set_bit(n, &status, info->nbanks)
1435 __gpio_irq_handler(&info->banks[n]); 1435 __gpio_irq_handler(&info->banks[n]);
1436 1436
1437 chained_irq_exit(chip, desc); 1437 chained_irq_exit(chip, desc);
diff --git a/drivers/s390/char/raw3270.c b/drivers/s390/char/raw3270.c
index 15b3459f8656..220acb4cbee5 100644
--- a/drivers/s390/char/raw3270.c
+++ b/drivers/s390/char/raw3270.c
@@ -633,7 +633,6 @@ raw3270_reset_device_cb(struct raw3270_request *rq, void *data)
633 } else 633 } else
634 raw3270_writesf_readpart(rp); 634 raw3270_writesf_readpart(rp);
635 memset(&rp->init_reset, 0, sizeof(rp->init_reset)); 635 memset(&rp->init_reset, 0, sizeof(rp->init_reset));
636 memset(&rp->init_data, 0, sizeof(rp->init_data));
637} 636}
638 637
639static int 638static int
diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c
index 69ef4f8cfac8..4038437ff033 100644
--- a/drivers/s390/crypto/ap_bus.c
+++ b/drivers/s390/crypto/ap_bus.c
@@ -901,10 +901,15 @@ static int ap_device_probe(struct device *dev)
901 int rc; 901 int rc;
902 902
903 ap_dev->drv = ap_drv; 903 ap_dev->drv = ap_drv;
904
905 spin_lock_bh(&ap_device_list_lock);
906 list_add(&ap_dev->list, &ap_device_list);
907 spin_unlock_bh(&ap_device_list_lock);
908
904 rc = ap_drv->probe ? ap_drv->probe(ap_dev) : -ENODEV; 909 rc = ap_drv->probe ? ap_drv->probe(ap_dev) : -ENODEV;
905 if (!rc) { 910 if (rc) {
906 spin_lock_bh(&ap_device_list_lock); 911 spin_lock_bh(&ap_device_list_lock);
907 list_add(&ap_dev->list, &ap_device_list); 912 list_del_init(&ap_dev->list);
908 spin_unlock_bh(&ap_device_list_lock); 913 spin_unlock_bh(&ap_device_list_lock);
909 } 914 }
910 return rc; 915 return rc;
diff --git a/drivers/staging/media/omap4iss/Kconfig b/drivers/staging/media/omap4iss/Kconfig
index 78b0fba7047e..8afc6fee40c5 100644
--- a/drivers/staging/media/omap4iss/Kconfig
+++ b/drivers/staging/media/omap4iss/Kconfig
@@ -1,6 +1,6 @@
1config VIDEO_OMAP4 1config VIDEO_OMAP4
2 bool "OMAP 4 Camera support" 2 bool "OMAP 4 Camera support"
3 depends on VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API && I2C && ARCH_OMAP4 3 depends on VIDEO_V4L2=y && VIDEO_V4L2_SUBDEV_API && I2C=y && ARCH_OMAP4
4 select VIDEOBUF2_DMA_CONTIG 4 select VIDEOBUF2_DMA_CONTIG
5 ---help--- 5 ---help---
6 Driver for an OMAP 4 ISS controller. 6 Driver for an OMAP 4 ISS controller.
diff --git a/fs/coredump.c b/fs/coredump.c
index 0b2528fb640e..a93f7e6ea4cf 100644
--- a/fs/coredump.c
+++ b/fs/coredump.c
@@ -306,7 +306,7 @@ static int zap_threads(struct task_struct *tsk, struct mm_struct *mm,
306 if (unlikely(nr < 0)) 306 if (unlikely(nr < 0))
307 return nr; 307 return nr;
308 308
309 tsk->flags = PF_DUMPCORE; 309 tsk->flags |= PF_DUMPCORE;
310 if (atomic_read(&mm->mm_users) == nr + 1) 310 if (atomic_read(&mm->mm_users) == nr + 1)
311 goto done; 311 goto done;
312 /* 312 /*
diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c
index 8474028d7848..03246cd9d47a 100644
--- a/fs/fuse/inode.c
+++ b/fs/fuse/inode.c
@@ -907,9 +907,6 @@ static void process_init_reply(struct fuse_conn *fc, struct fuse_req *req)
907 fc->writeback_cache = 1; 907 fc->writeback_cache = 1;
908 if (arg->time_gran && arg->time_gran <= 1000000000) 908 if (arg->time_gran && arg->time_gran <= 1000000000)
909 fc->sb->s_time_gran = arg->time_gran; 909 fc->sb->s_time_gran = arg->time_gran;
910 else
911 fc->sb->s_time_gran = 1000000000;
912
913 } else { 910 } else {
914 ra_pages = fc->max_read / PAGE_CACHE_SIZE; 911 ra_pages = fc->max_read / PAGE_CACHE_SIZE;
915 fc->no_lock = 1; 912 fc->no_lock = 1;
@@ -938,7 +935,7 @@ static void fuse_send_init(struct fuse_conn *fc, struct fuse_req *req)
938 FUSE_SPLICE_WRITE | FUSE_SPLICE_MOVE | FUSE_SPLICE_READ | 935 FUSE_SPLICE_WRITE | FUSE_SPLICE_MOVE | FUSE_SPLICE_READ |
939 FUSE_FLOCK_LOCKS | FUSE_IOCTL_DIR | FUSE_AUTO_INVAL_DATA | 936 FUSE_FLOCK_LOCKS | FUSE_IOCTL_DIR | FUSE_AUTO_INVAL_DATA |
940 FUSE_DO_READDIRPLUS | FUSE_READDIRPLUS_AUTO | FUSE_ASYNC_DIO | 937 FUSE_DO_READDIRPLUS | FUSE_READDIRPLUS_AUTO | FUSE_ASYNC_DIO |
941 FUSE_WRITEBACK_CACHE; 938 FUSE_WRITEBACK_CACHE | FUSE_NO_OPEN_SUPPORT;
942 req->in.h.opcode = FUSE_INIT; 939 req->in.h.opcode = FUSE_INIT;
943 req->in.numargs = 1; 940 req->in.numargs = 1;
944 req->in.args[0].size = sizeof(*arg); 941 req->in.args[0].size = sizeof(*arg);
diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
index b56b1cc02718..944275c8f56d 100644
--- a/fs/nfsd/nfs4xdr.c
+++ b/fs/nfsd/nfs4xdr.c
@@ -2879,6 +2879,7 @@ again:
2879 * return the conflicting open: 2879 * return the conflicting open:
2880 */ 2880 */
2881 if (conf->len) { 2881 if (conf->len) {
2882 kfree(conf->data);
2882 conf->len = 0; 2883 conf->len = 0;
2883 conf->data = NULL; 2884 conf->data = NULL;
2884 goto again; 2885 goto again;
@@ -2891,6 +2892,7 @@ again:
2891 if (conf->len) { 2892 if (conf->len) {
2892 p = xdr_encode_opaque_fixed(p, &ld->ld_clientid, 8); 2893 p = xdr_encode_opaque_fixed(p, &ld->ld_clientid, 8);
2893 p = xdr_encode_opaque(p, conf->data, conf->len); 2894 p = xdr_encode_opaque(p, conf->data, conf->len);
2895 kfree(conf->data);
2894 } else { /* non - nfsv4 lock in conflict, no clientid nor owner */ 2896 } else { /* non - nfsv4 lock in conflict, no clientid nor owner */
2895 p = xdr_encode_hyper(p, (u64)0); /* clientid */ 2897 p = xdr_encode_hyper(p, (u64)0); /* clientid */
2896 *p++ = cpu_to_be32(0); /* length of owner name */ 2898 *p++ = cpu_to_be32(0); /* length of owner name */
@@ -2907,7 +2909,7 @@ nfsd4_encode_lock(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_lo
2907 nfserr = nfsd4_encode_stateid(xdr, &lock->lk_resp_stateid); 2909 nfserr = nfsd4_encode_stateid(xdr, &lock->lk_resp_stateid);
2908 else if (nfserr == nfserr_denied) 2910 else if (nfserr == nfserr_denied)
2909 nfserr = nfsd4_encode_lock_denied(xdr, &lock->lk_denied); 2911 nfserr = nfsd4_encode_lock_denied(xdr, &lock->lk_denied);
2910 kfree(lock->lk_denied.ld_owner.data); 2912
2911 return nfserr; 2913 return nfserr;
2912} 2914}
2913 2915
diff --git a/fs/xattr.c b/fs/xattr.c
index 3377dff18404..c69e6d43a0d2 100644
--- a/fs/xattr.c
+++ b/fs/xattr.c
@@ -843,7 +843,7 @@ struct simple_xattr *simple_xattr_alloc(const void *value, size_t size)
843 843
844 /* wrap around? */ 844 /* wrap around? */
845 len = sizeof(*new_xattr) + size; 845 len = sizeof(*new_xattr) + size;
846 if (len <= sizeof(*new_xattr)) 846 if (len < sizeof(*new_xattr))
847 return NULL; 847 return NULL;
848 848
849 new_xattr = kmalloc(len, GFP_KERNEL); 849 new_xattr = kmalloc(len, GFP_KERNEL);
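The one-character change above relaxes the overflow guard so that a zero-length xattr value (len == sizeof(*new_xattr)) is accepted again, while a size large enough to wrap the size_t addition is still rejected. A minimal, self-contained sketch of that guard, using a hypothetical struct rather than the real simple_xattr layout:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct hypothetical_xattr {             /* stand-in for simple_xattr; layout is illustrative */
        char name[8];
        size_t size;
        char value[];                   /* flexible array holding the payload */
};

/* Return 0 when header + payload would wrap around SIZE_MAX. */
static size_t checked_alloc_len(size_t payload)
{
        size_t len = sizeof(struct hypothetical_xattr) + payload;

        /* '<' (not '<=') keeps a zero-length payload valid while still
         * rejecting the wrapped result of an oversized payload. */
        if (len < sizeof(struct hypothetical_xattr))
                return 0;
        return len;
}

int main(void)
{
        printf("%zu\n", checked_alloc_len(0));          /* header only: allowed */
        printf("%zu\n", checked_alloc_len(SIZE_MAX));   /* wraps: rejected -> 0 */
        return 0;
}

With '<=' the header-only case was treated as an overflow; with '<' only a genuinely wrapped length is refused.
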
diff --git a/include/linux/libata.h b/include/linux/libata.h
index 5ab4e3a76721..92abb497ab14 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -593,6 +593,7 @@ struct ata_host {
593 struct device *dev; 593 struct device *dev;
594 void __iomem * const *iomap; 594 void __iomem * const *iomap;
595 unsigned int n_ports; 595 unsigned int n_ports;
596 unsigned int n_tags; /* nr of NCQ tags */
596 void *private_data; 597 void *private_data;
597 struct ata_port_operations *ops; 598 struct ata_port_operations *ops;
598 unsigned long flags; 599 unsigned long flags;
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index 0a97b583ee8d..e1474ae18c88 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -399,6 +399,18 @@ static inline struct page *read_mapping_page(struct address_space *mapping,
399} 399}
400 400
401/* 401/*
402 * Get the offset in PAGE_SIZE.
403 * (TODO: hugepage should have ->index in PAGE_SIZE)
404 */
405static inline pgoff_t page_to_pgoff(struct page *page)
406{
407 if (unlikely(PageHeadHuge(page)))
408 return page->index << compound_order(page);
409 else
410 return page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
411}
412
413/*
402 * Return byte-offset into filesystem object for page. 414 * Return byte-offset into filesystem object for page.
403 */ 415 */
404static inline loff_t page_offset(struct page *page) 416static inline loff_t page_offset(struct page *page)
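The new page_to_pgoff() helper centralizes a unit conversion: a hugetlbfs head page keeps its ->index in huge-page-sized units, so it must be scaled by compound_order() before it can be compared with vma->vm_pgoff, which is always in small-page units (the later mm/rmap.c and mm/memory-failure.c hunks switch their open-coded variants to this helper). A standalone sketch with assumed constants (4 KiB base pages, 2 MiB huge pages) to show the arithmetic:

#include <stdio.h>

/* Illustrative constants only: 4 KiB base pages, 2 MiB huge pages (order 9). */
#define PAGE_SHIFT        12
#define PAGE_CACHE_SHIFT  12
#define HUGE_ORDER        9

/* Mirrors the intent of page_to_pgoff(): a huge page's ->index counts in
 * huge-page units, so scale it to small-page units before comparing it with
 * vma->vm_pgoff. */
static unsigned long pgoff_of(unsigned long index, int is_huge)
{
        if (is_huge)
                return index << HUGE_ORDER;
        return index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);  /* usually a no-op shift */
}

int main(void)
{
        printf("regular page index 3 -> pgoff %lu\n", pgoff_of(3, 0));
        printf("huge    page index 3 -> pgoff %lu\n", pgoff_of(3, 1));
        return 0;
}

For ordinary page-cache pages the shift is (PAGE_CACHE_SHIFT - PAGE_SHIFT), normally zero, so the helper is a no-op there.
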
diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h
index 713b0b88bd5a..c4d86198d3d6 100644
--- a/include/net/netfilter/nf_tables.h
+++ b/include/net/netfilter/nf_tables.h
@@ -6,6 +6,7 @@
6#include <linux/netfilter/nfnetlink.h> 6#include <linux/netfilter/nfnetlink.h>
7#include <linux/netfilter/x_tables.h> 7#include <linux/netfilter/x_tables.h>
8#include <linux/netfilter/nf_tables.h> 8#include <linux/netfilter/nf_tables.h>
9#include <linux/u64_stats_sync.h>
9#include <net/netlink.h> 10#include <net/netlink.h>
10 11
11#define NFT_JUMP_STACK_SIZE 16 12#define NFT_JUMP_STACK_SIZE 16
@@ -528,8 +529,9 @@ enum nft_chain_type {
528}; 529};
529 530
530struct nft_stats { 531struct nft_stats {
531 u64 bytes; 532 u64 bytes;
532 u64 pkts; 533 u64 pkts;
534 struct u64_stats_sync syncp;
533}; 535};
534 536
535#define NFT_HOOK_OPS_MAX 2 537#define NFT_HOOK_OPS_MAX 2
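Adding a u64_stats_sync to nft_stats keeps the counters lockless on the packet path while still letting 32-bit readers obtain a consistent pkts/bytes pair; the nf_tables_api.c hunk further down reads them with the fetch_begin/retry loop. A hedged sketch of the pattern (not the actual nft hot path, which also has to cope with bottom halves):

/* Sketch only; not a buildable unit. "stats" is a hypothetical per-cpu
 * allocation of struct nft_stats. */
#include <linux/u64_stats_sync.h>
#include <linux/percpu.h>
#include <net/netfilter/nf_tables.h>

/* packet-path writer: lockless on the local CPU */
static void my_stats_update(struct nft_stats __percpu *stats, unsigned int len)
{
        struct nft_stats *s = this_cpu_ptr(stats);

        u64_stats_update_begin(&s->syncp);
        s->pkts++;
        s->bytes += len;
        u64_stats_update_end(&s->syncp);
}

/* reader: retry until a consistent pkts/bytes snapshot is seen */
static void my_stats_read(const struct nft_stats *s, u64 *pkts, u64 *bytes)
{
        unsigned int seq;

        do {
                seq = u64_stats_fetch_begin_irq(&s->syncp);
                *pkts = s->pkts;
                *bytes = s->bytes;
        } while (u64_stats_fetch_retry_irq(&s->syncp, seq));
}

The later nf_tables_api.c hunks also switch allocation to netdev_alloc_pcpu_stats(), which initializes each per-cpu syncp.
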
diff --git a/include/net/netns/nftables.h b/include/net/netns/nftables.h
index 26a394cb91a8..eee608b12cc9 100644
--- a/include/net/netns/nftables.h
+++ b/include/net/netns/nftables.h
@@ -13,8 +13,8 @@ struct netns_nftables {
13 struct nft_af_info *inet; 13 struct nft_af_info *inet;
14 struct nft_af_info *arp; 14 struct nft_af_info *arp;
15 struct nft_af_info *bridge; 15 struct nft_af_info *bridge;
16 unsigned int base_seq;
16 u8 gencursor; 17 u8 gencursor;
17 u8 genctr;
18}; 18};
19 19
20#endif 20#endif
diff --git a/include/uapi/linux/fuse.h b/include/uapi/linux/fuse.h
index 40b5ca8a1b1f..25084a052a1e 100644
--- a/include/uapi/linux/fuse.h
+++ b/include/uapi/linux/fuse.h
@@ -101,6 +101,7 @@
101 * - add FATTR_CTIME 101 * - add FATTR_CTIME
102 * - add ctime and ctimensec to fuse_setattr_in 102 * - add ctime and ctimensec to fuse_setattr_in
103 * - add FUSE_RENAME2 request 103 * - add FUSE_RENAME2 request
104 * - add FUSE_NO_OPEN_SUPPORT flag
104 */ 105 */
105 106
106#ifndef _LINUX_FUSE_H 107#ifndef _LINUX_FUSE_H
@@ -229,6 +230,7 @@ struct fuse_file_lock {
229 * FUSE_READDIRPLUS_AUTO: adaptive readdirplus 230 * FUSE_READDIRPLUS_AUTO: adaptive readdirplus
230 * FUSE_ASYNC_DIO: asynchronous direct I/O submission 231 * FUSE_ASYNC_DIO: asynchronous direct I/O submission
231 * FUSE_WRITEBACK_CACHE: use writeback cache for buffered writes 232 * FUSE_WRITEBACK_CACHE: use writeback cache for buffered writes
233 * FUSE_NO_OPEN_SUPPORT: kernel supports zero-message opens
232 */ 234 */
233#define FUSE_ASYNC_READ (1 << 0) 235#define FUSE_ASYNC_READ (1 << 0)
234#define FUSE_POSIX_LOCKS (1 << 1) 236#define FUSE_POSIX_LOCKS (1 << 1)
@@ -247,6 +249,7 @@ struct fuse_file_lock {
247#define FUSE_READDIRPLUS_AUTO (1 << 14) 249#define FUSE_READDIRPLUS_AUTO (1 << 14)
248#define FUSE_ASYNC_DIO (1 << 15) 250#define FUSE_ASYNC_DIO (1 << 15)
249#define FUSE_WRITEBACK_CACHE (1 << 16) 251#define FUSE_WRITEBACK_CACHE (1 << 16)
252#define FUSE_NO_OPEN_SUPPORT (1 << 17)
250 253
251/** 254/**
252 * CUSE INIT request/reply flags 255 * CUSE INIT request/reply flags
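FUSE_NO_OPEN_SUPPORT (bit 17) is advertised by the kernel during the FUSE_INIT handshake so a filesystem daemon can keep returning ENOSYS from open and rely on zero-message opens. A minimal, hedged sketch of testing the advertised bit from the INIT request body (plain uapi usage, not a libfuse API):

#include <linux/fuse.h>
#include <stdbool.h>

/* Hedged sketch: a userspace server inspecting the capability bits the
 * kernel put in the FUSE_INIT request. "init_in" is whatever pointer the
 * daemon's transport layer hands it; the helper name is hypothetical. */
static bool kernel_supports_zero_message_open(const struct fuse_init_in *init_in)
{
        return (init_in->flags & FUSE_NO_OPEN_SUPPORT) != 0;
}
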
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index bda9621638cc..291397e66669 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -823,7 +823,7 @@ static struct {
823 { trace_clock_local, "local", 1 }, 823 { trace_clock_local, "local", 1 },
824 { trace_clock_global, "global", 1 }, 824 { trace_clock_global, "global", 1 },
825 { trace_clock_counter, "counter", 0 }, 825 { trace_clock_counter, "counter", 0 },
826 { trace_clock_jiffies, "uptime", 1 }, 826 { trace_clock_jiffies, "uptime", 0 },
827 { trace_clock, "perf", 1 }, 827 { trace_clock, "perf", 1 },
828 ARCH_TRACE_CLOCKS 828 ARCH_TRACE_CLOCKS
829}; 829};
diff --git a/kernel/trace/trace_clock.c b/kernel/trace/trace_clock.c
index 26dc348332b7..57b67b1f24d1 100644
--- a/kernel/trace/trace_clock.c
+++ b/kernel/trace/trace_clock.c
@@ -59,13 +59,14 @@ u64 notrace trace_clock(void)
59 59
60/* 60/*
61 * trace_jiffy_clock(): Simply use jiffies as a clock counter. 61 * trace_jiffy_clock(): Simply use jiffies as a clock counter.
62 * Note that this use of jiffies_64 is not completely safe on
63 * 32-bit systems. But the window is tiny, and the effect if
64 * we are affected is that we will have an obviously bogus
65 * timestamp on a trace event - i.e. not life threatening.
62 */ 66 */
63u64 notrace trace_clock_jiffies(void) 67u64 notrace trace_clock_jiffies(void)
64{ 68{
65 u64 jiffy = jiffies - INITIAL_JIFFIES; 69 return jiffies_64_to_clock_t(jiffies_64 - INITIAL_JIFFIES);
66
67 /* Return nsecs */
68 return (u64)jiffies_to_usecs(jiffy) * 1000ULL;
69} 70}
70 71
71/* 72/*
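With this change the "uptime" trace clock counts in clock_t ticks derived from jiffies_64 rather than pretending to be nanoseconds, which is why the trace.c hunk above changes the clock's third field (its in-nanoseconds marker) to 0. A rough, standalone sketch of the proportional conversion; the real jiffies_64_to_clock_t() also handles the cases where HZ is not a multiple of USER_HZ:

#include <stdio.h>
#include <stdint.h>

/* Illustrative values: HZ is the kernel tick rate, USER_HZ the rate
 * exposed to userspace via clock_t (typically 100). */
#define HZ       250
#define USER_HZ  100

/* Simplified proportional conversion from jiffies to USER_HZ ticks. */
static uint64_t jiffies64_to_clock_t(uint64_t jiffies)
{
        return jiffies * USER_HZ / HZ;
}

int main(void)
{
        /* 500 jiffies at HZ=250 is 2 seconds, i.e. 200 USER_HZ ticks */
        printf("%llu\n", (unsigned long long)jiffies64_to_clock_t(500));
        return 0;
}
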
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 2024bbd573d2..9221c02ed9e2 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -2604,6 +2604,7 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
2604 } else { 2604 } else {
2605 if (cow) 2605 if (cow)
2606 huge_ptep_set_wrprotect(src, addr, src_pte); 2606 huge_ptep_set_wrprotect(src, addr, src_pte);
2607 entry = huge_ptep_get(src_pte);
2607 ptepage = pte_page(entry); 2608 ptepage = pte_page(entry);
2608 get_page(ptepage); 2609 get_page(ptepage);
2609 page_dup_rmap(ptepage); 2610 page_dup_rmap(ptepage);
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index c6399e328931..7211a73ba14d 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -435,7 +435,7 @@ static void collect_procs_anon(struct page *page, struct list_head *to_kill,
435 if (av == NULL) /* Not actually mapped anymore */ 435 if (av == NULL) /* Not actually mapped anymore */
436 return; 436 return;
437 437
438 pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT); 438 pgoff = page_to_pgoff(page);
439 read_lock(&tasklist_lock); 439 read_lock(&tasklist_lock);
440 for_each_process (tsk) { 440 for_each_process (tsk) {
441 struct anon_vma_chain *vmac; 441 struct anon_vma_chain *vmac;
@@ -469,7 +469,7 @@ static void collect_procs_file(struct page *page, struct list_head *to_kill,
469 mutex_lock(&mapping->i_mmap_mutex); 469 mutex_lock(&mapping->i_mmap_mutex);
470 read_lock(&tasklist_lock); 470 read_lock(&tasklist_lock);
471 for_each_process(tsk) { 471 for_each_process(tsk) {
472 pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT); 472 pgoff_t pgoff = page_to_pgoff(page);
473 struct task_struct *t = task_early_kill(tsk, force_early); 473 struct task_struct *t = task_early_kill(tsk, force_early);
474 474
475 if (!t) 475 if (!t)
diff --git a/mm/memory.c b/mm/memory.c
index d67fd9fcf1f2..7e8d8205b610 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2882,7 +2882,8 @@ static int do_read_fault(struct mm_struct *mm, struct vm_area_struct *vma,
2882 * if page by the offset is not ready to be mapped (cold cache or 2882 * if page by the offset is not ready to be mapped (cold cache or
2883 * something). 2883 * something).
2884 */ 2884 */
2885 if (vma->vm_ops->map_pages && fault_around_pages() > 1) { 2885 if (vma->vm_ops->map_pages && !(flags & FAULT_FLAG_NONLINEAR) &&
2886 fault_around_pages() > 1) {
2886 pte = pte_offset_map_lock(mm, pmd, address, &ptl); 2887 pte = pte_offset_map_lock(mm, pmd, address, &ptl);
2887 do_fault_around(vma, address, pte, pgoff, flags); 2888 do_fault_around(vma, address, pte, pgoff, flags);
2888 if (!pte_same(*pte, orig_pte)) 2889 if (!pte_same(*pte, orig_pte))
diff --git a/mm/rmap.c b/mm/rmap.c
index b7e94ebbd09e..22a4a7699cdb 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -517,11 +517,7 @@ void page_unlock_anon_vma_read(struct anon_vma *anon_vma)
517static inline unsigned long 517static inline unsigned long
518__vma_address(struct page *page, struct vm_area_struct *vma) 518__vma_address(struct page *page, struct vm_area_struct *vma)
519{ 519{
520 pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT); 520 pgoff_t pgoff = page_to_pgoff(page);
521
522 if (unlikely(is_vm_hugetlb_page(vma)))
523 pgoff = page->index << huge_page_order(page_hstate(page));
524
525 return vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT); 521 return vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
526} 522}
527 523
@@ -1639,7 +1635,7 @@ static struct anon_vma *rmap_walk_anon_lock(struct page *page,
1639static int rmap_walk_anon(struct page *page, struct rmap_walk_control *rwc) 1635static int rmap_walk_anon(struct page *page, struct rmap_walk_control *rwc)
1640{ 1636{
1641 struct anon_vma *anon_vma; 1637 struct anon_vma *anon_vma;
1642 pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT); 1638 pgoff_t pgoff = page_to_pgoff(page);
1643 struct anon_vma_chain *avc; 1639 struct anon_vma_chain *avc;
1644 int ret = SWAP_AGAIN; 1640 int ret = SWAP_AGAIN;
1645 1641
@@ -1680,7 +1676,7 @@ static int rmap_walk_anon(struct page *page, struct rmap_walk_control *rwc)
1680static int rmap_walk_file(struct page *page, struct rmap_walk_control *rwc) 1676static int rmap_walk_file(struct page *page, struct rmap_walk_control *rwc)
1681{ 1677{
1682 struct address_space *mapping = page->mapping; 1678 struct address_space *mapping = page->mapping;
1683 pgoff_t pgoff = page->index << compound_order(page); 1679 pgoff_t pgoff = page_to_pgoff(page);
1684 struct vm_area_struct *vma; 1680 struct vm_area_struct *vma;
1685 int ret = SWAP_AGAIN; 1681 int ret = SWAP_AGAIN;
1686 1682
diff --git a/mm/shmem.c b/mm/shmem.c
index 1140f49b6ded..af68b15a8fc1 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -85,7 +85,7 @@ static struct vfsmount *shm_mnt;
85 * a time): we would prefer not to enlarge the shmem inode just for that. 85 * a time): we would prefer not to enlarge the shmem inode just for that.
86 */ 86 */
87struct shmem_falloc { 87struct shmem_falloc {
88 int mode; /* FALLOC_FL mode currently operating */ 88 wait_queue_head_t *waitq; /* faults into hole wait for punch to end */
89 pgoff_t start; /* start of range currently being fallocated */ 89 pgoff_t start; /* start of range currently being fallocated */
90 pgoff_t next; /* the next page offset to be fallocated */ 90 pgoff_t next; /* the next page offset to be fallocated */
91 pgoff_t nr_falloced; /* how many new pages have been fallocated */ 91 pgoff_t nr_falloced; /* how many new pages have been fallocated */
@@ -468,23 +468,20 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
468 return; 468 return;
469 469
470 index = start; 470 index = start;
471 for ( ; ; ) { 471 while (index < end) {
472 cond_resched(); 472 cond_resched();
473 473
474 pvec.nr = find_get_entries(mapping, index, 474 pvec.nr = find_get_entries(mapping, index,
475 min(end - index, (pgoff_t)PAGEVEC_SIZE), 475 min(end - index, (pgoff_t)PAGEVEC_SIZE),
476 pvec.pages, indices); 476 pvec.pages, indices);
477 if (!pvec.nr) { 477 if (!pvec.nr) {
478 if (index == start || unfalloc) 478 /* If all gone or hole-punch or unfalloc, we're done */
479 if (index == start || end != -1)
479 break; 480 break;
481 /* But if truncating, restart to make sure all gone */
480 index = start; 482 index = start;
481 continue; 483 continue;
482 } 484 }
483 if ((index == start || unfalloc) && indices[0] >= end) {
484 pagevec_remove_exceptionals(&pvec);
485 pagevec_release(&pvec);
486 break;
487 }
488 mem_cgroup_uncharge_start(); 485 mem_cgroup_uncharge_start();
489 for (i = 0; i < pagevec_count(&pvec); i++) { 486 for (i = 0; i < pagevec_count(&pvec); i++) {
490 struct page *page = pvec.pages[i]; 487 struct page *page = pvec.pages[i];
@@ -496,8 +493,12 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
496 if (radix_tree_exceptional_entry(page)) { 493 if (radix_tree_exceptional_entry(page)) {
497 if (unfalloc) 494 if (unfalloc)
498 continue; 495 continue;
499 nr_swaps_freed += !shmem_free_swap(mapping, 496 if (shmem_free_swap(mapping, index, page)) {
500 index, page); 497 /* Swap was replaced by page: retry */
498 index--;
499 break;
500 }
501 nr_swaps_freed++;
501 continue; 502 continue;
502 } 503 }
503 504
@@ -506,6 +507,11 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
506 if (page->mapping == mapping) { 507 if (page->mapping == mapping) {
507 VM_BUG_ON_PAGE(PageWriteback(page), page); 508 VM_BUG_ON_PAGE(PageWriteback(page), page);
508 truncate_inode_page(mapping, page); 509 truncate_inode_page(mapping, page);
510 } else {
511 /* Page was replaced by swap: retry */
512 unlock_page(page);
513 index--;
514 break;
509 } 515 }
510 } 516 }
511 unlock_page(page); 517 unlock_page(page);
@@ -760,7 +766,7 @@ static int shmem_writepage(struct page *page, struct writeback_control *wbc)
760 spin_lock(&inode->i_lock); 766 spin_lock(&inode->i_lock);
761 shmem_falloc = inode->i_private; 767 shmem_falloc = inode->i_private;
762 if (shmem_falloc && 768 if (shmem_falloc &&
763 !shmem_falloc->mode && 769 !shmem_falloc->waitq &&
764 index >= shmem_falloc->start && 770 index >= shmem_falloc->start &&
765 index < shmem_falloc->next) 771 index < shmem_falloc->next)
766 shmem_falloc->nr_unswapped++; 772 shmem_falloc->nr_unswapped++;
@@ -1248,38 +1254,58 @@ static int shmem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1248 * Trinity finds that probing a hole which tmpfs is punching can 1254 * Trinity finds that probing a hole which tmpfs is punching can
1249 * prevent the hole-punch from ever completing: which in turn 1255 * prevent the hole-punch from ever completing: which in turn
1250 * locks writers out with its hold on i_mutex. So refrain from 1256 * locks writers out with its hold on i_mutex. So refrain from
1251 * faulting pages into the hole while it's being punched, and 1257 * faulting pages into the hole while it's being punched. Although
1252 * wait on i_mutex to be released if vmf->flags permits. 1258 * shmem_undo_range() does remove the additions, it may be unable to
1259 * keep up, as each new page needs its own unmap_mapping_range() call,
1260 * and the i_mmap tree grows ever slower to scan if new vmas are added.
1261 *
1262 * It does not matter if we sometimes reach this check just before the
1263 * hole-punch begins, so that one fault then races with the punch:
1264 * we just need to make racing faults a rare case.
1265 *
1266 * The implementation below would be much simpler if we just used a
1267 * standard mutex or completion: but we cannot take i_mutex in fault,
1268 * and bloating every shmem inode for this unlikely case would be sad.
1253 */ 1269 */
1254 if (unlikely(inode->i_private)) { 1270 if (unlikely(inode->i_private)) {
1255 struct shmem_falloc *shmem_falloc; 1271 struct shmem_falloc *shmem_falloc;
1256 1272
1257 spin_lock(&inode->i_lock); 1273 spin_lock(&inode->i_lock);
1258 shmem_falloc = inode->i_private; 1274 shmem_falloc = inode->i_private;
1259 if (!shmem_falloc || 1275 if (shmem_falloc &&
1260 shmem_falloc->mode != FALLOC_FL_PUNCH_HOLE || 1276 shmem_falloc->waitq &&
1261 vmf->pgoff < shmem_falloc->start || 1277 vmf->pgoff >= shmem_falloc->start &&
1262 vmf->pgoff >= shmem_falloc->next) 1278 vmf->pgoff < shmem_falloc->next) {
1263 shmem_falloc = NULL; 1279 wait_queue_head_t *shmem_falloc_waitq;
1264 spin_unlock(&inode->i_lock); 1280 DEFINE_WAIT(shmem_fault_wait);
1265 /* 1281
1266 * i_lock has protected us from taking shmem_falloc seriously 1282 ret = VM_FAULT_NOPAGE;
1267 * once return from shmem_fallocate() went back up that stack.
1268 * i_lock does not serialize with i_mutex at all, but it does
1269 * not matter if sometimes we wait unnecessarily, or sometimes
1270 * miss out on waiting: we just need to make those cases rare.
1271 */
1272 if (shmem_falloc) {
1273 if ((vmf->flags & FAULT_FLAG_ALLOW_RETRY) && 1283 if ((vmf->flags & FAULT_FLAG_ALLOW_RETRY) &&
1274 !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) { 1284 !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) {
1285 /* It's polite to up mmap_sem if we can */
1275 up_read(&vma->vm_mm->mmap_sem); 1286 up_read(&vma->vm_mm->mmap_sem);
1276 mutex_lock(&inode->i_mutex); 1287 ret = VM_FAULT_RETRY;
1277 mutex_unlock(&inode->i_mutex);
1278 return VM_FAULT_RETRY;
1279 } 1288 }
1280 /* cond_resched? Leave that to GUP or return to user */ 1289
1281 return VM_FAULT_NOPAGE; 1290 shmem_falloc_waitq = shmem_falloc->waitq;
1291 prepare_to_wait(shmem_falloc_waitq, &shmem_fault_wait,
1292 TASK_UNINTERRUPTIBLE);
1293 spin_unlock(&inode->i_lock);
1294 schedule();
1295
1296 /*
1297 * shmem_falloc_waitq points into the shmem_fallocate()
1298 * stack of the hole-punching task: shmem_falloc_waitq
1299 * is usually invalid by the time we reach here, but
1300 * finish_wait() does not dereference it in that case;
1301 * though i_lock needed lest racing with wake_up_all().
1302 */
1303 spin_lock(&inode->i_lock);
1304 finish_wait(shmem_falloc_waitq, &shmem_fault_wait);
1305 spin_unlock(&inode->i_lock);
1306 return ret;
1282 } 1307 }
1308 spin_unlock(&inode->i_lock);
1283 } 1309 }
1284 1310
1285 error = shmem_getpage(inode, vmf->pgoff, &vmf->page, SGP_CACHE, &ret); 1311 error = shmem_getpage(inode, vmf->pgoff, &vmf->page, SGP_CACHE, &ret);
@@ -1774,13 +1800,13 @@ static long shmem_fallocate(struct file *file, int mode, loff_t offset,
1774 1800
1775 mutex_lock(&inode->i_mutex); 1801 mutex_lock(&inode->i_mutex);
1776 1802
1777 shmem_falloc.mode = mode & ~FALLOC_FL_KEEP_SIZE;
1778
1779 if (mode & FALLOC_FL_PUNCH_HOLE) { 1803 if (mode & FALLOC_FL_PUNCH_HOLE) {
1780 struct address_space *mapping = file->f_mapping; 1804 struct address_space *mapping = file->f_mapping;
1781 loff_t unmap_start = round_up(offset, PAGE_SIZE); 1805 loff_t unmap_start = round_up(offset, PAGE_SIZE);
1782 loff_t unmap_end = round_down(offset + len, PAGE_SIZE) - 1; 1806 loff_t unmap_end = round_down(offset + len, PAGE_SIZE) - 1;
1807 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(shmem_falloc_waitq);
1783 1808
1809 shmem_falloc.waitq = &shmem_falloc_waitq;
1784 shmem_falloc.start = unmap_start >> PAGE_SHIFT; 1810 shmem_falloc.start = unmap_start >> PAGE_SHIFT;
1785 shmem_falloc.next = (unmap_end + 1) >> PAGE_SHIFT; 1811 shmem_falloc.next = (unmap_end + 1) >> PAGE_SHIFT;
1786 spin_lock(&inode->i_lock); 1812 spin_lock(&inode->i_lock);
@@ -1792,8 +1818,13 @@ static long shmem_fallocate(struct file *file, int mode, loff_t offset,
1792 1 + unmap_end - unmap_start, 0); 1818 1 + unmap_end - unmap_start, 0);
1793 shmem_truncate_range(inode, offset, offset + len - 1); 1819 shmem_truncate_range(inode, offset, offset + len - 1);
1794 /* No need to unmap again: hole-punching leaves COWed pages */ 1820 /* No need to unmap again: hole-punching leaves COWed pages */
1821
1822 spin_lock(&inode->i_lock);
1823 inode->i_private = NULL;
1824 wake_up_all(&shmem_falloc_waitq);
1825 spin_unlock(&inode->i_lock);
1795 error = 0; 1826 error = 0;
1796 goto undone; 1827 goto out;
1797 } 1828 }
1798 1829
1799 /* We need to check rlimit even when FALLOC_FL_KEEP_SIZE */ 1830 /* We need to check rlimit even when FALLOC_FL_KEEP_SIZE */
@@ -1809,6 +1840,7 @@ static long shmem_fallocate(struct file *file, int mode, loff_t offset,
1809 goto out; 1840 goto out;
1810 } 1841 }
1811 1842
1843 shmem_falloc.waitq = NULL;
1812 shmem_falloc.start = start; 1844 shmem_falloc.start = start;
1813 shmem_falloc.next = start; 1845 shmem_falloc.next = start;
1814 shmem_falloc.nr_falloced = 0; 1846 shmem_falloc.nr_falloced = 0;
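The shmem changes replace the old i_mutex bounce with an on-stack wait queue: the hole-puncher publishes it through inode->i_private, faulting tasks that hit the hole sleep on it, and the puncher wakes them all under i_lock before its stack frame disappears. A hedged sketch of just that handoff pattern (not the shmem code itself; "shared" stands in for inode->i_private and "lock" for i_lock):

/* Sketch only; not a buildable unit. */
#include <linux/wait.h>
#include <linux/spinlock.h>
#include <linux/sched.h>

static wait_queue_head_t *shared;       /* published pointer, protected by 'lock' */
static DEFINE_SPINLOCK(lock);

/* Punching side: publish the waitqueue, do the work, then wake everyone
 * under the lock before the stack frame (and the waitqueue) goes away. */
static void puncher(void)
{
        DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waitq);

        spin_lock(&lock);
        shared = &waitq;
        spin_unlock(&lock);

        /* ... truncate the hole ... */

        spin_lock(&lock);
        shared = NULL;
        wake_up_all(&waitq);
        spin_unlock(&lock);
}

/* Faulting side: if a punch is in flight, sleep until it wakes us. */
static void faulter(void)
{
        DEFINE_WAIT(wait);

        spin_lock(&lock);
        if (shared) {
                wait_queue_head_t *waitq = shared;

                prepare_to_wait(waitq, &wait, TASK_UNINTERRUPTIBLE);
                spin_unlock(&lock);
                schedule();
                /* waitq may already be stale here; finish_wait() copes
                 * because the wakeup removed us from the list, but take
                 * the lock to avoid racing with wake_up_all(). */
                spin_lock(&lock);
                finish_wait(waitq, &wait);
        }
        spin_unlock(&lock);
}
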
diff --git a/mm/slab_common.c b/mm/slab_common.c
index 735e01a0db6f..d31c4bacc6a2 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -55,7 +55,7 @@ static int kmem_cache_sanity_check(const char *name, size_t size)
55 continue; 55 continue;
56 } 56 }
57 57
58#if !defined(CONFIG_SLUB) || !defined(CONFIG_SLUB_DEBUG_ON) 58#if !defined(CONFIG_SLUB)
59 if (!strcmp(s->name, name)) { 59 if (!strcmp(s->name, name)) {
60 pr_err("%s (%s): Cache name already exists.\n", 60 pr_err("%s (%s): Cache name already exists.\n",
61 __func__, name); 61 __func__, name);
diff --git a/mm/truncate.c b/mm/truncate.c
index 6a78c814bebf..eda247307164 100644
--- a/mm/truncate.c
+++ b/mm/truncate.c
@@ -355,14 +355,16 @@ void truncate_inode_pages_range(struct address_space *mapping,
355 for ( ; ; ) { 355 for ( ; ; ) {
356 cond_resched(); 356 cond_resched();
357 if (!pagevec_lookup_entries(&pvec, mapping, index, 357 if (!pagevec_lookup_entries(&pvec, mapping, index,
358 min(end - index, (pgoff_t)PAGEVEC_SIZE), 358 min(end - index, (pgoff_t)PAGEVEC_SIZE), indices)) {
359 indices)) { 359 /* If all gone from start onwards, we're done */
360 if (index == start) 360 if (index == start)
361 break; 361 break;
362 /* Otherwise restart to make sure all gone */
362 index = start; 363 index = start;
363 continue; 364 continue;
364 } 365 }
365 if (index == start && indices[0] >= end) { 366 if (index == start && indices[0] >= end) {
367 /* All gone out of hole to be punched, we're done */
366 pagevec_remove_exceptionals(&pvec); 368 pagevec_remove_exceptionals(&pvec);
367 pagevec_release(&pvec); 369 pagevec_release(&pvec);
368 break; 370 break;
@@ -373,8 +375,11 @@ void truncate_inode_pages_range(struct address_space *mapping,
373 375
374 /* We rely upon deletion not changing page->index */ 376 /* We rely upon deletion not changing page->index */
375 index = indices[i]; 377 index = indices[i];
376 if (index >= end) 378 if (index >= end) {
379 /* Restart punch to make sure all gone */
380 index = start - 1;
377 break; 381 break;
382 }
378 383
379 if (radix_tree_exceptional_entry(page)) { 384 if (radix_tree_exceptional_entry(page)) {
380 clear_exceptional_entry(mapping, index, page); 385 clear_exceptional_entry(mapping, index, page);
diff --git a/net/batman-adv/bridge_loop_avoidance.c b/net/batman-adv/bridge_loop_avoidance.c
index 6f0d9ec37950..a957c8140721 100644
--- a/net/batman-adv/bridge_loop_avoidance.c
+++ b/net/batman-adv/bridge_loop_avoidance.c
@@ -800,11 +800,6 @@ static int batadv_check_claim_group(struct batadv_priv *bat_priv,
800 bla_dst = (struct batadv_bla_claim_dst *)hw_dst; 800 bla_dst = (struct batadv_bla_claim_dst *)hw_dst;
801 bla_dst_own = &bat_priv->bla.claim_dest; 801 bla_dst_own = &bat_priv->bla.claim_dest;
802 802
803 /* check if it is a claim packet in general */
804 if (memcmp(bla_dst->magic, bla_dst_own->magic,
805 sizeof(bla_dst->magic)) != 0)
806 return 0;
807
808 /* if announcement packet, use the source, 803 /* if announcement packet, use the source,
809 * otherwise assume it is in the hw_src 804 * otherwise assume it is in the hw_src
810 */ 805 */
@@ -866,12 +861,13 @@ static int batadv_bla_process_claim(struct batadv_priv *bat_priv,
866 struct batadv_hard_iface *primary_if, 861 struct batadv_hard_iface *primary_if,
867 struct sk_buff *skb) 862 struct sk_buff *skb)
868{ 863{
869 struct batadv_bla_claim_dst *bla_dst; 864 struct batadv_bla_claim_dst *bla_dst, *bla_dst_own;
870 uint8_t *hw_src, *hw_dst; 865 uint8_t *hw_src, *hw_dst;
871 struct vlan_ethhdr *vhdr; 866 struct vlan_hdr *vhdr, vhdr_buf;
872 struct ethhdr *ethhdr; 867 struct ethhdr *ethhdr;
873 struct arphdr *arphdr; 868 struct arphdr *arphdr;
874 unsigned short vid; 869 unsigned short vid;
870 int vlan_depth = 0;
875 __be16 proto; 871 __be16 proto;
876 int headlen; 872 int headlen;
877 int ret; 873 int ret;
@@ -882,9 +878,24 @@ static int batadv_bla_process_claim(struct batadv_priv *bat_priv,
882 proto = ethhdr->h_proto; 878 proto = ethhdr->h_proto;
883 headlen = ETH_HLEN; 879 headlen = ETH_HLEN;
884 if (vid & BATADV_VLAN_HAS_TAG) { 880 if (vid & BATADV_VLAN_HAS_TAG) {
885 vhdr = vlan_eth_hdr(skb); 881 /* Traverse the VLAN/Ethertypes.
886 proto = vhdr->h_vlan_encapsulated_proto; 882 *
887 headlen += VLAN_HLEN; 883 * At this point it is known that the first protocol is a VLAN
884 * header, so start checking at the encapsulated protocol.
885 *
886 * The depth of the VLAN headers is recorded to drop BLA claim
887 * frames encapsulated into multiple VLAN headers (QinQ).
888 */
889 do {
890 vhdr = skb_header_pointer(skb, headlen, VLAN_HLEN,
891 &vhdr_buf);
892 if (!vhdr)
893 return 0;
894
895 proto = vhdr->h_vlan_encapsulated_proto;
896 headlen += VLAN_HLEN;
897 vlan_depth++;
898 } while (proto == htons(ETH_P_8021Q));
888 } 899 }
889 900
890 if (proto != htons(ETH_P_ARP)) 901 if (proto != htons(ETH_P_ARP))
@@ -914,6 +925,19 @@ static int batadv_bla_process_claim(struct batadv_priv *bat_priv,
914 hw_src = (uint8_t *)arphdr + sizeof(struct arphdr); 925 hw_src = (uint8_t *)arphdr + sizeof(struct arphdr);
915 hw_dst = hw_src + ETH_ALEN + 4; 926 hw_dst = hw_src + ETH_ALEN + 4;
916 bla_dst = (struct batadv_bla_claim_dst *)hw_dst; 927 bla_dst = (struct batadv_bla_claim_dst *)hw_dst;
928 bla_dst_own = &bat_priv->bla.claim_dest;
929
930 /* check if it is a claim frame in general */
931 if (memcmp(bla_dst->magic, bla_dst_own->magic,
932 sizeof(bla_dst->magic)) != 0)
933 return 0;
934
935 /* check if there is a claim frame encapsulated deeper in (QinQ) and
936 * drop that, as this is not supported by BLA but should also not be
937 * sent via the mesh.
938 */
939 if (vlan_depth > 1)
940 return 1;
917 941
918 /* check if it is a claim frame. */ 942 /* check if it is a claim frame. */
919 ret = batadv_check_claim_group(bat_priv, primary_if, hw_src, hw_dst, 943 ret = batadv_check_claim_group(bat_priv, primary_if, hw_src, hw_dst,
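The claim parser now walks stacked 802.1Q headers with skb_header_pointer() and records the depth, so QinQ-encapsulated claim frames can be rejected later in the function. A standalone sketch of the same traversal over a flat buffer (illustrative only; the kernel reads through skb_header_pointer() rather than raw pointer arithmetic):

#include <stdint.h>
#include <stddef.h>
#include <string.h>
#include <arpa/inet.h>

#define ETH_HLEN      14
#define VLAN_HLEN     4
#define ETH_P_8021Q   0x8100

struct vlan_hdr_buf {                   /* mirrors struct vlan_hdr: TCI + inner proto */
        uint16_t h_vlan_TCI;
        uint16_t h_vlan_encapsulated_proto;
};

/* Walk nested VLAN tags in a raw Ethernet frame. Returns the VLAN depth,
 * leaving *offset at the inner payload and *proto at the inner EtherType;
 * -1 on a truncated frame. */
static int vlan_depth(const uint8_t *frame, size_t len, size_t *offset,
                      uint16_t *proto)
{
        int depth = 0;

        if (len < ETH_HLEN)
                return -1;
        *offset = ETH_HLEN;
        memcpy(proto, frame + 12, sizeof(*proto));      /* outer EtherType */
        *proto = ntohs(*proto);

        while (*proto == ETH_P_8021Q) {
                struct vlan_hdr_buf vhdr;

                if (*offset + VLAN_HLEN > len)
                        return -1;                      /* would run off the frame */
                memcpy(&vhdr, frame + *offset, VLAN_HLEN);
                *proto = ntohs(vhdr.h_vlan_encapsulated_proto);
                *offset += VLAN_HLEN;
                depth++;
        }
        return depth;
}

A claim frame that reports a depth greater than 1 corresponds to the vlan_depth > 1 case the hunk above drops.
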
diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
index e7ee65dc20bf..cbd677f48c00 100644
--- a/net/batman-adv/soft-interface.c
+++ b/net/batman-adv/soft-interface.c
@@ -448,10 +448,15 @@ out:
448 * possibly free it 448 * possibly free it
449 * @softif_vlan: the vlan object to release 449 * @softif_vlan: the vlan object to release
450 */ 450 */
451void batadv_softif_vlan_free_ref(struct batadv_softif_vlan *softif_vlan) 451void batadv_softif_vlan_free_ref(struct batadv_softif_vlan *vlan)
452{ 452{
453 if (atomic_dec_and_test(&softif_vlan->refcount)) 453 if (atomic_dec_and_test(&vlan->refcount)) {
454 kfree_rcu(softif_vlan, rcu); 454 spin_lock_bh(&vlan->bat_priv->softif_vlan_list_lock);
455 hlist_del_rcu(&vlan->list);
456 spin_unlock_bh(&vlan->bat_priv->softif_vlan_list_lock);
457
458 kfree_rcu(vlan, rcu);
459 }
455} 460}
456 461
457/** 462/**
@@ -505,6 +510,7 @@ int batadv_softif_create_vlan(struct batadv_priv *bat_priv, unsigned short vid)
505 if (!vlan) 510 if (!vlan)
506 return -ENOMEM; 511 return -ENOMEM;
507 512
513 vlan->bat_priv = bat_priv;
508 vlan->vid = vid; 514 vlan->vid = vid;
509 atomic_set(&vlan->refcount, 1); 515 atomic_set(&vlan->refcount, 1);
510 516
@@ -516,6 +522,10 @@ int batadv_softif_create_vlan(struct batadv_priv *bat_priv, unsigned short vid)
516 return err; 522 return err;
517 } 523 }
518 524
525 spin_lock_bh(&bat_priv->softif_vlan_list_lock);
526 hlist_add_head_rcu(&vlan->list, &bat_priv->softif_vlan_list);
527 spin_unlock_bh(&bat_priv->softif_vlan_list_lock);
528
519 /* add a new TT local entry. This one will be marked with the NOPURGE 529 /* add a new TT local entry. This one will be marked with the NOPURGE
520 * flag 530 * flag
521 */ 531 */
@@ -523,10 +533,6 @@ int batadv_softif_create_vlan(struct batadv_priv *bat_priv, unsigned short vid)
523 bat_priv->soft_iface->dev_addr, vid, 533 bat_priv->soft_iface->dev_addr, vid,
524 BATADV_NULL_IFINDEX, BATADV_NO_MARK); 534 BATADV_NULL_IFINDEX, BATADV_NO_MARK);
525 535
526 spin_lock_bh(&bat_priv->softif_vlan_list_lock);
527 hlist_add_head_rcu(&vlan->list, &bat_priv->softif_vlan_list);
528 spin_unlock_bh(&bat_priv->softif_vlan_list_lock);
529
530 return 0; 536 return 0;
531} 537}
532 538
@@ -538,18 +544,13 @@ int batadv_softif_create_vlan(struct batadv_priv *bat_priv, unsigned short vid)
538static void batadv_softif_destroy_vlan(struct batadv_priv *bat_priv, 544static void batadv_softif_destroy_vlan(struct batadv_priv *bat_priv,
539 struct batadv_softif_vlan *vlan) 545 struct batadv_softif_vlan *vlan)
540{ 546{
541 spin_lock_bh(&bat_priv->softif_vlan_list_lock);
542 hlist_del_rcu(&vlan->list);
543 spin_unlock_bh(&bat_priv->softif_vlan_list_lock);
544
545 batadv_sysfs_del_vlan(bat_priv, vlan);
546
547 /* explicitly remove the associated TT local entry because it is marked 547 /* explicitly remove the associated TT local entry because it is marked
548 * with the NOPURGE flag 548 * with the NOPURGE flag
549 */ 549 */
550 batadv_tt_local_remove(bat_priv, bat_priv->soft_iface->dev_addr, 550 batadv_tt_local_remove(bat_priv, bat_priv->soft_iface->dev_addr,
551 vlan->vid, "vlan interface destroyed", false); 551 vlan->vid, "vlan interface destroyed", false);
552 552
553 batadv_sysfs_del_vlan(bat_priv, vlan);
553 batadv_softif_vlan_free_ref(vlan); 554 batadv_softif_vlan_free_ref(vlan);
554} 555}
555 556
@@ -567,6 +568,8 @@ static int batadv_interface_add_vid(struct net_device *dev, __be16 proto,
567 unsigned short vid) 568 unsigned short vid)
568{ 569{
569 struct batadv_priv *bat_priv = netdev_priv(dev); 570 struct batadv_priv *bat_priv = netdev_priv(dev);
571 struct batadv_softif_vlan *vlan;
572 int ret;
570 573
571 /* only 802.1Q vlans are supported. 574 /* only 802.1Q vlans are supported.
572 * batman-adv does not know how to handle other types 575 * batman-adv does not know how to handle other types
@@ -576,7 +579,36 @@ static int batadv_interface_add_vid(struct net_device *dev, __be16 proto,
576 579
577 vid |= BATADV_VLAN_HAS_TAG; 580 vid |= BATADV_VLAN_HAS_TAG;
578 581
579 return batadv_softif_create_vlan(bat_priv, vid); 582 /* if a new vlan is getting created and it already exists, it means that
583 * it was not deleted yet. batadv_softif_vlan_get() increases the
584 * refcount in order to revive the object.
585 *
586 * if it does not exist then create it.
587 */
588 vlan = batadv_softif_vlan_get(bat_priv, vid);
589 if (!vlan)
590 return batadv_softif_create_vlan(bat_priv, vid);
591
592 /* recreate the sysfs object if it was already destroyed (and it should
593 * be since we received a kill_vid() for this vlan
594 */
595 if (!vlan->kobj) {
596 ret = batadv_sysfs_add_vlan(bat_priv->soft_iface, vlan);
597 if (ret) {
598 batadv_softif_vlan_free_ref(vlan);
599 return ret;
600 }
601 }
602
603 /* add a new TT local entry. This one will be marked with the NOPURGE
604 * flag. This must be added again, even if the vlan object already
605 * exists, because the entry was deleted by kill_vid()
606 */
607 batadv_tt_local_add(bat_priv->soft_iface,
608 bat_priv->soft_iface->dev_addr, vid,
609 BATADV_NULL_IFINDEX, BATADV_NO_MARK);
610
611 return 0;
580} 612}
581 613
582/** 614/**
diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c
index d636bde72c9a..5f59e7f899a0 100644
--- a/net/batman-adv/translation-table.c
+++ b/net/batman-adv/translation-table.c
@@ -511,6 +511,7 @@ bool batadv_tt_local_add(struct net_device *soft_iface, const uint8_t *addr,
511 struct batadv_priv *bat_priv = netdev_priv(soft_iface); 511 struct batadv_priv *bat_priv = netdev_priv(soft_iface);
512 struct batadv_tt_local_entry *tt_local; 512 struct batadv_tt_local_entry *tt_local;
513 struct batadv_tt_global_entry *tt_global = NULL; 513 struct batadv_tt_global_entry *tt_global = NULL;
514 struct batadv_softif_vlan *vlan;
514 struct net_device *in_dev = NULL; 515 struct net_device *in_dev = NULL;
515 struct hlist_head *head; 516 struct hlist_head *head;
516 struct batadv_tt_orig_list_entry *orig_entry; 517 struct batadv_tt_orig_list_entry *orig_entry;
@@ -572,6 +573,9 @@ bool batadv_tt_local_add(struct net_device *soft_iface, const uint8_t *addr,
572 if (!tt_local) 573 if (!tt_local)
573 goto out; 574 goto out;
574 575
576 /* increase the refcounter of the related vlan */
577 vlan = batadv_softif_vlan_get(bat_priv, vid);
578
575 batadv_dbg(BATADV_DBG_TT, bat_priv, 579 batadv_dbg(BATADV_DBG_TT, bat_priv,
576 "Creating new local tt entry: %pM (vid: %d, ttvn: %d)\n", 580 "Creating new local tt entry: %pM (vid: %d, ttvn: %d)\n",
577 addr, BATADV_PRINT_VID(vid), 581 addr, BATADV_PRINT_VID(vid),
@@ -604,6 +608,7 @@ bool batadv_tt_local_add(struct net_device *soft_iface, const uint8_t *addr,
604 if (unlikely(hash_added != 0)) { 608 if (unlikely(hash_added != 0)) {
605 /* remove the reference for the hash */ 609 /* remove the reference for the hash */
606 batadv_tt_local_entry_free_ref(tt_local); 610 batadv_tt_local_entry_free_ref(tt_local);
611 batadv_softif_vlan_free_ref(vlan);
607 goto out; 612 goto out;
608 } 613 }
609 614
@@ -1009,6 +1014,7 @@ uint16_t batadv_tt_local_remove(struct batadv_priv *bat_priv,
1009{ 1014{
1010 struct batadv_tt_local_entry *tt_local_entry; 1015 struct batadv_tt_local_entry *tt_local_entry;
1011 uint16_t flags, curr_flags = BATADV_NO_FLAGS; 1016 uint16_t flags, curr_flags = BATADV_NO_FLAGS;
1017 struct batadv_softif_vlan *vlan;
1012 1018
1013 tt_local_entry = batadv_tt_local_hash_find(bat_priv, addr, vid); 1019 tt_local_entry = batadv_tt_local_hash_find(bat_priv, addr, vid);
1014 if (!tt_local_entry) 1020 if (!tt_local_entry)
@@ -1039,6 +1045,11 @@ uint16_t batadv_tt_local_remove(struct batadv_priv *bat_priv,
1039 hlist_del_rcu(&tt_local_entry->common.hash_entry); 1045 hlist_del_rcu(&tt_local_entry->common.hash_entry);
1040 batadv_tt_local_entry_free_ref(tt_local_entry); 1046 batadv_tt_local_entry_free_ref(tt_local_entry);
1041 1047
1048 /* decrease the reference held for this vlan */
1049 vlan = batadv_softif_vlan_get(bat_priv, vid);
1050 batadv_softif_vlan_free_ref(vlan);
1051 batadv_softif_vlan_free_ref(vlan);
1052
1042out: 1053out:
1043 if (tt_local_entry) 1054 if (tt_local_entry)
1044 batadv_tt_local_entry_free_ref(tt_local_entry); 1055 batadv_tt_local_entry_free_ref(tt_local_entry);
@@ -1111,6 +1122,7 @@ static void batadv_tt_local_table_free(struct batadv_priv *bat_priv)
1111 spinlock_t *list_lock; /* protects write access to the hash lists */ 1122 spinlock_t *list_lock; /* protects write access to the hash lists */
1112 struct batadv_tt_common_entry *tt_common_entry; 1123 struct batadv_tt_common_entry *tt_common_entry;
1113 struct batadv_tt_local_entry *tt_local; 1124 struct batadv_tt_local_entry *tt_local;
1125 struct batadv_softif_vlan *vlan;
1114 struct hlist_node *node_tmp; 1126 struct hlist_node *node_tmp;
1115 struct hlist_head *head; 1127 struct hlist_head *head;
1116 uint32_t i; 1128 uint32_t i;
@@ -1131,6 +1143,13 @@ static void batadv_tt_local_table_free(struct batadv_priv *bat_priv)
1131 tt_local = container_of(tt_common_entry, 1143 tt_local = container_of(tt_common_entry,
1132 struct batadv_tt_local_entry, 1144 struct batadv_tt_local_entry,
1133 common); 1145 common);
1146
1147 /* decrease the reference held for this vlan */
1148 vlan = batadv_softif_vlan_get(bat_priv,
1149 tt_common_entry->vid);
1150 batadv_softif_vlan_free_ref(vlan);
1151 batadv_softif_vlan_free_ref(vlan);
1152
1134 batadv_tt_local_entry_free_ref(tt_local); 1153 batadv_tt_local_entry_free_ref(tt_local);
1135 } 1154 }
1136 spin_unlock_bh(list_lock); 1155 spin_unlock_bh(list_lock);
@@ -3139,6 +3158,7 @@ static void batadv_tt_local_purge_pending_clients(struct batadv_priv *bat_priv)
3139 struct batadv_hashtable *hash = bat_priv->tt.local_hash; 3158 struct batadv_hashtable *hash = bat_priv->tt.local_hash;
3140 struct batadv_tt_common_entry *tt_common; 3159 struct batadv_tt_common_entry *tt_common;
3141 struct batadv_tt_local_entry *tt_local; 3160 struct batadv_tt_local_entry *tt_local;
3161 struct batadv_softif_vlan *vlan;
3142 struct hlist_node *node_tmp; 3162 struct hlist_node *node_tmp;
3143 struct hlist_head *head; 3163 struct hlist_head *head;
3144 spinlock_t *list_lock; /* protects write access to the hash lists */ 3164 spinlock_t *list_lock; /* protects write access to the hash lists */
@@ -3167,6 +3187,12 @@ static void batadv_tt_local_purge_pending_clients(struct batadv_priv *bat_priv)
3167 tt_local = container_of(tt_common, 3187 tt_local = container_of(tt_common,
3168 struct batadv_tt_local_entry, 3188 struct batadv_tt_local_entry,
3169 common); 3189 common);
3190
3191 /* decrease the reference held for this vlan */
3192 vlan = batadv_softif_vlan_get(bat_priv, tt_common->vid);
3193 batadv_softif_vlan_free_ref(vlan);
3194 batadv_softif_vlan_free_ref(vlan);
3195
3170 batadv_tt_local_entry_free_ref(tt_local); 3196 batadv_tt_local_entry_free_ref(tt_local);
3171 } 3197 }
3172 spin_unlock_bh(list_lock); 3198 spin_unlock_bh(list_lock);
diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h
index 34891a56773f..8854c05622a9 100644
--- a/net/batman-adv/types.h
+++ b/net/batman-adv/types.h
@@ -687,6 +687,7 @@ struct batadv_priv_nc {
687 687
688/** 688/**
689 * struct batadv_softif_vlan - per VLAN attributes set 689 * struct batadv_softif_vlan - per VLAN attributes set
690 * @bat_priv: pointer to the mesh object
690 * @vid: VLAN identifier 691 * @vid: VLAN identifier
691 * @kobj: kobject for sysfs vlan subdirectory 692 * @kobj: kobject for sysfs vlan subdirectory
692 * @ap_isolation: AP isolation state 693 * @ap_isolation: AP isolation state
@@ -696,6 +697,7 @@ struct batadv_priv_nc {
696 * @rcu: struct used for freeing in a RCU-safe manner 697 * @rcu: struct used for freeing in a RCU-safe manner
697 */ 698 */
698struct batadv_softif_vlan { 699struct batadv_softif_vlan {
700 struct batadv_priv *bat_priv;
699 unsigned short vid; 701 unsigned short vid;
700 struct kobject *kobj; 702 struct kobject *kobj;
701 atomic_t ap_isolation; /* boolean */ 703 atomic_t ap_isolation; /* boolean */
diff --git a/net/core/dev.c b/net/core/dev.c
index 7990984ca364..367a586d0c8a 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -4096,6 +4096,8 @@ static void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
4096 skb->vlan_tci = 0; 4096 skb->vlan_tci = 0;
4097 skb->dev = napi->dev; 4097 skb->dev = napi->dev;
4098 skb->skb_iif = 0; 4098 skb->skb_iif = 0;
4099 skb->encapsulation = 0;
4100 skb_shinfo(skb)->gso_type = 0;
4099 skb->truesize = SKB_TRUESIZE(skb_end_offset(skb)); 4101 skb->truesize = SKB_TRUESIZE(skb_end_offset(skb));
4100 4102
4101 napi->skb = skb; 4103 napi->skb = skb;
diff --git a/net/dns_resolver/dns_query.c b/net/dns_resolver/dns_query.c
index 9acec61f5433..dd8696a3dbec 100644
--- a/net/dns_resolver/dns_query.c
+++ b/net/dns_resolver/dns_query.c
@@ -150,7 +150,7 @@ int dns_query(const char *type, const char *name, size_t namelen,
150 goto put; 150 goto put;
151 151
152 memcpy(*_result, upayload->data, len); 152 memcpy(*_result, upayload->data, len);
153 *_result[len] = '\0'; 153 (*_result)[len] = '\0';
154 154
155 if (_expiry) 155 if (_expiry)
156 *_expiry = rkey->expiry; 156 *_expiry = rkey->expiry;
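The dns_query() fix is a C operator-precedence bug: *_result[len] parses as *(_result[len]), which indexes the pointer-to-pointer out of bounds and writes the terminator through whatever it finds there, whereas (*_result)[len] terminates the freshly allocated buffer as intended. A small standalone demonstration:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
        char **_result = malloc(sizeof(*_result));
        size_t len = 3;

        *_result = malloc(len + 1);
        memcpy(*_result, "abcXXX", len);

        /* Wrong: *_result[len] parses as *(_result[len]) -- it reads the
         * pointer at _result[3], which does not exist, then writes through it.
         *
         *   *_result[len] = '\0';
         *
         * Right: dereference first, then index into the string buffer. */
        (*_result)[len] = '\0';

        puts(*_result);                 /* prints "abc" */
        free(*_result);
        free(_result);
        return 0;
}
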
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index d5e6836cf772..d156b3c5f363 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -1429,6 +1429,9 @@ static int inet_gro_complete(struct sk_buff *skb, int nhoff)
1429 int proto = iph->protocol; 1429 int proto = iph->protocol;
1430 int err = -ENOSYS; 1430 int err = -ENOSYS;
1431 1431
1432 if (skb->encapsulation)
1433 skb_set_inner_network_header(skb, nhoff);
1434
1432 csum_replace2(&iph->check, iph->tot_len, newlen); 1435 csum_replace2(&iph->check, iph->tot_len, newlen);
1433 iph->tot_len = newlen; 1436 iph->tot_len = newlen;
1434 1437
diff --git a/net/ipv4/gre_offload.c b/net/ipv4/gre_offload.c
index eb92deb12666..f0bdd47bbbcb 100644
--- a/net/ipv4/gre_offload.c
+++ b/net/ipv4/gre_offload.c
@@ -263,6 +263,9 @@ static int gre_gro_complete(struct sk_buff *skb, int nhoff)
263 int err = -ENOENT; 263 int err = -ENOENT;
264 __be16 type; 264 __be16 type;
265 265
266 skb->encapsulation = 1;
267 skb_shinfo(skb)->gso_type = SKB_GSO_GRE;
268
266 type = greh->protocol; 269 type = greh->protocol;
267 if (greh->flags & GRE_KEY) 270 if (greh->flags & GRE_KEY)
268 grehlen += GRE_HEADER_SECTION; 271 grehlen += GRE_HEADER_SECTION;
diff --git a/net/ipv4/ip_options.c b/net/ipv4/ip_options.c
index 5e7aecea05cd..ad382499bace 100644
--- a/net/ipv4/ip_options.c
+++ b/net/ipv4/ip_options.c
@@ -288,6 +288,10 @@ int ip_options_compile(struct net *net,
288 optptr++; 288 optptr++;
289 continue; 289 continue;
290 } 290 }
291 if (unlikely(l < 2)) {
292 pp_ptr = optptr;
293 goto error;
294 }
291 optlen = optptr[1]; 295 optlen = optptr[1];
292 if (optlen < 2 || optlen > l) { 296 if (optlen < 2 || optlen > l) {
293 pp_ptr = optptr; 297 pp_ptr = optptr;
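ip_options_compile() reads the option length from optptr[1], so the new check ensures at least two bytes remain before that read; a header ending in a lone option-type byte previously walked one byte past the end. A hedged, standalone sketch of the same type/length/value walk with the guard in place:

#include <stdint.h>
#include <stddef.h>

#define IPOPT_END    0
#define IPOPT_NOOP   1

/* Walk type/length/value options in 'opt' of 'optlen' bytes.
 * Returns 0 on success, -1 on a malformed option. Illustrative only. */
static int walk_options(const uint8_t *opt, int optlen)
{
        int l;

        for (l = optlen; l > 0; ) {
                uint8_t type = *opt;
                uint8_t olen;

                if (type == IPOPT_END)
                        break;
                if (type == IPOPT_NOOP) {       /* single-byte option */
                        l--;
                        opt++;
                        continue;
                }
                if (l < 2)                      /* the fix: length byte must exist */
                        return -1;
                olen = opt[1];
                if (olen < 2 || olen > l)       /* length must cover itself and fit */
                        return -1;
                /* ... handle the option body here ... */
                l -= olen;
                opt += olen;
        }
        return 0;
}

int main(void)
{
        const uint8_t good[] = { IPOPT_NOOP, 3, 4, 0x00, 0x00, IPOPT_END };
        const uint8_t bad[]  = { IPOPT_NOOP, 3 };       /* type byte with no length byte */

        return walk_options(good, sizeof(good)) == 0 &&
               walk_options(bad, sizeof(bad)) == -1 ? 0 : 1;
}
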
diff --git a/net/ipv4/tcp_offload.c b/net/ipv4/tcp_offload.c
index 4e86c59ec7f7..55046ecd083e 100644
--- a/net/ipv4/tcp_offload.c
+++ b/net/ipv4/tcp_offload.c
@@ -309,7 +309,7 @@ static int tcp4_gro_complete(struct sk_buff *skb, int thoff)
309 309
310 th->check = ~tcp_v4_check(skb->len - thoff, iph->saddr, 310 th->check = ~tcp_v4_check(skb->len - thoff, iph->saddr,
311 iph->daddr, 0); 311 iph->daddr, 0);
312 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4; 312 skb_shinfo(skb)->gso_type |= SKB_GSO_TCPV4;
313 313
314 return tcp_gro_complete(skb); 314 return tcp_gro_complete(skb);
315} 315}
diff --git a/net/ipv6/tcpv6_offload.c b/net/ipv6/tcpv6_offload.c
index 8517d3cd1aed..01b0ff9a0c2c 100644
--- a/net/ipv6/tcpv6_offload.c
+++ b/net/ipv6/tcpv6_offload.c
@@ -73,7 +73,7 @@ static int tcp6_gro_complete(struct sk_buff *skb, int thoff)
73 73
74 th->check = ~tcp_v6_check(skb->len - thoff, &iph->saddr, 74 th->check = ~tcp_v6_check(skb->len - thoff, &iph->saddr,
75 &iph->daddr, 0); 75 &iph->daddr, 0);
76 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6; 76 skb_shinfo(skb)->gso_type |= SKB_GSO_TCPV6;
77 77
78 return tcp_gro_complete(skb); 78 return tcp_gro_complete(skb);
79} 79}
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index ab4566cfcbe4..8746ff9a8357 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -35,7 +35,7 @@ int nft_register_afinfo(struct net *net, struct nft_af_info *afi)
35{ 35{
36 INIT_LIST_HEAD(&afi->tables); 36 INIT_LIST_HEAD(&afi->tables);
37 nfnl_lock(NFNL_SUBSYS_NFTABLES); 37 nfnl_lock(NFNL_SUBSYS_NFTABLES);
38 list_add_tail(&afi->list, &net->nft.af_info); 38 list_add_tail_rcu(&afi->list, &net->nft.af_info);
39 nfnl_unlock(NFNL_SUBSYS_NFTABLES); 39 nfnl_unlock(NFNL_SUBSYS_NFTABLES);
40 return 0; 40 return 0;
41} 41}
@@ -51,7 +51,7 @@ EXPORT_SYMBOL_GPL(nft_register_afinfo);
51void nft_unregister_afinfo(struct nft_af_info *afi) 51void nft_unregister_afinfo(struct nft_af_info *afi)
52{ 52{
53 nfnl_lock(NFNL_SUBSYS_NFTABLES); 53 nfnl_lock(NFNL_SUBSYS_NFTABLES);
54 list_del(&afi->list); 54 list_del_rcu(&afi->list);
55 nfnl_unlock(NFNL_SUBSYS_NFTABLES); 55 nfnl_unlock(NFNL_SUBSYS_NFTABLES);
56} 56}
57EXPORT_SYMBOL_GPL(nft_unregister_afinfo); 57EXPORT_SYMBOL_GPL(nft_unregister_afinfo);
@@ -277,11 +277,14 @@ static int nf_tables_dump_tables(struct sk_buff *skb,
277 struct net *net = sock_net(skb->sk); 277 struct net *net = sock_net(skb->sk);
278 int family = nfmsg->nfgen_family; 278 int family = nfmsg->nfgen_family;
279 279
280 list_for_each_entry(afi, &net->nft.af_info, list) { 280 rcu_read_lock();
281 cb->seq = net->nft.base_seq;
282
283 list_for_each_entry_rcu(afi, &net->nft.af_info, list) {
281 if (family != NFPROTO_UNSPEC && family != afi->family) 284 if (family != NFPROTO_UNSPEC && family != afi->family)
282 continue; 285 continue;
283 286
284 list_for_each_entry(table, &afi->tables, list) { 287 list_for_each_entry_rcu(table, &afi->tables, list) {
285 if (idx < s_idx) 288 if (idx < s_idx)
286 goto cont; 289 goto cont;
287 if (idx > s_idx) 290 if (idx > s_idx)
@@ -294,11 +297,14 @@ static int nf_tables_dump_tables(struct sk_buff *skb,
294 NLM_F_MULTI, 297 NLM_F_MULTI,
295 afi->family, table) < 0) 298 afi->family, table) < 0)
296 goto done; 299 goto done;
300
301 nl_dump_check_consistent(cb, nlmsg_hdr(skb));
297cont: 302cont:
298 idx++; 303 idx++;
299 } 304 }
300 } 305 }
301done: 306done:
307 rcu_read_unlock();
302 cb->args[0] = idx; 308 cb->args[0] = idx;
303 return skb->len; 309 return skb->len;
304} 310}
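This and the following dump hunks convert the netlink dump paths from mutex-protected list walks to RCU iteration seeded with the new base_seq: nl_dump_check_consistent() stamps NLM_F_DUMP_INTR on the reply when the generation moves under the dump, replacing the old -EBUSY retry removed further down in nf_tables_dump_rules(). A hedged sketch of that dump shape, with struct my_entry and my_fill() as hypothetical names:

/* Sketch only; not a buildable unit. */
#include <linux/rcupdate.h>
#include <linux/list.h>
#include <linux/netlink.h>
#include <net/netlink.h>

struct my_entry {
        struct list_head list;
        /* ... payload ... */
};

static int my_fill(struct sk_buff *skb, struct my_entry *e);

static int my_dump(struct sk_buff *skb, struct netlink_callback *cb,
                   struct list_head *head, unsigned int base_seq)
{
        struct my_entry *e;
        int idx = 0, s_idx = cb->args[0];

        rcu_read_lock();
        cb->seq = base_seq;             /* generation at the start of this pass */

        list_for_each_entry_rcu(e, head, list) {
                if (idx < s_idx)
                        goto cont;
                if (my_fill(skb, e) < 0)
                        break;          /* resume at this entry on the next call */
                /* stamps NLM_F_DUMP_INTR if the generation moved meanwhile */
                nl_dump_check_consistent(cb, nlmsg_hdr(skb));
cont:
                idx++;
        }
        rcu_read_unlock();

        cb->args[0] = idx;
        return skb->len;
}
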
@@ -407,6 +413,9 @@ static int nf_tables_updtable(struct nft_ctx *ctx)
407 if (flags & ~NFT_TABLE_F_DORMANT) 413 if (flags & ~NFT_TABLE_F_DORMANT)
408 return -EINVAL; 414 return -EINVAL;
409 415
416 if (flags == ctx->table->flags)
417 return 0;
418
410 trans = nft_trans_alloc(ctx, NFT_MSG_NEWTABLE, 419 trans = nft_trans_alloc(ctx, NFT_MSG_NEWTABLE,
411 sizeof(struct nft_trans_table)); 420 sizeof(struct nft_trans_table));
412 if (trans == NULL) 421 if (trans == NULL)
@@ -514,7 +523,7 @@ static int nf_tables_newtable(struct sock *nlsk, struct sk_buff *skb,
514 module_put(afi->owner); 523 module_put(afi->owner);
515 return err; 524 return err;
516 } 525 }
517 list_add_tail(&table->list, &afi->tables); 526 list_add_tail_rcu(&table->list, &afi->tables);
518 return 0; 527 return 0;
519} 528}
520 529
@@ -546,7 +555,7 @@ static int nf_tables_deltable(struct sock *nlsk, struct sk_buff *skb,
546 if (err < 0) 555 if (err < 0)
547 return err; 556 return err;
548 557
549 list_del(&table->list); 558 list_del_rcu(&table->list);
550 return 0; 559 return 0;
551} 560}
552 561
@@ -635,13 +644,20 @@ static int nft_dump_stats(struct sk_buff *skb, struct nft_stats __percpu *stats)
635{ 644{
636 struct nft_stats *cpu_stats, total; 645 struct nft_stats *cpu_stats, total;
637 struct nlattr *nest; 646 struct nlattr *nest;
647 unsigned int seq;
648 u64 pkts, bytes;
638 int cpu; 649 int cpu;
639 650
640 memset(&total, 0, sizeof(total)); 651 memset(&total, 0, sizeof(total));
641 for_each_possible_cpu(cpu) { 652 for_each_possible_cpu(cpu) {
642 cpu_stats = per_cpu_ptr(stats, cpu); 653 cpu_stats = per_cpu_ptr(stats, cpu);
643 total.pkts += cpu_stats->pkts; 654 do {
644 total.bytes += cpu_stats->bytes; 655 seq = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
656 pkts = cpu_stats->pkts;
657 bytes = cpu_stats->bytes;
658 } while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, seq));
659 total.pkts += pkts;
660 total.bytes += bytes;
645 } 661 }
646 nest = nla_nest_start(skb, NFTA_CHAIN_COUNTERS); 662 nest = nla_nest_start(skb, NFTA_CHAIN_COUNTERS);
647 if (nest == NULL) 663 if (nest == NULL)
@@ -761,12 +777,15 @@ static int nf_tables_dump_chains(struct sk_buff *skb,
761 struct net *net = sock_net(skb->sk); 777 struct net *net = sock_net(skb->sk);
762 int family = nfmsg->nfgen_family; 778 int family = nfmsg->nfgen_family;
763 779
764 list_for_each_entry(afi, &net->nft.af_info, list) { 780 rcu_read_lock();
781 cb->seq = net->nft.base_seq;
782
783 list_for_each_entry_rcu(afi, &net->nft.af_info, list) {
765 if (family != NFPROTO_UNSPEC && family != afi->family) 784 if (family != NFPROTO_UNSPEC && family != afi->family)
766 continue; 785 continue;
767 786
768 list_for_each_entry(table, &afi->tables, list) { 787 list_for_each_entry_rcu(table, &afi->tables, list) {
769 list_for_each_entry(chain, &table->chains, list) { 788 list_for_each_entry_rcu(chain, &table->chains, list) {
770 if (idx < s_idx) 789 if (idx < s_idx)
771 goto cont; 790 goto cont;
772 if (idx > s_idx) 791 if (idx > s_idx)
@@ -778,17 +797,19 @@ static int nf_tables_dump_chains(struct sk_buff *skb,
778 NLM_F_MULTI, 797 NLM_F_MULTI,
779 afi->family, table, chain) < 0) 798 afi->family, table, chain) < 0)
780 goto done; 799 goto done;
800
801 nl_dump_check_consistent(cb, nlmsg_hdr(skb));
781cont: 802cont:
782 idx++; 803 idx++;
783 } 804 }
784 } 805 }
785 } 806 }
786done: 807done:
808 rcu_read_unlock();
787 cb->args[0] = idx; 809 cb->args[0] = idx;
788 return skb->len; 810 return skb->len;
789} 811}
790 812
791
792static int nf_tables_getchain(struct sock *nlsk, struct sk_buff *skb, 813static int nf_tables_getchain(struct sock *nlsk, struct sk_buff *skb,
793 const struct nlmsghdr *nlh, 814 const struct nlmsghdr *nlh,
794 const struct nlattr * const nla[]) 815 const struct nlattr * const nla[])
@@ -861,7 +882,7 @@ static struct nft_stats __percpu *nft_stats_alloc(const struct nlattr *attr)
861 if (!tb[NFTA_COUNTER_BYTES] || !tb[NFTA_COUNTER_PACKETS]) 882 if (!tb[NFTA_COUNTER_BYTES] || !tb[NFTA_COUNTER_PACKETS])
862 return ERR_PTR(-EINVAL); 883 return ERR_PTR(-EINVAL);
863 884
864 newstats = alloc_percpu(struct nft_stats); 885 newstats = netdev_alloc_pcpu_stats(struct nft_stats);
865 if (newstats == NULL) 886 if (newstats == NULL)
866 return ERR_PTR(-ENOMEM); 887 return ERR_PTR(-ENOMEM);
867 888
@@ -1077,7 +1098,7 @@ static int nf_tables_newchain(struct sock *nlsk, struct sk_buff *skb,
1077 } 1098 }
1078 basechain->stats = stats; 1099 basechain->stats = stats;
1079 } else { 1100 } else {
1080 stats = alloc_percpu(struct nft_stats); 1101 stats = netdev_alloc_pcpu_stats(struct nft_stats);
1081 if (IS_ERR(stats)) { 1102 if (IS_ERR(stats)) {
1082 module_put(type->owner); 1103 module_put(type->owner);
1083 kfree(basechain); 1104 kfree(basechain);
@@ -1130,7 +1151,7 @@ static int nf_tables_newchain(struct sock *nlsk, struct sk_buff *skb,
1130 goto err2; 1151 goto err2;
1131 1152
1132 table->use++; 1153 table->use++;
1133 list_add_tail(&chain->list, &table->chains); 1154 list_add_tail_rcu(&chain->list, &table->chains);
1134 return 0; 1155 return 0;
1135err2: 1156err2:
1136 if (!(table->flags & NFT_TABLE_F_DORMANT) && 1157 if (!(table->flags & NFT_TABLE_F_DORMANT) &&
@@ -1180,7 +1201,7 @@ static int nf_tables_delchain(struct sock *nlsk, struct sk_buff *skb,
1180 return err; 1201 return err;
1181 1202
1182 table->use--; 1203 table->use--;
1183 list_del(&chain->list); 1204 list_del_rcu(&chain->list);
1184 return 0; 1205 return 0;
1185} 1206}
1186 1207
@@ -1199,9 +1220,9 @@ int nft_register_expr(struct nft_expr_type *type)
1199{ 1220{
1200 nfnl_lock(NFNL_SUBSYS_NFTABLES); 1221 nfnl_lock(NFNL_SUBSYS_NFTABLES);
1201 if (type->family == NFPROTO_UNSPEC) 1222 if (type->family == NFPROTO_UNSPEC)
1202 list_add_tail(&type->list, &nf_tables_expressions); 1223 list_add_tail_rcu(&type->list, &nf_tables_expressions);
1203 else 1224 else
1204 list_add(&type->list, &nf_tables_expressions); 1225 list_add_rcu(&type->list, &nf_tables_expressions);
1205 nfnl_unlock(NFNL_SUBSYS_NFTABLES); 1226 nfnl_unlock(NFNL_SUBSYS_NFTABLES);
1206 return 0; 1227 return 0;
1207} 1228}
@@ -1216,7 +1237,7 @@ EXPORT_SYMBOL_GPL(nft_register_expr);
1216void nft_unregister_expr(struct nft_expr_type *type) 1237void nft_unregister_expr(struct nft_expr_type *type)
1217{ 1238{
1218 nfnl_lock(NFNL_SUBSYS_NFTABLES); 1239 nfnl_lock(NFNL_SUBSYS_NFTABLES);
1219 list_del(&type->list); 1240 list_del_rcu(&type->list);
1220 nfnl_unlock(NFNL_SUBSYS_NFTABLES); 1241 nfnl_unlock(NFNL_SUBSYS_NFTABLES);
1221} 1242}
1222EXPORT_SYMBOL_GPL(nft_unregister_expr); 1243EXPORT_SYMBOL_GPL(nft_unregister_expr);
@@ -1549,16 +1570,17 @@ static int nf_tables_dump_rules(struct sk_buff *skb,
1549 unsigned int idx = 0, s_idx = cb->args[0]; 1570 unsigned int idx = 0, s_idx = cb->args[0];
1550 struct net *net = sock_net(skb->sk); 1571 struct net *net = sock_net(skb->sk);
1551 int family = nfmsg->nfgen_family; 1572 int family = nfmsg->nfgen_family;
1552 u8 genctr = ACCESS_ONCE(net->nft.genctr);
1553 u8 gencursor = ACCESS_ONCE(net->nft.gencursor);
1554 1573
1555 list_for_each_entry(afi, &net->nft.af_info, list) { 1574 rcu_read_lock();
1575 cb->seq = net->nft.base_seq;
1576
1577 list_for_each_entry_rcu(afi, &net->nft.af_info, list) {
1556 if (family != NFPROTO_UNSPEC && family != afi->family) 1578 if (family != NFPROTO_UNSPEC && family != afi->family)
1557 continue; 1579 continue;
1558 1580
1559 list_for_each_entry(table, &afi->tables, list) { 1581 list_for_each_entry_rcu(table, &afi->tables, list) {
1560 list_for_each_entry(chain, &table->chains, list) { 1582 list_for_each_entry_rcu(chain, &table->chains, list) {
1561 list_for_each_entry(rule, &chain->rules, list) { 1583 list_for_each_entry_rcu(rule, &chain->rules, list) {
1562 if (!nft_rule_is_active(net, rule)) 1584 if (!nft_rule_is_active(net, rule))
1563 goto cont; 1585 goto cont;
1564 if (idx < s_idx) 1586 if (idx < s_idx)
@@ -1572,6 +1594,8 @@ static int nf_tables_dump_rules(struct sk_buff *skb,
1572 NLM_F_MULTI | NLM_F_APPEND, 1594 NLM_F_MULTI | NLM_F_APPEND,
1573 afi->family, table, chain, rule) < 0) 1595 afi->family, table, chain, rule) < 0)
1574 goto done; 1596 goto done;
1597
1598 nl_dump_check_consistent(cb, nlmsg_hdr(skb));
1575cont: 1599cont:
1576 idx++; 1600 idx++;
1577 } 1601 }
@@ -1579,9 +1603,7 @@ cont:
1579 } 1603 }
1580 } 1604 }
1581done: 1605done:
1582 /* Invalidate this dump, a transition to the new generation happened */ 1606 rcu_read_unlock();
1583 if (gencursor != net->nft.gencursor || genctr != net->nft.genctr)
1584 return -EBUSY;
1585 1607
1586 cb->args[0] = idx; 1608 cb->args[0] = idx;
1587 return skb->len; 1609 return skb->len;
@@ -1932,7 +1954,7 @@ static LIST_HEAD(nf_tables_set_ops);
1932int nft_register_set(struct nft_set_ops *ops) 1954int nft_register_set(struct nft_set_ops *ops)
1933{ 1955{
1934 nfnl_lock(NFNL_SUBSYS_NFTABLES); 1956 nfnl_lock(NFNL_SUBSYS_NFTABLES);
1935 list_add_tail(&ops->list, &nf_tables_set_ops); 1957 list_add_tail_rcu(&ops->list, &nf_tables_set_ops);
1936 nfnl_unlock(NFNL_SUBSYS_NFTABLES); 1958 nfnl_unlock(NFNL_SUBSYS_NFTABLES);
1937 return 0; 1959 return 0;
1938} 1960}
@@ -1941,7 +1963,7 @@ EXPORT_SYMBOL_GPL(nft_register_set);
1941void nft_unregister_set(struct nft_set_ops *ops) 1963void nft_unregister_set(struct nft_set_ops *ops)
1942{ 1964{
1943 nfnl_lock(NFNL_SUBSYS_NFTABLES); 1965 nfnl_lock(NFNL_SUBSYS_NFTABLES);
1944 list_del(&ops->list); 1966 list_del_rcu(&ops->list);
1945 nfnl_unlock(NFNL_SUBSYS_NFTABLES); 1967 nfnl_unlock(NFNL_SUBSYS_NFTABLES);
1946} 1968}
1947EXPORT_SYMBOL_GPL(nft_unregister_set); 1969EXPORT_SYMBOL_GPL(nft_unregister_set);
@@ -2234,7 +2256,10 @@ static int nf_tables_dump_sets_table(struct nft_ctx *ctx, struct sk_buff *skb,
2234 if (cb->args[1]) 2256 if (cb->args[1])
2235 return skb->len; 2257 return skb->len;
2236 2258
2237 list_for_each_entry(set, &ctx->table->sets, list) { 2259 rcu_read_lock();
2260 cb->seq = ctx->net->nft.base_seq;
2261
2262 list_for_each_entry_rcu(set, &ctx->table->sets, list) {
2238 if (idx < s_idx) 2263 if (idx < s_idx)
2239 goto cont; 2264 goto cont;
2240 if (nf_tables_fill_set(skb, ctx, set, NFT_MSG_NEWSET, 2265 if (nf_tables_fill_set(skb, ctx, set, NFT_MSG_NEWSET,
@@ -2242,11 +2267,13 @@ static int nf_tables_dump_sets_table(struct nft_ctx *ctx, struct sk_buff *skb,
2242 cb->args[0] = idx; 2267 cb->args[0] = idx;
2243 goto done; 2268 goto done;
2244 } 2269 }
2270 nl_dump_check_consistent(cb, nlmsg_hdr(skb));
2245cont: 2271cont:
2246 idx++; 2272 idx++;
2247 } 2273 }
2248 cb->args[1] = 1; 2274 cb->args[1] = 1;
2249done: 2275done:
2276 rcu_read_unlock();
2250 return skb->len; 2277 return skb->len;
2251} 2278}
2252 2279
@@ -2260,7 +2287,10 @@ static int nf_tables_dump_sets_family(struct nft_ctx *ctx, struct sk_buff *skb,
2260 if (cb->args[1]) 2287 if (cb->args[1])
2261 return skb->len; 2288 return skb->len;
2262 2289
2263 list_for_each_entry(table, &ctx->afi->tables, list) { 2290 rcu_read_lock();
2291 cb->seq = ctx->net->nft.base_seq;
2292
2293 list_for_each_entry_rcu(table, &ctx->afi->tables, list) {
2264 if (cur_table) { 2294 if (cur_table) {
2265 if (cur_table != table) 2295 if (cur_table != table)
2266 continue; 2296 continue;
@@ -2269,7 +2299,7 @@ static int nf_tables_dump_sets_family(struct nft_ctx *ctx, struct sk_buff *skb,
2269 } 2299 }
2270 ctx->table = table; 2300 ctx->table = table;
2271 idx = 0; 2301 idx = 0;
2272 list_for_each_entry(set, &ctx->table->sets, list) { 2302 list_for_each_entry_rcu(set, &ctx->table->sets, list) {
2273 if (idx < s_idx) 2303 if (idx < s_idx)
2274 goto cont; 2304 goto cont;
2275 if (nf_tables_fill_set(skb, ctx, set, NFT_MSG_NEWSET, 2305 if (nf_tables_fill_set(skb, ctx, set, NFT_MSG_NEWSET,
@@ -2278,12 +2308,14 @@ static int nf_tables_dump_sets_family(struct nft_ctx *ctx, struct sk_buff *skb,
2278 cb->args[2] = (unsigned long) table; 2308 cb->args[2] = (unsigned long) table;
2279 goto done; 2309 goto done;
2280 } 2310 }
2311 nl_dump_check_consistent(cb, nlmsg_hdr(skb));
2281cont: 2312cont:
2282 idx++; 2313 idx++;
2283 } 2314 }
2284 } 2315 }
2285 cb->args[1] = 1; 2316 cb->args[1] = 1;
2286done: 2317done:
2318 rcu_read_unlock();
2287 return skb->len; 2319 return skb->len;
2288} 2320}
2289 2321
@@ -2300,7 +2332,10 @@ static int nf_tables_dump_sets_all(struct nft_ctx *ctx, struct sk_buff *skb,
2300 if (cb->args[1]) 2332 if (cb->args[1])
2301 return skb->len; 2333 return skb->len;
2302 2334
2303 list_for_each_entry(afi, &net->nft.af_info, list) { 2335 rcu_read_lock();
2336 cb->seq = net->nft.base_seq;
2337
2338 list_for_each_entry_rcu(afi, &net->nft.af_info, list) {
2304 if (cur_family) { 2339 if (cur_family) {
2305 if (afi->family != cur_family) 2340 if (afi->family != cur_family)
2306 continue; 2341 continue;
@@ -2308,7 +2343,7 @@ static int nf_tables_dump_sets_all(struct nft_ctx *ctx, struct sk_buff *skb,
2308 cur_family = 0; 2343 cur_family = 0;
2309 } 2344 }
2310 2345
2311 list_for_each_entry(table, &afi->tables, list) { 2346 list_for_each_entry_rcu(table, &afi->tables, list) {
2312 if (cur_table) { 2347 if (cur_table) {
2313 if (cur_table != table) 2348 if (cur_table != table)
2314 continue; 2349 continue;
@@ -2319,7 +2354,7 @@ static int nf_tables_dump_sets_all(struct nft_ctx *ctx, struct sk_buff *skb,
2319 ctx->table = table; 2354 ctx->table = table;
2320 ctx->afi = afi; 2355 ctx->afi = afi;
2321 idx = 0; 2356 idx = 0;
2322 list_for_each_entry(set, &ctx->table->sets, list) { 2357 list_for_each_entry_rcu(set, &ctx->table->sets, list) {
2323 if (idx < s_idx) 2358 if (idx < s_idx)
2324 goto cont; 2359 goto cont;
2325 if (nf_tables_fill_set(skb, ctx, set, 2360 if (nf_tables_fill_set(skb, ctx, set,
@@ -2330,6 +2365,7 @@ static int nf_tables_dump_sets_all(struct nft_ctx *ctx, struct sk_buff *skb,
2330 cb->args[3] = afi->family; 2365 cb->args[3] = afi->family;
2331 goto done; 2366 goto done;
2332 } 2367 }
2368 nl_dump_check_consistent(cb, nlmsg_hdr(skb));
2333cont: 2369cont:
2334 idx++; 2370 idx++;
2335 } 2371 }
@@ -2339,6 +2375,7 @@ cont:
2339 } 2375 }
2340 cb->args[1] = 1; 2376 cb->args[1] = 1;
2341done: 2377done:
2378 rcu_read_unlock();
2342 return skb->len; 2379 return skb->len;
2343} 2380}
2344 2381
@@ -2597,7 +2634,7 @@ static int nf_tables_newset(struct sock *nlsk, struct sk_buff *skb,
2597 if (err < 0) 2634 if (err < 0)
2598 goto err2; 2635 goto err2;
2599 2636
2600 list_add_tail(&set->list, &table->sets); 2637 list_add_tail_rcu(&set->list, &table->sets);
2601 table->use++; 2638 table->use++;
2602 return 0; 2639 return 0;
2603 2640
@@ -2617,7 +2654,7 @@ static void nft_set_destroy(struct nft_set *set)
2617 2654
2618static void nf_tables_set_destroy(const struct nft_ctx *ctx, struct nft_set *set) 2655static void nf_tables_set_destroy(const struct nft_ctx *ctx, struct nft_set *set)
2619{ 2656{
2620 list_del(&set->list); 2657 list_del_rcu(&set->list);
2621 nf_tables_set_notify(ctx, set, NFT_MSG_DELSET, GFP_ATOMIC); 2658 nf_tables_set_notify(ctx, set, NFT_MSG_DELSET, GFP_ATOMIC);
2622 nft_set_destroy(set); 2659 nft_set_destroy(set);
2623} 2660}
@@ -2652,7 +2689,7 @@ static int nf_tables_delset(struct sock *nlsk, struct sk_buff *skb,
2652 if (err < 0) 2689 if (err < 0)
2653 return err; 2690 return err;
2654 2691
2655 list_del(&set->list); 2692 list_del_rcu(&set->list);
2656 ctx.table->use--; 2693 ctx.table->use--;
2657 return 0; 2694 return 0;
2658} 2695}
@@ -2704,14 +2741,14 @@ int nf_tables_bind_set(const struct nft_ctx *ctx, struct nft_set *set,
2704 } 2741 }
2705bind: 2742bind:
2706 binding->chain = ctx->chain; 2743 binding->chain = ctx->chain;
2707 list_add_tail(&binding->list, &set->bindings); 2744 list_add_tail_rcu(&binding->list, &set->bindings);
2708 return 0; 2745 return 0;
2709} 2746}
2710 2747
2711void nf_tables_unbind_set(const struct nft_ctx *ctx, struct nft_set *set, 2748void nf_tables_unbind_set(const struct nft_ctx *ctx, struct nft_set *set,
2712 struct nft_set_binding *binding) 2749 struct nft_set_binding *binding)
2713{ 2750{
2714 list_del(&binding->list); 2751 list_del_rcu(&binding->list);
2715 2752
2716 if (list_empty(&set->bindings) && set->flags & NFT_SET_ANONYMOUS && 2753 if (list_empty(&set->bindings) && set->flags & NFT_SET_ANONYMOUS &&
2717 !(set->flags & NFT_SET_INACTIVE)) 2754 !(set->flags & NFT_SET_INACTIVE))
@@ -3346,7 +3383,7 @@ static int nf_tables_commit(struct sk_buff *skb)
3346 struct nft_set *set; 3383 struct nft_set *set;
3347 3384
3348 /* Bump generation counter, invalidate any dump in progress */ 3385 /* Bump generation counter, invalidate any dump in progress */
3349 net->nft.genctr++; 3386 while (++net->nft.base_seq == 0);
3350 3387
3351 /* A new generation has just started */ 3388 /* A new generation has just started */
3352 net->nft.gencursor = gencursor_next(net); 3389 net->nft.gencursor = gencursor_next(net);
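
The commit path above replaces the plain genctr++ with while (++net->nft.base_seq == 0);: the per-netns generation counter is bumped but never allowed to land on 0 when it wraps, since netlink's consistency check treats a recorded sequence of 0 as "no previous message yet" (the net-init hunk further down starts the counter at 1 for the same reason). The idiom in isolation, as ordinary C:

#include <stdio.h>

/* Bump a wrapping generation counter, skipping 0, which stays reserved
 * as the "no generation recorded" sentinel. */
static unsigned int bump_genseq(unsigned int seq)
{
	while (++seq == 0)
		;		/* wrapped: step past 0 */
	return seq;
}

int main(void)
{
	unsigned int seq = 0xfffffffeu;

	seq = bump_genseq(seq);		/* 0xffffffff */
	printf("%#x\n", seq);
	seq = bump_genseq(seq);		/* wraps around 0, yields 0x1 */
	printf("%#x\n", seq);
	return 0;
}
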
@@ -3491,12 +3528,12 @@ static int nf_tables_abort(struct sk_buff *skb)
3491 } 3528 }
3492 nft_trans_destroy(trans); 3529 nft_trans_destroy(trans);
3493 } else { 3530 } else {
3494 list_del(&trans->ctx.table->list); 3531 list_del_rcu(&trans->ctx.table->list);
3495 } 3532 }
3496 break; 3533 break;
3497 case NFT_MSG_DELTABLE: 3534 case NFT_MSG_DELTABLE:
3498 list_add_tail(&trans->ctx.table->list, 3535 list_add_tail_rcu(&trans->ctx.table->list,
3499 &trans->ctx.afi->tables); 3536 &trans->ctx.afi->tables);
3500 nft_trans_destroy(trans); 3537 nft_trans_destroy(trans);
3501 break; 3538 break;
3502 case NFT_MSG_NEWCHAIN: 3539 case NFT_MSG_NEWCHAIN:
@@ -3507,7 +3544,7 @@ static int nf_tables_abort(struct sk_buff *skb)
3507 nft_trans_destroy(trans); 3544 nft_trans_destroy(trans);
3508 } else { 3545 } else {
3509 trans->ctx.table->use--; 3546 trans->ctx.table->use--;
3510 list_del(&trans->ctx.chain->list); 3547 list_del_rcu(&trans->ctx.chain->list);
3511 if (!(trans->ctx.table->flags & NFT_TABLE_F_DORMANT) && 3548 if (!(trans->ctx.table->flags & NFT_TABLE_F_DORMANT) &&
3512 trans->ctx.chain->flags & NFT_BASE_CHAIN) { 3549 trans->ctx.chain->flags & NFT_BASE_CHAIN) {
3513 nf_unregister_hooks(nft_base_chain(trans->ctx.chain)->ops, 3550 nf_unregister_hooks(nft_base_chain(trans->ctx.chain)->ops,
@@ -3517,8 +3554,8 @@ static int nf_tables_abort(struct sk_buff *skb)
3517 break; 3554 break;
3518 case NFT_MSG_DELCHAIN: 3555 case NFT_MSG_DELCHAIN:
3519 trans->ctx.table->use++; 3556 trans->ctx.table->use++;
3520 list_add_tail(&trans->ctx.chain->list, 3557 list_add_tail_rcu(&trans->ctx.chain->list,
3521 &trans->ctx.table->chains); 3558 &trans->ctx.table->chains);
3522 nft_trans_destroy(trans); 3559 nft_trans_destroy(trans);
3523 break; 3560 break;
3524 case NFT_MSG_NEWRULE: 3561 case NFT_MSG_NEWRULE:
@@ -3532,12 +3569,12 @@ static int nf_tables_abort(struct sk_buff *skb)
3532 break; 3569 break;
3533 case NFT_MSG_NEWSET: 3570 case NFT_MSG_NEWSET:
3534 trans->ctx.table->use--; 3571 trans->ctx.table->use--;
3535 list_del(&nft_trans_set(trans)->list); 3572 list_del_rcu(&nft_trans_set(trans)->list);
3536 break; 3573 break;
3537 case NFT_MSG_DELSET: 3574 case NFT_MSG_DELSET:
3538 trans->ctx.table->use++; 3575 trans->ctx.table->use++;
3539 list_add_tail(&nft_trans_set(trans)->list, 3576 list_add_tail_rcu(&nft_trans_set(trans)->list,
3540 &trans->ctx.table->sets); 3577 &trans->ctx.table->sets);
3541 nft_trans_destroy(trans); 3578 nft_trans_destroy(trans);
3542 break; 3579 break;
3543 case NFT_MSG_NEWSETELEM: 3580 case NFT_MSG_NEWSETELEM:
@@ -3951,6 +3988,7 @@ static int nf_tables_init_net(struct net *net)
3951{ 3988{
3952 INIT_LIST_HEAD(&net->nft.af_info); 3989 INIT_LIST_HEAD(&net->nft.af_info);
3953 INIT_LIST_HEAD(&net->nft.commit_list); 3990 INIT_LIST_HEAD(&net->nft.commit_list);
3991 net->nft.base_seq = 1;
3954 return 0; 3992 return 0;
3955} 3993}
3956 3994
diff --git a/net/netfilter/nf_tables_core.c b/net/netfilter/nf_tables_core.c
index 345acfb1720b..3b90eb2b2c55 100644
--- a/net/netfilter/nf_tables_core.c
+++ b/net/netfilter/nf_tables_core.c
@@ -109,7 +109,7 @@ nft_do_chain(struct nft_pktinfo *pkt, const struct nf_hook_ops *ops)
109 struct nft_data data[NFT_REG_MAX + 1]; 109 struct nft_data data[NFT_REG_MAX + 1];
110 unsigned int stackptr = 0; 110 unsigned int stackptr = 0;
111 struct nft_jumpstack jumpstack[NFT_JUMP_STACK_SIZE]; 111 struct nft_jumpstack jumpstack[NFT_JUMP_STACK_SIZE];
112 struct nft_stats __percpu *stats; 112 struct nft_stats *stats;
113 int rulenum; 113 int rulenum;
114 /* 114 /*
115 * Cache cursor to avoid problems in case that the cursor is updated 115 * Cache cursor to avoid problems in case that the cursor is updated
@@ -205,9 +205,11 @@ next_rule:
205 nft_trace_packet(pkt, basechain, -1, NFT_TRACE_POLICY); 205 nft_trace_packet(pkt, basechain, -1, NFT_TRACE_POLICY);
206 206
207 rcu_read_lock_bh(); 207 rcu_read_lock_bh();
208 stats = rcu_dereference(nft_base_chain(basechain)->stats); 208 stats = this_cpu_ptr(rcu_dereference(nft_base_chain(basechain)->stats));
209 __this_cpu_inc(stats->pkts); 209 u64_stats_update_begin(&stats->syncp);
210 __this_cpu_add(stats->bytes, pkt->skb->len); 210 stats->pkts++;
211 stats->bytes += pkt->skb->len;
212 u64_stats_update_end(&stats->syncp);
211 rcu_read_unlock_bh(); 213 rcu_read_unlock_bh();
212 214
213 return nft_base_chain(basechain)->policy; 215 return nft_base_chain(basechain)->policy;
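
The nf_tables_core.c change stops treating the chain counters as raw per-cpu integers (__this_cpu_inc/__this_cpu_add on a __percpu pointer) and instead resolves the local CPU's slot once, then brackets the update with u64_stats_update_begin()/u64_stats_update_end(); on 32-bit SMP that syncp is what lets a reader fetch a consistent 64-bit pkts/bytes pair. A minimal sketch of the writer/reader pairing with hypothetical my_stats names (the rcu_dereference() of the percpu pointer seen in the real hunk is left out here):

#include <linux/u64_stats_sync.h>
#include <linux/percpu.h>
#include <linux/types.h>

struct my_stats {
	u64 pkts;
	u64 bytes;
	struct u64_stats_sync syncp;
};

/* Writer: per-packet update on the local CPU's counters; the caller must
 * prevent concurrent writers on this CPU (e.g. run with bh disabled). */
static void my_stats_add(struct my_stats __percpu *stats, unsigned int len)
{
	struct my_stats *s = this_cpu_ptr(stats);

	u64_stats_update_begin(&s->syncp);
	s->pkts++;
	s->bytes += len;
	u64_stats_update_end(&s->syncp);
}

/* Reader: retry until one CPU's pair is seen consistently; summing
 * over all possible CPUs is left to the caller. */
static void my_stats_read(const struct my_stats *s, u64 *pkts, u64 *bytes)
{
	unsigned int start;

	do {
		start = u64_stats_fetch_begin(&s->syncp);
		*pkts = s->pkts;
		*bytes = s->bytes;
	} while (u64_stats_fetch_retry(&s->syncp, start));
}
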
diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c
index c39b583ace32..70c0be8d0121 100644
--- a/net/sched/cls_u32.c
+++ b/net/sched/cls_u32.c
@@ -38,6 +38,7 @@
38#include <linux/errno.h> 38#include <linux/errno.h>
39#include <linux/rtnetlink.h> 39#include <linux/rtnetlink.h>
40#include <linux/skbuff.h> 40#include <linux/skbuff.h>
41#include <linux/bitmap.h>
41#include <net/netlink.h> 42#include <net/netlink.h>
42#include <net/act_api.h> 43#include <net/act_api.h>
43#include <net/pkt_cls.h> 44#include <net/pkt_cls.h>
@@ -460,17 +461,25 @@ static int u32_delete(struct tcf_proto *tp, unsigned long arg)
460 return 0; 461 return 0;
461} 462}
462 463
464#define NR_U32_NODE (1<<12)
463static u32 gen_new_kid(struct tc_u_hnode *ht, u32 handle) 465static u32 gen_new_kid(struct tc_u_hnode *ht, u32 handle)
464{ 466{
465 struct tc_u_knode *n; 467 struct tc_u_knode *n;
466 unsigned int i = 0x7FF; 468 unsigned long i;
469 unsigned long *bitmap = kzalloc(BITS_TO_LONGS(NR_U32_NODE) * sizeof(unsigned long),
470 GFP_KERNEL);
471 if (!bitmap)
472 return handle | 0xFFF;
467 473
468 for (n = ht->ht[TC_U32_HASH(handle)]; n; n = n->next) 474 for (n = ht->ht[TC_U32_HASH(handle)]; n; n = n->next)
469 if (i < TC_U32_NODE(n->handle)) 475 set_bit(TC_U32_NODE(n->handle), bitmap);
470 i = TC_U32_NODE(n->handle);
471 i++;
472 476
473 return handle | (i > 0xFFF ? 0xFFF : i); 477 i = find_next_zero_bit(bitmap, NR_U32_NODE, 0x800);
478 if (i >= NR_U32_NODE)
479 i = find_next_zero_bit(bitmap, NR_U32_NODE, 1);
480
481 kfree(bitmap);
482 return handle | (i >= NR_U32_NODE ? 0xFFF : i);
474} 483}
475 484
476static const struct nla_policy u32_policy[TCA_U32_MAX + 1] = { 485static const struct nla_policy u32_policy[TCA_U32_MAX + 1] = {
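
The cls_u32 hunk replaces gen_new_kid()'s old max-plus-one scheme (start at 0x7FF, take the largest node id already in the bucket, add one) with a bitmap over the full 12-bit TC_U32_NODE space: mark every id in use, then pick the first free one, preferring 0x800-0xFFF before falling back to 1-0x7FF. The old code never reused freed ids and, once a node held id 0xFFF, every new node collided on 0xFFF. The same free-id search in plain, runnable C, over a hypothetical used[] array instead of the tc_u_knode hash chain:

#include <stdbool.h>
#include <stdio.h>

#define NR_NODE (1 << 12)	/* 12-bit node id space, like TC_U32_NODE */

/* Lowest free id, preferring [pref, NR_NODE), then [1, pref).
 * Returns 0 when every id is taken (0 itself is never handed out). */
static unsigned int pick_free_id(const bool used[NR_NODE], unsigned int pref)
{
	for (unsigned int i = pref; i < NR_NODE; i++)
		if (!used[i])
			return i;
	for (unsigned int i = 1; i < pref; i++)
		if (!used[i])
			return i;
	return 0;
}

int main(void)
{
	static bool used[NR_NODE];

	/* An in-use id of 0xfff would have pushed the old max+1 scheme
	 * straight to its colliding 0xfff fallback; the free-slot search
	 * still finds 0x802 here. */
	used[0x800] = used[0x801] = used[0xfff] = true;

	printf("%#x\n", pick_free_id(used, 0x800));
	return 0;
}
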