-rw-r--r--  Documentation/virtual/kvm/api.txt | 46
-rw-r--r--  Documentation/x86/pti.txt | 2
-rw-r--r--  MAINTAINERS | 1
-rw-r--r--  Makefile | 2
-rw-r--r--  arch/alpha/kernel/sys_sio.c | 35
-rw-r--r--  arch/alpha/lib/ev6-memset.S | 12
-rw-r--r--  arch/arm64/kvm/handle_exit.c | 4
-rw-r--r--  arch/mips/Kconfig | 12
-rw-r--r--  arch/mips/Kconfig.debug | 14
-rw-r--r--  arch/mips/ar7/platform.c | 2
-rw-r--r--  arch/mips/ath25/devices.c | 2
-rw-r--r--  arch/mips/kernel/mips-cm.c | 1
-rw-r--r--  arch/mips/lib/Makefile | 3
-rw-r--r--  arch/mips/lib/libgcc.h | 17
-rw-r--r--  arch/mips/lib/multi3.c | 54
-rw-r--r--  arch/mips/mm/uasm-micromips.c | 2
-rw-r--r--  arch/mips/ralink/timer.c | 4
-rw-r--r--  arch/mips/rb532/Makefile | 4
-rw-r--r--  arch/mips/rb532/devices.c | 4
-rw-r--r--  arch/powerpc/include/uapi/asm/kvm.h | 25
-rw-r--r--  arch/powerpc/kvm/powerpc.c | 131
-rw-r--r--  arch/s390/include/asm/kvm_host.h | 3
-rw-r--r--  arch/s390/include/uapi/asm/kvm.h | 5
-rw-r--r--  arch/s390/kvm/kvm-s390.c | 12
-rw-r--r--  arch/s390/kvm/vsie.c | 10
-rw-r--r--  arch/x86/entry/entry_64.S | 2
-rw-r--r--  arch/x86/include/asm/nospec-branch.h | 10
-rw-r--r--  arch/x86/include/asm/traps.h | 1
-rw-r--r--  arch/x86/kernel/cpu/mcheck/mce.c | 5
-rw-r--r--  arch/x86/kernel/kprobes/opt.c | 23
-rw-r--r--  arch/x86/kernel/process.c | 25
-rw-r--r--  arch/x86/kernel/vmlinux.lds.S | 6
-rw-r--r--  arch/x86/kvm/x86.c | 4
-rw-r--r--  arch/x86/lib/retpoline.S | 5
-rw-r--r--  arch/x86/mm/mem_encrypt.c | 2
-rw-r--r--  drivers/md/dm-crypt.c | 20
-rw-r--r--  drivers/md/dm-integrity.c | 49
-rw-r--r--  drivers/md/dm-thin-metadata.c | 6
-rw-r--r--  drivers/md/persistent-data/dm-btree.c | 19
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c | 7
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_main.c | 9
-rw-r--r--  drivers/net/ethernet/ibm/ibmvnic.c | 73
-rw-r--r--  drivers/net/ethernet/ibm/ibmvnic.h | 2
-rw-r--r--  drivers/net/usb/usbnet.c | 8
-rw-r--r--  drivers/scsi/libsas/sas_scsi_host.c | 17
-rw-r--r--  fs/nfsd/auth.c | 6
-rw-r--r--  fs/orangefs/devorangefs-req.c | 3
-rw-r--r--  fs/orangefs/waitqueue.c | 4
-rw-r--r--  include/linux/swapops.h | 21
-rw-r--r--  include/uapi/linux/kvm.h | 4
-rw-r--r--  kernel/irq/matrix.c | 20
-rw-r--r--  mm/page_vma_mapped.c | 66
-rw-r--r--  net/core/dev.c | 19
-rw-r--r--  net/ipv4/esp4_offload.c | 3
-rw-r--r--  net/ipv4/igmp.c | 2
-rw-r--r--  net/ipv4/tcp_offload.c | 3
-rw-r--r--  net/ipv4/udp_offload.c | 3
-rw-r--r--  net/ipv6/esp6_offload.c | 3
-rw-r--r--  net/ipv6/tcpv6_offload.c | 3
-rw-r--r--  net/ipv6/udp_offload.c | 3
-rw-r--r--  net/rds/tcp.c | 5
-rw-r--r--  net/rds/tcp.h | 2
-rw-r--r--  net/rds/tcp_send.c | 4
-rw-r--r--  net/sctp/offload.c | 3
-rw-r--r--  net/tls/tls_sw.c | 2
-rw-r--r--  virt/kvm/arm/mmu.c | 2
-rw-r--r--  virt/kvm/arm/vgic/vgic-init.c | 8
-rw-r--r--  virt/kvm/arm/vgic/vgic-v4.c | 2
68 files changed, 729 insertions, 162 deletions
diff --git a/Documentation/virtual/kvm/api.txt b/Documentation/virtual/kvm/api.txt
index 57d3ee9e4bde..fc3ae951bc07 100644
--- a/Documentation/virtual/kvm/api.txt
+++ b/Documentation/virtual/kvm/api.txt
@@ -3403,6 +3403,52 @@ invalid, if invalid pages are written to (e.g. after the end of memory)
 or if no page table is present for the addresses (e.g. when using
 hugepages).
 
+4.108 KVM_PPC_GET_CPU_CHAR
+
+Capability: KVM_CAP_PPC_GET_CPU_CHAR
+Architectures: powerpc
+Type: vm ioctl
+Parameters: struct kvm_ppc_cpu_char (out)
+Returns: 0 on successful completion
+	 -EFAULT if struct kvm_ppc_cpu_char cannot be written
+
+This ioctl gives userspace information about certain characteristics
+of the CPU relating to speculative execution of instructions and
+possible information leakage resulting from speculative execution (see
+CVE-2017-5715, CVE-2017-5753 and CVE-2017-5754). The information is
+returned in struct kvm_ppc_cpu_char, which looks like this:
+
+struct kvm_ppc_cpu_char {
+	__u64	character;		/* characteristics of the CPU */
+	__u64	behaviour;		/* recommended software behaviour */
+	__u64	character_mask;		/* valid bits in character */
+	__u64	behaviour_mask;		/* valid bits in behaviour */
+};
+
+For extensibility, the character_mask and behaviour_mask fields
+indicate which bits of character and behaviour have been filled in by
+the kernel. If the set of defined bits is extended in future then
+userspace will be able to tell whether it is running on a kernel that
+knows about the new bits.
+
+The character field describes attributes of the CPU which can help
+with preventing inadvertent information disclosure - specifically,
+whether there is an instruction to flash-invalidate the L1 data cache
+(ori 30,30,0 or mtspr SPRN_TRIG2,rN), whether the L1 data cache is set
+to a mode where entries can only be used by the thread that created
+them, whether the bcctr[l] instruction prevents speculation, and
+whether a speculation barrier instruction (ori 31,31,0) is provided.
+
+The behaviour field describes actions that software should take to
+prevent inadvertent information disclosure, and thus describes which
+vulnerabilities the hardware is subject to; specifically whether the
+L1 data cache should be flushed when returning to user mode from the
+kernel, and whether a speculation barrier should be placed between an
+array bounds check and the array access.
+
+These fields use the same bit definitions as the new
+H_GET_CPU_CHARACTERISTICS hypercall.
+
 5. The kvm_run structure
 ------------------------
 
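
A minimal userspace sketch of driving the new ioctl (editorial illustration, not part of this patch; vm_fd is assumed to come from the usual KVM_CREATE_VM sequence, and the ioctl/struct definitions from the uapi headers added by this series):

	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <linux/kvm.h>

	static void dump_cpu_char(int vm_fd)
	{
		struct kvm_ppc_cpu_char cc;

		if (ioctl(vm_fd, KVM_PPC_GET_CPU_CHAR, &cc) < 0) {
			perror("KVM_PPC_GET_CPU_CHAR");
			return;
		}
		/* Only bits set in the *_mask fields are meaningful. */
		printf("character %#llx (valid %#llx)\n",
		       (unsigned long long)cc.character,
		       (unsigned long long)cc.character_mask);
		printf("behaviour %#llx (valid %#llx)\n",
		       (unsigned long long)cc.behaviour,
		       (unsigned long long)cc.behaviour_mask);
	}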
diff --git a/Documentation/x86/pti.txt b/Documentation/x86/pti.txt
index d11eff61fc9a..5cd58439ad2d 100644
--- a/Documentation/x86/pti.txt
+++ b/Documentation/x86/pti.txt
@@ -78,7 +78,7 @@ this protection comes at a cost:
      non-PTI SYSCALL entry code, so requires mapping fewer
      things into the userspace page tables.  The downside is
      that stacks must be switched at entry time.
-  d. Global pages are disabled for all kernel structures not
+  c. Global pages are disabled for all kernel structures not
      mapped into both kernel and userspace page tables.  This
      feature of the MMU allows different processes to share TLB
      entries mapping the kernel.  Losing the feature means more
diff --git a/MAINTAINERS b/MAINTAINERS
index 079af8b7ae8e..7ec1b088c07f 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -9102,6 +9102,7 @@ F: drivers/usb/image/microtek.*
 
 MIPS
 M:	Ralf Baechle <ralf@linux-mips.org>
+M:	James Hogan <jhogan@kernel.org>
 L:	linux-mips@linux-mips.org
 W:	http://www.linux-mips.org/
 T:	git git://git.linux-mips.org/pub/scm/ralf/linux.git
diff --git a/Makefile b/Makefile
index bf5b8cbb9469..339397b838d3 100644
--- a/Makefile
+++ b/Makefile
@@ -2,7 +2,7 @@
 VERSION = 4
 PATCHLEVEL = 15
 SUBLEVEL = 0
-EXTRAVERSION = -rc8
+EXTRAVERSION = -rc9
 NAME = Fearless Coyote
 
 # *DOCUMENTATION*
diff --git a/arch/alpha/kernel/sys_sio.c b/arch/alpha/kernel/sys_sio.c
index 37bd6d9b8eb9..a6bdc1da47ad 100644
--- a/arch/alpha/kernel/sys_sio.c
+++ b/arch/alpha/kernel/sys_sio.c
@@ -102,6 +102,15 @@ sio_pci_route(void)
 		 alpha_mv.sys.sio.route_tab);
 }
 
+static bool sio_pci_dev_irq_needs_level(const struct pci_dev *dev)
+{
+	if ((dev->class >> 16 == PCI_BASE_CLASS_BRIDGE) &&
+	    (dev->class >> 8 != PCI_CLASS_BRIDGE_PCMCIA))
+		return false;
+
+	return true;
+}
+
 static unsigned int __init
 sio_collect_irq_levels(void)
 {
@@ -110,8 +119,7 @@ sio_collect_irq_levels(void)
 
 	/* Iterate through the devices, collecting IRQ levels. */
 	for_each_pci_dev(dev) {
-		if ((dev->class >> 16 == PCI_BASE_CLASS_BRIDGE) &&
-		    (dev->class >> 8 != PCI_CLASS_BRIDGE_PCMCIA))
+		if (!sio_pci_dev_irq_needs_level(dev))
 			continue;
 
 		if (dev->irq)
@@ -120,8 +128,7 @@ sio_collect_irq_levels(void)
 	return level_bits;
 }
 
-static void __init
-sio_fixup_irq_levels(unsigned int level_bits)
+static void __sio_fixup_irq_levels(unsigned int level_bits, bool reset)
 {
 	unsigned int old_level_bits;
 
@@ -139,12 +146,21 @@ sio_fixup_irq_levels(unsigned int level_bits)
 	 */
 	old_level_bits = inb(0x4d0) | (inb(0x4d1) << 8);
 
-	level_bits |= (old_level_bits & 0x71ff);
+	if (reset)
+		old_level_bits &= 0x71ff;
+
+	level_bits |= old_level_bits;
 
 	outb((level_bits >> 0) & 0xff, 0x4d0);
 	outb((level_bits >> 8) & 0xff, 0x4d1);
 }
 
+static inline void
+sio_fixup_irq_levels(unsigned int level_bits)
+{
+	__sio_fixup_irq_levels(level_bits, true);
+}
+
 static inline int
 noname_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
 {
@@ -181,7 +197,14 @@ noname_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
 	const long min_idsel = 6, max_idsel = 14, irqs_per_slot = 5;
 	int irq = COMMON_TABLE_LOOKUP, tmp;
 	tmp = __kernel_extbl(alpha_mv.sys.sio.route_tab, irq);
-	return irq >= 0 ? tmp : -1;
+
+	irq = irq >= 0 ? tmp : -1;
+
+	/* Fixup IRQ level if an actual IRQ mapping is detected */
+	if (sio_pci_dev_irq_needs_level(dev) && irq >= 0)
+		__sio_fixup_irq_levels(1 << irq, false);
+
+	return irq;
 }
 
 static inline int
diff --git a/arch/alpha/lib/ev6-memset.S b/arch/alpha/lib/ev6-memset.S
index 316a99aa9efe..1cfcfbbea6f0 100644
--- a/arch/alpha/lib/ev6-memset.S
+++ b/arch/alpha/lib/ev6-memset.S
@@ -18,7 +18,7 @@
  * The algorithm for the leading and trailing quadwords remains the same,
  * however the loop has been unrolled to enable better memory throughput,
  * and the code has been replicated for each of the entry points: __memset
- * and __memsetw to permit better scheduling to eliminate the stalling
+ * and __memset16 to permit better scheduling to eliminate the stalling
  * encountered during the mask replication.
  * A future enhancement might be to put in a byte store loop for really
  * small (say < 32 bytes) memset()s.  Whether or not that change would be
@@ -34,7 +34,7 @@
 	.globl memset
 	.globl __memset
 	.globl ___memset
-	.globl __memsetw
+	.globl __memset16
 	.globl __constant_c_memset
 
 	.ent ___memset
@@ -415,9 +415,9 @@ end:
  * to mask stalls.  Note that entry point names also had to change
  */
 	.align 5
-	.ent __memsetw
+	.ent __memset16
 
-__memsetw:
+__memset16:
 	.frame $30,0,$26,0
 	.prologue 0
 
@@ -596,8 +596,8 @@ end_w:
 	nop
 	ret $31,($26),1		# L0 :
 
-	.end __memsetw
-	EXPORT_SYMBOL(__memsetw)
+	.end __memset16
+	EXPORT_SYMBOL(__memset16)
 
 memset = ___memset
 __memset = ___memset
diff --git a/arch/arm64/kvm/handle_exit.c b/arch/arm64/kvm/handle_exit.c
index 304203fa9e33..e60494f1eef9 100644
--- a/arch/arm64/kvm/handle_exit.c
+++ b/arch/arm64/kvm/handle_exit.c
@@ -45,7 +45,7 @@ static int handle_hvc(struct kvm_vcpu *vcpu, struct kvm_run *run)
 
 	ret = kvm_psci_call(vcpu);
 	if (ret < 0) {
-		kvm_inject_undefined(vcpu);
+		vcpu_set_reg(vcpu, 0, ~0UL);
 		return 1;
 	}
 
@@ -54,7 +54,7 @@ static int handle_hvc(struct kvm_vcpu *vcpu, struct kvm_run *run)
 
 static int handle_smc(struct kvm_vcpu *vcpu, struct kvm_run *run)
 {
-	kvm_inject_undefined(vcpu);
+	vcpu_set_reg(vcpu, 0, ~0UL);
 	return 1;
 }
 
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index 350a990fc719..8e0b3702f1c0 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -259,6 +259,7 @@ config BCM47XX
 	select LEDS_GPIO_REGISTER
 	select BCM47XX_NVRAM
 	select BCM47XX_SPROM
+	select BCM47XX_SSB if !BCM47XX_BCMA
 	help
 	  Support for BCM47XX based boards
 
@@ -389,6 +390,7 @@ config LANTIQ
 	select SYS_SUPPORTS_32BIT_KERNEL
 	select SYS_SUPPORTS_MIPS16
 	select SYS_SUPPORTS_MULTITHREADING
+	select SYS_SUPPORTS_VPE_LOADER
 	select SYS_HAS_EARLY_PRINTK
 	select GPIOLIB
 	select SWAP_IO_SPACE
@@ -516,6 +518,7 @@ config MIPS_MALTA
 	select SYS_SUPPORTS_MIPS16
 	select SYS_SUPPORTS_MULTITHREADING
 	select SYS_SUPPORTS_SMARTMIPS
+	select SYS_SUPPORTS_VPE_LOADER
 	select SYS_SUPPORTS_ZBOOT
 	select SYS_SUPPORTS_RELOCATABLE
 	select USE_OF
@@ -2281,9 +2284,16 @@ config MIPSR2_TO_R6_EMULATOR
 	  The only reason this is a build-time option is to save ~14K from the
 	  final kernel image.
 
+config SYS_SUPPORTS_VPE_LOADER
+	bool
+	depends on SYS_SUPPORTS_MULTITHREADING
+	help
+	  Indicates that the platform supports the VPE loader, and provides
+	  physical_memsize.
+
 config MIPS_VPE_LOADER
 	bool "VPE loader support."
-	depends on SYS_SUPPORTS_MULTITHREADING && MODULES
+	depends on SYS_SUPPORTS_VPE_LOADER && MODULES
 	select CPU_MIPSR2_IRQ_VI
 	select CPU_MIPSR2_IRQ_EI
 	select MIPS_MT
diff --git a/arch/mips/Kconfig.debug b/arch/mips/Kconfig.debug
index 464af5e025d6..0749c3724543 100644
--- a/arch/mips/Kconfig.debug
+++ b/arch/mips/Kconfig.debug
@@ -124,30 +124,36 @@ config SCACHE_DEBUGFS
 
 	  If unsure, say N.
 
-menuconfig MIPS_CPS_NS16550
+menuconfig MIPS_CPS_NS16550_BOOL
 	bool "CPS SMP NS16550 UART output"
 	depends on MIPS_CPS
 	help
 	  Output debug information via an ns16550 compatible UART if exceptions
 	  occur early in the boot process of a secondary core.
 
-if MIPS_CPS_NS16550
+if MIPS_CPS_NS16550_BOOL
+
+config MIPS_CPS_NS16550
+	def_bool MIPS_CPS_NS16550_BASE != 0
 
 config MIPS_CPS_NS16550_BASE
 	hex "UART Base Address"
 	default 0x1b0003f8 if MIPS_MALTA
+	default 0
 	help
 	  The base address of the ns16550 compatible UART on which to output
 	  debug information from the early stages of core startup.
 
+	  This is only used if non-zero.
+
 config MIPS_CPS_NS16550_SHIFT
 	int "UART Register Shift"
-	default 0 if MIPS_MALTA
+	default 0
 	help
 	  The number of bits to shift ns16550 register indices by in order to
 	  form their addresses. That is, log base 2 of the span between
 	  adjacent ns16550 registers in the system.
 
-endif # MIPS_CPS_NS16550
+endif # MIPS_CPS_NS16550_BOOL
 
 endmenu
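
For illustration, a Malta-style configuration enabling the early UART would end up with a fragment like the following (values are examples, not new defaults introduced here):

	CONFIG_MIPS_CPS_NS16550_BOOL=y
	CONFIG_MIPS_CPS_NS16550_BASE=0x1b0003f8
	CONFIG_MIPS_CPS_NS16550_SHIFT=0

MIPS_CPS_NS16550 itself is now derived rather than user-visible: it is y only when the configured base address is non-zero, so platforms without a known UART address can enable the menu entry without emitting the early UART code.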
diff --git a/arch/mips/ar7/platform.c b/arch/mips/ar7/platform.c
index 4674f1efbe7a..e1675c25d5d4 100644
--- a/arch/mips/ar7/platform.c
+++ b/arch/mips/ar7/platform.c
@@ -575,7 +575,7 @@ static int __init ar7_register_uarts(void)
 	uart_port.type		= PORT_AR7;
 	uart_port.uartclk	= clk_get_rate(bus_clk) / 2;
 	uart_port.iotype	= UPIO_MEM32;
-	uart_port.flags		= UPF_FIXED_TYPE;
+	uart_port.flags		= UPF_FIXED_TYPE | UPF_BOOT_AUTOCONF;
 	uart_port.regshift	= 2;
 
 	uart_port.line		= 0;
diff --git a/arch/mips/ath25/devices.c b/arch/mips/ath25/devices.c
index e1156347da53..301a9028273c 100644
--- a/arch/mips/ath25/devices.c
+++ b/arch/mips/ath25/devices.c
@@ -73,6 +73,7 @@ const char *get_system_type(void)
 
 void __init ath25_serial_setup(u32 mapbase, int irq, unsigned int uartclk)
 {
+#ifdef CONFIG_SERIAL_8250_CONSOLE
 	struct uart_port s;
 
 	memset(&s, 0, sizeof(s));
@@ -85,6 +86,7 @@ void __init ath25_serial_setup(u32 mapbase, int irq, unsigned int uartclk)
 	s.uartclk = uartclk;
 
 	early_serial_setup(&s);
+#endif /* CONFIG_SERIAL_8250_CONSOLE */
 }
 
 int __init ath25_add_wmac(int nr, u32 base, int irq)
diff --git a/arch/mips/kernel/mips-cm.c b/arch/mips/kernel/mips-cm.c
index dd5567b1e305..8f5bd04f320a 100644
--- a/arch/mips/kernel/mips-cm.c
+++ b/arch/mips/kernel/mips-cm.c
@@ -292,7 +292,6 @@ void mips_cm_lock_other(unsigned int cluster, unsigned int core,
 			  *this_cpu_ptr(&cm_core_lock_flags));
 	} else {
 		WARN_ON(cluster != 0);
-		WARN_ON(vp != 0);
 		WARN_ON(block != CM_GCR_Cx_OTHER_BLOCK_LOCAL);
 
 		/*
diff --git a/arch/mips/lib/Makefile b/arch/mips/lib/Makefile
index 78c2affeabf8..e84e12655fa8 100644
--- a/arch/mips/lib/Makefile
+++ b/arch/mips/lib/Makefile
@@ -16,4 +16,5 @@ obj-$(CONFIG_CPU_R3000) += r3k_dump_tlb.o
 obj-$(CONFIG_CPU_TX39XX)	+= r3k_dump_tlb.o
 
 # libgcc-style stuff needed in the kernel
-obj-y += ashldi3.o ashrdi3.o bswapsi.o bswapdi.o cmpdi2.o lshrdi3.o ucmpdi2.o
+obj-y += ashldi3.o ashrdi3.o bswapsi.o bswapdi.o cmpdi2.o lshrdi3.o multi3.o \
+	 ucmpdi2.o
diff --git a/arch/mips/lib/libgcc.h b/arch/mips/lib/libgcc.h
index 28002ed90c2c..199a7f96282f 100644
--- a/arch/mips/lib/libgcc.h
+++ b/arch/mips/lib/libgcc.h
@@ -10,10 +10,18 @@ typedef int word_type __attribute__ ((mode (__word__)));
 struct DWstruct {
 	int high, low;
 };
+
+struct TWstruct {
+	long long high, low;
+};
 #elif defined(__LITTLE_ENDIAN)
 struct DWstruct {
 	int low, high;
 };
+
+struct TWstruct {
+	long long low, high;
+};
 #else
 #error I feel sick.
 #endif
@@ -23,4 +31,13 @@ typedef union {
 	long long ll;
 } DWunion;
 
+#if defined(CONFIG_64BIT) && defined(CONFIG_CPU_MIPSR6)
+typedef int ti_type __attribute__((mode(TI)));
+
+typedef union {
+	struct TWstruct s;
+	ti_type ti;
+} TWunion;
+#endif
+
 #endif /* __ASM_LIBGCC_H */
diff --git a/arch/mips/lib/multi3.c b/arch/mips/lib/multi3.c
new file mode 100644
index 000000000000..111ad475aa0c
--- /dev/null
+++ b/arch/mips/lib/multi3.c
@@ -0,0 +1,54 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/export.h>
+
+#include "libgcc.h"
+
+/*
+ * GCC 7 suboptimally generates __multi3 calls for mips64r6, so for that
+ * specific case only we'll implement it here.
+ *
+ * See https://gcc.gnu.org/bugzilla/show_bug.cgi?id=82981
+ */
+#if defined(CONFIG_64BIT) && defined(CONFIG_CPU_MIPSR6) && (__GNUC__ == 7)
+
+/* multiply 64-bit values, low 64-bits returned */
+static inline long long notrace dmulu(long long a, long long b)
+{
+	long long res;
+
+	asm ("dmulu %0,%1,%2" : "=r" (res) : "r" (a), "r" (b));
+	return res;
+}
+
+/* multiply 64-bit unsigned values, high 64-bits of 128-bit result returned */
+static inline long long notrace dmuhu(long long a, long long b)
+{
+	long long res;
+
+	asm ("dmuhu %0,%1,%2" : "=r" (res) : "r" (a), "r" (b));
+	return res;
+}
+
+/* multiply 128-bit values, low 128-bits returned */
+ti_type notrace __multi3(ti_type a, ti_type b)
+{
+	TWunion res, aa, bb;
+
+	aa.ti = a;
+	bb.ti = b;
+
+	/*
+	 * a * b =           (a.lo * b.lo)
+	 *       + 2^64 *    (a.hi * b.lo + a.lo * b.hi)
+	 *      [+ 2^128 *   (a.hi * b.hi)]
+	 */
+	res.s.low = dmulu(aa.s.low, bb.s.low);
+	res.s.high = dmuhu(aa.s.low, bb.s.low);
+	res.s.high += dmulu(aa.s.high, bb.s.low);
+	res.s.high += dmulu(aa.s.low, bb.s.high);
+
+	return res.ti;
+}
+EXPORT_SYMBOL(__multi3);
+
+#endif /* 64BIT && CPU_MIPSR6 && GCC7 */
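
The decomposition in the comment above can be sanity-checked on any host compiler with native 128-bit support; this standalone editorial sketch (not part of the patch) mirrors dmulu/dmuhu in plain C:

	#include <assert.h>

	typedef unsigned __int128 u128;
	typedef unsigned long long u64;

	static u128 multi3_ref(u128 a, u128 b)
	{
		u64 alo = (u64)a, ahi = (u64)(a >> 64);
		u64 blo = (u64)b, bhi = (u64)(b >> 64);
		u128 lo = (u128)alo * blo;	/* dmulu + dmuhu of the low words */
		u64 hi = (u64)(lo >> 64);

		hi += ahi * blo;		/* low 64 bits only, like dmulu */
		hi += alo * bhi;		/* the 2^128 term is discarded */
		return ((u128)hi << 64) | (u64)lo;
	}

	int main(void)
	{
		u128 a = ((u128)0x0123456789abcdefULL << 64) | 0xfedcba9876543210ULL;
		u128 b = ((u128)0x0f0f0f0f0f0f0f0fULL << 64) | 0x0123456789abcdefULL;

		assert(multi3_ref(a, b) == a * b);	/* same low 128 bits */
		return 0;
	}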
diff --git a/arch/mips/mm/uasm-micromips.c b/arch/mips/mm/uasm-micromips.c
index cdb5a191b9d5..9bb6baa45da3 100644
--- a/arch/mips/mm/uasm-micromips.c
+++ b/arch/mips/mm/uasm-micromips.c
@@ -40,7 +40,7 @@
 
 #include "uasm.c"
 
-static const struct insn const insn_table_MM[insn_invalid] = {
+static const struct insn insn_table_MM[insn_invalid] = {
 	[insn_addu]	= {M(mm_pool32a_op, 0, 0, 0, 0, mm_addu32_op), RT | RS | RD},
 	[insn_addiu]	= {M(mm_addiu32_op, 0, 0, 0, 0, 0), RT | RS | SIMM},
 	[insn_and]	= {M(mm_pool32a_op, 0, 0, 0, 0, mm_and_op), RT | RS | RD},
diff --git a/arch/mips/ralink/timer.c b/arch/mips/ralink/timer.c
index d4469b20d176..4f46a4509f79 100644
--- a/arch/mips/ralink/timer.c
+++ b/arch/mips/ralink/timer.c
@@ -109,9 +109,9 @@ static int rt_timer_probe(struct platform_device *pdev)
 	}
 
 	rt->irq = platform_get_irq(pdev, 0);
-	if (!rt->irq) {
+	if (rt->irq < 0) {
 		dev_err(&pdev->dev, "failed to load irq\n");
-		return -ENOENT;
+		return rt->irq;
 	}
 
 	rt->membase = devm_ioremap_resource(&pdev->dev, res);
diff --git a/arch/mips/rb532/Makefile b/arch/mips/rb532/Makefile
index efdecdb6e3ea..8186afca2234 100644
--- a/arch/mips/rb532/Makefile
+++ b/arch/mips/rb532/Makefile
@@ -2,4 +2,6 @@
 # Makefile for the RB532 board specific parts of the kernel
 #
 
-obj-y	 += irq.o time.o setup.o serial.o prom.o gpio.o devices.o
+obj-$(CONFIG_SERIAL_8250_CONSOLE) += serial.o
+
+obj-y	 += irq.o time.o setup.o prom.o gpio.o devices.o
diff --git a/arch/mips/rb532/devices.c b/arch/mips/rb532/devices.c
index 32ea3e6731d6..354d258396ff 100644
--- a/arch/mips/rb532/devices.c
+++ b/arch/mips/rb532/devices.c
@@ -310,6 +310,8 @@ static int __init plat_setup_devices(void)
 	return platform_add_devices(rb532_devs, ARRAY_SIZE(rb532_devs));
 }
 
+#ifdef CONFIG_NET
+
 static int __init setup_kmac(char *s)
 {
 	printk(KERN_INFO "korina mac = %s\n", s);
@@ -322,4 +324,6 @@ static int __init setup_kmac(char *s)
 
 __setup("kmac=", setup_kmac);
 
+#endif /* CONFIG_NET */
+
 arch_initcall(plat_setup_devices);
diff --git a/arch/powerpc/include/uapi/asm/kvm.h b/arch/powerpc/include/uapi/asm/kvm.h
index 61d6049f4c1e..637b7263cb86 100644
--- a/arch/powerpc/include/uapi/asm/kvm.h
+++ b/arch/powerpc/include/uapi/asm/kvm.h
@@ -443,6 +443,31 @@ struct kvm_ppc_rmmu_info {
 	__u32 ap_encodings[8];
 };
 
+/* For KVM_PPC_GET_CPU_CHAR */
+struct kvm_ppc_cpu_char {
+	__u64	character;		/* characteristics of the CPU */
+	__u64	behaviour;		/* recommended software behaviour */
+	__u64	character_mask;		/* valid bits in character */
+	__u64	behaviour_mask;		/* valid bits in behaviour */
+};
+
+/*
+ * Values for character and character_mask.
+ * These are identical to the values used by H_GET_CPU_CHARACTERISTICS.
+ */
+#define KVM_PPC_CPU_CHAR_SPEC_BAR_ORI31		(1ULL << 63)
+#define KVM_PPC_CPU_CHAR_BCCTRL_SERIALISED	(1ULL << 62)
+#define KVM_PPC_CPU_CHAR_L1D_FLUSH_ORI30	(1ULL << 61)
+#define KVM_PPC_CPU_CHAR_L1D_FLUSH_TRIG2	(1ULL << 60)
+#define KVM_PPC_CPU_CHAR_L1D_THREAD_PRIV	(1ULL << 59)
+#define KVM_PPC_CPU_CHAR_BR_HINT_HONOURED	(1ULL << 58)
+#define KVM_PPC_CPU_CHAR_MTTRIG_THR_RECONF	(1ULL << 57)
+#define KVM_PPC_CPU_CHAR_COUNT_CACHE_DIS	(1ULL << 56)
+
+#define KVM_PPC_CPU_BEHAV_FAVOUR_SECURITY	(1ULL << 63)
+#define KVM_PPC_CPU_BEHAV_L1D_FLUSH_PR		(1ULL << 62)
+#define KVM_PPC_CPU_BEHAV_BNDS_CHK_SPEC_BAR	(1ULL << 61)
+
 /* Per-vcpu XICS interrupt controller state */
 #define KVM_REG_PPC_ICP_STATE	(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x8c)
 
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index 1915e86cef6f..0a7c88786ec0 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -39,6 +39,10 @@
 #include <asm/iommu.h>
 #include <asm/switch_to.h>
 #include <asm/xive.h>
+#ifdef CONFIG_PPC_PSERIES
+#include <asm/hvcall.h>
+#include <asm/plpar_wrappers.h>
+#endif
 
 #include "timing.h"
 #include "irq.h"
@@ -548,6 +552,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
 #ifdef CONFIG_KVM_XICS
 	case KVM_CAP_IRQ_XICS:
 #endif
+	case KVM_CAP_PPC_GET_CPU_CHAR:
 		r = 1;
 		break;
 
@@ -1759,6 +1764,124 @@ static int kvm_vm_ioctl_enable_cap(struct kvm *kvm,
 	return r;
 }
 
+#ifdef CONFIG_PPC_BOOK3S_64
+/*
+ * These functions check whether the underlying hardware is safe
+ * against attacks based on observing the effects of speculatively
+ * executed instructions, and whether it supplies instructions for
+ * use in workarounds.  The information comes from firmware, either
+ * via the device tree on powernv platforms or from an hcall on
+ * pseries platforms.
+ */
+#ifdef CONFIG_PPC_PSERIES
+static int pseries_get_cpu_char(struct kvm_ppc_cpu_char *cp)
+{
+	struct h_cpu_char_result c;
+	unsigned long rc;
+
+	if (!machine_is(pseries))
+		return -ENOTTY;
+
+	rc = plpar_get_cpu_characteristics(&c);
+	if (rc == H_SUCCESS) {
+		cp->character = c.character;
+		cp->behaviour = c.behaviour;
+		cp->character_mask = KVM_PPC_CPU_CHAR_SPEC_BAR_ORI31 |
+			KVM_PPC_CPU_CHAR_BCCTRL_SERIALISED |
+			KVM_PPC_CPU_CHAR_L1D_FLUSH_ORI30 |
+			KVM_PPC_CPU_CHAR_L1D_FLUSH_TRIG2 |
+			KVM_PPC_CPU_CHAR_L1D_THREAD_PRIV |
+			KVM_PPC_CPU_CHAR_BR_HINT_HONOURED |
+			KVM_PPC_CPU_CHAR_MTTRIG_THR_RECONF |
+			KVM_PPC_CPU_CHAR_COUNT_CACHE_DIS;
+		cp->behaviour_mask = KVM_PPC_CPU_BEHAV_FAVOUR_SECURITY |
+			KVM_PPC_CPU_BEHAV_L1D_FLUSH_PR |
+			KVM_PPC_CPU_BEHAV_BNDS_CHK_SPEC_BAR;
+	}
+	return 0;
+}
+#else
+static int pseries_get_cpu_char(struct kvm_ppc_cpu_char *cp)
+{
+	return -ENOTTY;
+}
+#endif
+
+static inline bool have_fw_feat(struct device_node *fw_features,
+				const char *state, const char *name)
+{
+	struct device_node *np;
+	bool r = false;
+
+	np = of_get_child_by_name(fw_features, name);
+	if (np) {
+		r = of_property_read_bool(np, state);
+		of_node_put(np);
+	}
+	return r;
+}
+
+static int kvmppc_get_cpu_char(struct kvm_ppc_cpu_char *cp)
+{
+	struct device_node *np, *fw_features;
+	int r;
+
+	memset(cp, 0, sizeof(*cp));
+	r = pseries_get_cpu_char(cp);
+	if (r != -ENOTTY)
+		return r;
+
+	np = of_find_node_by_name(NULL, "ibm,opal");
+	if (np) {
+		fw_features = of_get_child_by_name(np, "fw-features");
+		of_node_put(np);
+		if (!fw_features)
+			return 0;
+		if (have_fw_feat(fw_features, "enabled",
+				 "inst-spec-barrier-ori31,31,0"))
+			cp->character |= KVM_PPC_CPU_CHAR_SPEC_BAR_ORI31;
+		if (have_fw_feat(fw_features, "enabled",
+				 "fw-bcctrl-serialized"))
+			cp->character |= KVM_PPC_CPU_CHAR_BCCTRL_SERIALISED;
+		if (have_fw_feat(fw_features, "enabled",
+				 "inst-l1d-flush-ori30,30,0"))
+			cp->character |= KVM_PPC_CPU_CHAR_L1D_FLUSH_ORI30;
+		if (have_fw_feat(fw_features, "enabled",
+				 "inst-l1d-flush-trig2"))
+			cp->character |= KVM_PPC_CPU_CHAR_L1D_FLUSH_TRIG2;
+		if (have_fw_feat(fw_features, "enabled",
+				 "fw-l1d-thread-split"))
+			cp->character |= KVM_PPC_CPU_CHAR_L1D_THREAD_PRIV;
+		if (have_fw_feat(fw_features, "enabled",
+				 "fw-count-cache-disabled"))
+			cp->character |= KVM_PPC_CPU_CHAR_COUNT_CACHE_DIS;
+		cp->character_mask = KVM_PPC_CPU_CHAR_SPEC_BAR_ORI31 |
+			KVM_PPC_CPU_CHAR_BCCTRL_SERIALISED |
+			KVM_PPC_CPU_CHAR_L1D_FLUSH_ORI30 |
+			KVM_PPC_CPU_CHAR_L1D_FLUSH_TRIG2 |
+			KVM_PPC_CPU_CHAR_L1D_THREAD_PRIV |
+			KVM_PPC_CPU_CHAR_COUNT_CACHE_DIS;
+
+		if (have_fw_feat(fw_features, "enabled",
+				 "speculation-policy-favor-security"))
+			cp->behaviour |= KVM_PPC_CPU_BEHAV_FAVOUR_SECURITY;
+		if (!have_fw_feat(fw_features, "disabled",
+				  "needs-l1d-flush-msr-pr-0-to-1"))
+			cp->behaviour |= KVM_PPC_CPU_BEHAV_L1D_FLUSH_PR;
+		if (!have_fw_feat(fw_features, "disabled",
+				  "needs-spec-barrier-for-bound-checks"))
+			cp->behaviour |= KVM_PPC_CPU_BEHAV_BNDS_CHK_SPEC_BAR;
+		cp->behaviour_mask = KVM_PPC_CPU_BEHAV_FAVOUR_SECURITY |
+			KVM_PPC_CPU_BEHAV_L1D_FLUSH_PR |
+			KVM_PPC_CPU_BEHAV_BNDS_CHK_SPEC_BAR;
+
+		of_node_put(fw_features);
+	}
+
+	return 0;
+}
+#endif
+
 long kvm_arch_vm_ioctl(struct file *filp,
 		       unsigned int ioctl, unsigned long arg)
 {
@@ -1861,6 +1984,14 @@ long kvm_arch_vm_ioctl(struct file *filp,
 		r = -EFAULT;
 		break;
 	}
+	case KVM_PPC_GET_CPU_CHAR: {
+		struct kvm_ppc_cpu_char cpuchar;
+
+		r = kvmppc_get_cpu_char(&cpuchar);
+		if (r >= 0 && copy_to_user(argp, &cpuchar, sizeof(cpuchar)))
+			r = -EFAULT;
+		break;
+	}
 	default: {
 		struct kvm *kvm = filp->private_data;
 		r = kvm->arch.kvm_ops->arch_vm_ioctl(filp, ioctl, arg);
diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h
index e14f381757f6..c1b0a9ac1dc8 100644
--- a/arch/s390/include/asm/kvm_host.h
+++ b/arch/s390/include/asm/kvm_host.h
@@ -207,7 +207,8 @@ struct kvm_s390_sie_block {
 	__u16	ipa;			/* 0x0056 */
 	__u32	ipb;			/* 0x0058 */
 	__u32	scaoh;			/* 0x005c */
-	__u8	reserved60;		/* 0x0060 */
+#define FPF_BPBC 0x20
+	__u8	fpf;			/* 0x0060 */
 #define ECB_GS		0x40
 #define ECB_TE		0x10
 #define ECB_SRSI	0x04
diff --git a/arch/s390/include/uapi/asm/kvm.h b/arch/s390/include/uapi/asm/kvm.h
index 38535a57fef8..4cdaa55fabfe 100644
--- a/arch/s390/include/uapi/asm/kvm.h
+++ b/arch/s390/include/uapi/asm/kvm.h
@@ -224,6 +224,7 @@ struct kvm_guest_debug_arch {
 #define KVM_SYNC_RICCB  (1UL << 7)
 #define KVM_SYNC_FPRS   (1UL << 8)
 #define KVM_SYNC_GSCB   (1UL << 9)
+#define KVM_SYNC_BPBC   (1UL << 10)
 /* length and alignment of the sdnx as a power of two */
 #define SDNXC 8
 #define SDNXL (1UL << SDNXC)
@@ -247,7 +248,9 @@ struct kvm_sync_regs {
 	};
 	__u8  reserved[512];	/* for future vector expansion */
 	__u32 fpc;		/* valid on KVM_SYNC_VRS or KVM_SYNC_FPRS */
-	__u8 padding1[52];	/* riccb needs to be 64byte aligned */
+	__u8 bpbc : 1;		/* bp mode */
+	__u8 reserved2 : 7;
+	__u8 padding1[51];	/* riccb needs to be 64byte aligned */
 	__u8 riccb[64];		/* runtime instrumentation controls block */
 	__u8 padding2[192];	/* sdnx needs to be 256byte aligned */
 	union {
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index 2c93cbbcd15e..2598cf243b86 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -421,6 +421,9 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
 	case KVM_CAP_S390_GS:
 		r = test_facility(133);
 		break;
+	case KVM_CAP_S390_BPB:
+		r = test_facility(82);
+		break;
 	default:
 		r = 0;
 	}
@@ -2198,6 +2201,8 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
 	kvm_s390_set_prefix(vcpu, 0);
 	if (test_kvm_facility(vcpu->kvm, 64))
 		vcpu->run->kvm_valid_regs |= KVM_SYNC_RICCB;
+	if (test_kvm_facility(vcpu->kvm, 82))
+		vcpu->run->kvm_valid_regs |= KVM_SYNC_BPBC;
 	if (test_kvm_facility(vcpu->kvm, 133))
 		vcpu->run->kvm_valid_regs |= KVM_SYNC_GSCB;
 	/* fprs can be synchronized via vrs, even if the guest has no vx. With
@@ -2339,6 +2344,7 @@ static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
 	current->thread.fpu.fpc = 0;
 	vcpu->arch.sie_block->gbea = 1;
 	vcpu->arch.sie_block->pp = 0;
+	vcpu->arch.sie_block->fpf &= ~FPF_BPBC;
 	vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
 	kvm_clear_async_pf_completion_queue(vcpu);
 	if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm))
@@ -3298,6 +3304,11 @@ static void sync_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 		vcpu->arch.sie_block->ecd |= ECD_HOSTREGMGMT;
 		vcpu->arch.gs_enabled = 1;
 	}
+	if ((kvm_run->kvm_dirty_regs & KVM_SYNC_BPBC) &&
+	    test_kvm_facility(vcpu->kvm, 82)) {
+		vcpu->arch.sie_block->fpf &= ~FPF_BPBC;
+		vcpu->arch.sie_block->fpf |= kvm_run->s.regs.bpbc ? FPF_BPBC : 0;
+	}
 	save_access_regs(vcpu->arch.host_acrs);
 	restore_access_regs(vcpu->run->s.regs.acrs);
 	/* save host (userspace) fprs/vrs */
@@ -3344,6 +3355,7 @@ static void store_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 	kvm_run->s.regs.pft = vcpu->arch.pfault_token;
 	kvm_run->s.regs.pfs = vcpu->arch.pfault_select;
 	kvm_run->s.regs.pfc = vcpu->arch.pfault_compare;
+	kvm_run->s.regs.bpbc = (vcpu->arch.sie_block->fpf & FPF_BPBC) == FPF_BPBC;
 	save_access_regs(vcpu->run->s.regs.acrs);
 	restore_access_regs(vcpu->arch.host_acrs);
 	/* Save guest register state */
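
How userspace is expected to drive the new sync-regs bit (an editorial sketch under the usual KVM setup assumptions; vcpu_fd and the mmap'ed kvm_run area come from KVM_CREATE_VCPU and KVM_GET_VCPU_MMAP_SIZE, which are not shown):

	#include <sys/ioctl.h>
	#include <linux/kvm.h>

	static int run_with_bpbc(int vcpu_fd, struct kvm_run *run)
	{
		if (run->kvm_valid_regs & KVM_SYNC_BPBC) {
			run->s.regs.bpbc = 1;			/* request BP mode */
			run->kvm_dirty_regs |= KVM_SYNC_BPBC;	/* consumed by sync_regs() above */
		}
		return ioctl(vcpu_fd, KVM_RUN, 0);
	}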
diff --git a/arch/s390/kvm/vsie.c b/arch/s390/kvm/vsie.c
index 5d6ae0326d9e..751348348477 100644
--- a/arch/s390/kvm/vsie.c
+++ b/arch/s390/kvm/vsie.c
@@ -223,6 +223,12 @@ static void unshadow_scb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
 	memcpy(scb_o->gcr, scb_s->gcr, 128);
 	scb_o->pp = scb_s->pp;
 
+	/* branch prediction */
+	if (test_kvm_facility(vcpu->kvm, 82)) {
+		scb_o->fpf &= ~FPF_BPBC;
+		scb_o->fpf |= scb_s->fpf & FPF_BPBC;
+	}
+
 	/* interrupt intercept */
 	switch (scb_s->icptcode) {
 	case ICPT_PROGI:
@@ -265,6 +271,7 @@ static int shadow_scb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
 	scb_s->ecb3 = 0;
 	scb_s->ecd = 0;
 	scb_s->fac = 0;
+	scb_s->fpf = 0;
 
 	rc = prepare_cpuflags(vcpu, vsie_page);
 	if (rc)
@@ -324,6 +331,9 @@ static int shadow_scb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
 		prefix_unmapped(vsie_page);
 		scb_s->ecb |= scb_o->ecb & ECB_TE;
 	}
+	/* branch prediction */
+	if (test_kvm_facility(vcpu->kvm, 82))
+		scb_s->fpf |= scb_o->fpf & FPF_BPBC;
 	/* SIMD */
 	if (test_kvm_facility(vcpu->kvm, 129)) {
 		scb_s->eca |= scb_o->eca & ECA_VX;
diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
index aa15b4c0e3d1..ff6f8022612c 100644
--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
@@ -1264,7 +1264,7 @@ idtentry async_page_fault do_async_page_fault has_error_code=1
 #endif
 
 #ifdef CONFIG_X86_MCE
-idtentry machine_check			has_error_code=0	paranoid=1 do_sym=*machine_check_vector(%rip)
+idtentry machine_check		do_mce	has_error_code=0	paranoid=1
 #endif
 
 /*
diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h
index 7b45d8424150..4ad41087ce0e 100644
--- a/arch/x86/include/asm/nospec-branch.h
+++ b/arch/x86/include/asm/nospec-branch.h
@@ -194,6 +194,9 @@ enum spectre_v2_mitigation {
 	SPECTRE_V2_IBRS,
 };
 
+extern char __indirect_thunk_start[];
+extern char __indirect_thunk_end[];
+
 /*
  * On VMEXIT we must ensure that no RSB predictions learned in the guest
  * can be followed in the host, by overwriting the RSB completely. Both
@@ -203,16 +206,17 @@ enum spectre_v2_mitigation {
 static inline void vmexit_fill_RSB(void)
 {
 #ifdef CONFIG_RETPOLINE
-	unsigned long loops = RSB_CLEAR_LOOPS / 2;
+	unsigned long loops;
 
 	asm volatile (ANNOTATE_NOSPEC_ALTERNATIVE
 		      ALTERNATIVE("jmp 910f",
 				  __stringify(__FILL_RETURN_BUFFER(%0, RSB_CLEAR_LOOPS, %1)),
 				  X86_FEATURE_RETPOLINE)
 		      "910:"
-		      : "=&r" (loops), ASM_CALL_CONSTRAINT
-		      : "r" (loops) : "memory" );
+		      : "=r" (loops), ASM_CALL_CONSTRAINT
+		      : : "memory" );
 #endif
 }
+
 #endif /* __ASSEMBLY__ */
 #endif /* __NOSPEC_BRANCH_H__ */
diff --git a/arch/x86/include/asm/traps.h b/arch/x86/include/asm/traps.h
index 31051f35cbb7..3de69330e6c5 100644
--- a/arch/x86/include/asm/traps.h
+++ b/arch/x86/include/asm/traps.h
@@ -88,6 +88,7 @@ dotraplinkage void do_simd_coprocessor_error(struct pt_regs *, long);
 #ifdef CONFIG_X86_32
 dotraplinkage void do_iret_error(struct pt_regs *, long);
 #endif
+dotraplinkage void do_mce(struct pt_regs *, long);
 
 static inline int get_si_code(unsigned long condition)
 {
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
index b1d616d08eee..868e412b4f0c 100644
--- a/arch/x86/kernel/cpu/mcheck/mce.c
+++ b/arch/x86/kernel/cpu/mcheck/mce.c
@@ -1785,6 +1785,11 @@ static void unexpected_machine_check(struct pt_regs *regs, long error_code)
 void (*machine_check_vector)(struct pt_regs *, long error_code) =
 						unexpected_machine_check;
 
+dotraplinkage void do_mce(struct pt_regs *regs, long error_code)
+{
+	machine_check_vector(regs, error_code);
+}
+
 /*
  * Called for each booted CPU to set up machine checks.
  * Must be called with preempt off:
diff --git a/arch/x86/kernel/kprobes/opt.c b/arch/x86/kernel/kprobes/opt.c
index e941136e24d8..203d398802a3 100644
--- a/arch/x86/kernel/kprobes/opt.c
+++ b/arch/x86/kernel/kprobes/opt.c
@@ -40,6 +40,7 @@
 #include <asm/debugreg.h>
 #include <asm/set_memory.h>
 #include <asm/sections.h>
+#include <asm/nospec-branch.h>
 
 #include "common.h"
 
@@ -203,7 +204,7 @@ static int copy_optimized_instructions(u8 *dest, u8 *src, u8 *real)
 }
 
 /* Check whether insn is indirect jump */
-static int insn_is_indirect_jump(struct insn *insn)
+static int __insn_is_indirect_jump(struct insn *insn)
 {
 	return ((insn->opcode.bytes[0] == 0xff &&
 		(X86_MODRM_REG(insn->modrm.value) & 6) == 4) || /* Jump */
@@ -237,6 +238,26 @@ static int insn_jump_into_range(struct insn *insn, unsigned long start, int len)
 	return (start <= target && target <= start + len);
 }
 
+static int insn_is_indirect_jump(struct insn *insn)
+{
+	int ret = __insn_is_indirect_jump(insn);
+
+#ifdef CONFIG_RETPOLINE
+	/*
+	 * Jump to x86_indirect_thunk_* is treated as an indirect jump.
+	 * Note that even with CONFIG_RETPOLINE=y, the kernel compiled with
+	 * older gcc may use indirect jump. So we add this check instead of
+	 * replace indirect-jump check.
+	 */
+	if (!ret)
+		ret = insn_jump_into_range(insn,
+				(unsigned long)__indirect_thunk_start,
+				(unsigned long)__indirect_thunk_end -
+				(unsigned long)__indirect_thunk_start);
+#endif
+	return ret;
+}
+
 /* Decode whole function to ensure any instructions don't jump into target */
 static int can_optimize(unsigned long paddr)
 {
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 832a6acd730f..cb368c2a22ab 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -380,19 +380,24 @@ void stop_this_cpu(void *dummy)
 	disable_local_APIC();
 	mcheck_cpu_clear(this_cpu_ptr(&cpu_info));
 
+	/*
+	 * Use wbinvd on processors that support SME. This provides support
+	 * for performing a successful kexec when going from SME inactive
+	 * to SME active (or vice-versa). The cache must be cleared so that
+	 * if there are entries with the same physical address, both with and
+	 * without the encryption bit, they don't race each other when flushed
+	 * and potentially end up with the wrong entry being committed to
+	 * memory.
+	 */
+	if (boot_cpu_has(X86_FEATURE_SME))
+		native_wbinvd();
 	for (;;) {
 		/*
-		 * Use wbinvd followed by hlt to stop the processor. This
-		 * provides support for kexec on a processor that supports
-		 * SME. With kexec, going from SME inactive to SME active
-		 * requires clearing cache entries so that addresses without
-		 * the encryption bit set don't corrupt the same physical
-		 * address that has the encryption bit set when caches are
-		 * flushed. To achieve this a wbinvd is performed followed by
-		 * a hlt. Even if the processor is not in the kexec/SME
-		 * scenario this only adds a wbinvd to a halting processor.
+		 * Use native_halt() so that memory contents don't change
+		 * (stack usage and variables) after possibly issuing the
+		 * native_wbinvd() above.
 		 */
-		asm volatile("wbinvd; hlt" : : : "memory");
+		native_halt();
 	}
 }
 
diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
index 1e413a9326aa..9b138a06c1a4 100644
--- a/arch/x86/kernel/vmlinux.lds.S
+++ b/arch/x86/kernel/vmlinux.lds.S
@@ -124,6 +124,12 @@ SECTIONS
 		ASSERT(. - _entry_trampoline == PAGE_SIZE, "entry trampoline is too big");
 #endif
 
+#ifdef CONFIG_RETPOLINE
+		__indirect_thunk_start = .;
+		*(.text.__x86.indirect_thunk)
+		__indirect_thunk_end = .;
+#endif
+
 		/* End of text section */
 		_etext = .;
 	} :text = 0x9090
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 1cec2c62a0b0..c53298dfbf50 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -7496,13 +7496,13 @@ EXPORT_SYMBOL_GPL(kvm_task_switch);
 
 int kvm_valid_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
 {
-	if ((sregs->efer & EFER_LME) && (sregs->cr0 & X86_CR0_PG_BIT)) {
+	if ((sregs->efer & EFER_LME) && (sregs->cr0 & X86_CR0_PG)) {
 		/*
 		 * When EFER.LME and CR0.PG are set, the processor is in
 		 * 64-bit mode (though maybe in a 32-bit code segment).
 		 * CR4.PAE and EFER.LMA must be set.
 		 */
-		if (!(sregs->cr4 & X86_CR4_PAE_BIT)
+		if (!(sregs->cr4 & X86_CR4_PAE)
 		    || !(sregs->efer & EFER_LMA))
 			return -EINVAL;
 	} else {
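
The bug class fixed here is that X86_CR0_PG_BIT is a bit number (31), not a mask, so the old test checked bits 0-4 of CR0 and never bit 31; likewise for X86_CR4_PAE_BIT. A standalone editorial demo, with macro values copied from the x86 uapi processor-flags.h convention:

	#include <assert.h>
	#include <stdint.h>

	#define X86_CR0_PG_BIT	31
	#define X86_CR0_PG	(1ULL << X86_CR0_PG_BIT)

	int main(void)
	{
		uint64_t cr0 = X86_CR0_PG;		/* paging enabled */

		assert(cr0 & X86_CR0_PG);		/* fixed form: mask test */
		assert(!(cr0 & X86_CR0_PG_BIT));	/* old form tests bits 0-4 */
		return 0;
	}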
diff --git a/arch/x86/lib/retpoline.S b/arch/x86/lib/retpoline.S
index cb45c6cb465f..dfb2ba91b670 100644
--- a/arch/x86/lib/retpoline.S
+++ b/arch/x86/lib/retpoline.S
@@ -9,7 +9,7 @@
 #include <asm/nospec-branch.h>
 
 .macro THUNK reg
-	.section .text.__x86.indirect_thunk.\reg
+	.section .text.__x86.indirect_thunk
 
 ENTRY(__x86_indirect_thunk_\reg)
 	CFI_STARTPROC
@@ -25,7 +25,8 @@ ENDPROC(__x86_indirect_thunk_\reg)
  * than one per register with the correct names. So we do it
  * the simple and nasty way...
  */
-#define EXPORT_THUNK(reg) EXPORT_SYMBOL(__x86_indirect_thunk_ ## reg)
+#define __EXPORT_THUNK(sym) _ASM_NOKPROBE(sym); EXPORT_SYMBOL(sym)
+#define EXPORT_THUNK(reg) __EXPORT_THUNK(__x86_indirect_thunk_ ## reg)
 #define GENERATE_THUNK(reg) THUNK reg ; EXPORT_THUNK(reg)
 
 GENERATE_THUNK(_ASM_AX)
diff --git a/arch/x86/mm/mem_encrypt.c b/arch/x86/mm/mem_encrypt.c
index 3ef362f598e3..e1d61e8500f9 100644
--- a/arch/x86/mm/mem_encrypt.c
+++ b/arch/x86/mm/mem_encrypt.c
@@ -738,7 +738,7 @@ static unsigned long __init sme_pgtable_calc(unsigned long len)
 	return total;
 }
 
-void __init sme_encrypt_kernel(struct boot_params *bp)
+void __init __nostackprotector sme_encrypt_kernel(struct boot_params *bp)
 {
 	unsigned long workarea_start, workarea_end, workarea_len;
 	unsigned long execute_start, execute_end, execute_len;
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 9fc12f556534..554d60394c06 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -1954,10 +1954,15 @@ static int crypt_setkey(struct crypt_config *cc)
 	/* Ignore extra keys (which are used for IV etc) */
 	subkey_size = crypt_subkey_size(cc);
 
-	if (crypt_integrity_hmac(cc))
+	if (crypt_integrity_hmac(cc)) {
+		if (subkey_size < cc->key_mac_size)
+			return -EINVAL;
+
 		crypt_copy_authenckey(cc->authenc_key, cc->key,
 				      subkey_size - cc->key_mac_size,
 				      cc->key_mac_size);
+	}
+
 	for (i = 0; i < cc->tfms_count; i++) {
 		if (crypt_integrity_hmac(cc))
 			r = crypto_aead_setkey(cc->cipher_tfm.tfms_aead[i],
@@ -2053,9 +2058,6 @@ static int crypt_set_keyring_key(struct crypt_config *cc, const char *key_string
 
 	ret = crypt_setkey(cc);
 
-	/* wipe the kernel key payload copy in each case */
-	memset(cc->key, 0, cc->key_size * sizeof(u8));
-
 	if (!ret) {
 		set_bit(DM_CRYPT_KEY_VALID, &cc->flags);
 		kzfree(cc->key_string);
@@ -2523,6 +2525,10 @@ static int crypt_ctr_cipher(struct dm_target *ti, char *cipher_in, char *key)
 		}
 	}
 
+	/* wipe the kernel key payload copy */
+	if (cc->key_string)
+		memset(cc->key, 0, cc->key_size * sizeof(u8));
+
 	return ret;
 }
 
@@ -2740,6 +2746,7 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 			cc->tag_pool_max_sectors * cc->on_disk_tag_size);
 		if (!cc->tag_pool) {
 			ti->error = "Cannot allocate integrity tags mempool";
+			ret = -ENOMEM;
 			goto bad;
 		}
 
@@ -2961,6 +2968,9 @@ static int crypt_message(struct dm_target *ti, unsigned argc, char **argv)
 				return ret;
 			if (cc->iv_gen_ops && cc->iv_gen_ops->init)
 				ret = cc->iv_gen_ops->init(cc);
+			/* wipe the kernel key payload copy */
+			if (cc->key_string)
+				memset(cc->key, 0, cc->key_size * sizeof(u8));
 			return ret;
 		}
 		if (argc == 2 && !strcasecmp(argv[1], "wipe")) {
@@ -3007,7 +3017,7 @@ static void crypt_io_hints(struct dm_target *ti, struct queue_limits *limits)
 
 static struct target_type crypt_target = {
 	.name   = "crypt",
-	.version = {1, 18, 0},
+	.version = {1, 18, 1},
 	.module = THIS_MODULE,
 	.ctr    = crypt_ctr,
 	.dtr    = crypt_dtr,
diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c
index 05c7bfd0c9d9..46d7c8749222 100644
--- a/drivers/md/dm-integrity.c
+++ b/drivers/md/dm-integrity.c
@@ -2559,7 +2559,8 @@ static int create_journal(struct dm_integrity_c *ic, char **error)
2559 int r = 0; 2559 int r = 0;
2560 unsigned i; 2560 unsigned i;
2561 __u64 journal_pages, journal_desc_size, journal_tree_size; 2561 __u64 journal_pages, journal_desc_size, journal_tree_size;
2562 unsigned char *crypt_data = NULL; 2562 unsigned char *crypt_data = NULL, *crypt_iv = NULL;
2563 struct skcipher_request *req = NULL;
2563 2564
2564 ic->commit_ids[0] = cpu_to_le64(0x1111111111111111ULL); 2565 ic->commit_ids[0] = cpu_to_le64(0x1111111111111111ULL);
2565 ic->commit_ids[1] = cpu_to_le64(0x2222222222222222ULL); 2566 ic->commit_ids[1] = cpu_to_le64(0x2222222222222222ULL);
@@ -2617,9 +2618,20 @@ static int create_journal(struct dm_integrity_c *ic, char **error)
2617 2618
2618 if (blocksize == 1) { 2619 if (blocksize == 1) {
2619 struct scatterlist *sg; 2620 struct scatterlist *sg;
2620 SKCIPHER_REQUEST_ON_STACK(req, ic->journal_crypt); 2621
2621 unsigned char iv[ivsize]; 2622 req = skcipher_request_alloc(ic->journal_crypt, GFP_KERNEL);
2622 skcipher_request_set_tfm(req, ic->journal_crypt); 2623 if (!req) {
2624 *error = "Could not allocate crypt request";
2625 r = -ENOMEM;
2626 goto bad;
2627 }
2628
2629 crypt_iv = kmalloc(ivsize, GFP_KERNEL);
2630 if (!crypt_iv) {
2631 *error = "Could not allocate iv";
2632 r = -ENOMEM;
2633 goto bad;
2634 }
2623 2635
2624 ic->journal_xor = dm_integrity_alloc_page_list(ic); 2636 ic->journal_xor = dm_integrity_alloc_page_list(ic);
2625 if (!ic->journal_xor) { 2637 if (!ic->journal_xor) {
@@ -2641,9 +2653,9 @@ static int create_journal(struct dm_integrity_c *ic, char **error)
2641 sg_set_buf(&sg[i], va, PAGE_SIZE); 2653 sg_set_buf(&sg[i], va, PAGE_SIZE);
2642 } 2654 }
2643 sg_set_buf(&sg[i], &ic->commit_ids, sizeof ic->commit_ids); 2655 sg_set_buf(&sg[i], &ic->commit_ids, sizeof ic->commit_ids);
2644 memset(iv, 0x00, ivsize); 2656 memset(crypt_iv, 0x00, ivsize);
2645 2657
2646 skcipher_request_set_crypt(req, sg, sg, PAGE_SIZE * ic->journal_pages + sizeof ic->commit_ids, iv); 2658 skcipher_request_set_crypt(req, sg, sg, PAGE_SIZE * ic->journal_pages + sizeof ic->commit_ids, crypt_iv);
2647 init_completion(&comp.comp); 2659 init_completion(&comp.comp);
2648 comp.in_flight = (atomic_t)ATOMIC_INIT(1); 2660 comp.in_flight = (atomic_t)ATOMIC_INIT(1);
2649 if (do_crypt(true, req, &comp)) 2661 if (do_crypt(true, req, &comp))
@@ -2659,10 +2671,22 @@ static int create_journal(struct dm_integrity_c *ic, char **error)
2659 crypto_free_skcipher(ic->journal_crypt); 2671 crypto_free_skcipher(ic->journal_crypt);
2660 ic->journal_crypt = NULL; 2672 ic->journal_crypt = NULL;
2661 } else { 2673 } else {
2662 SKCIPHER_REQUEST_ON_STACK(req, ic->journal_crypt);
2663 unsigned char iv[ivsize];
2664 unsigned crypt_len = roundup(ivsize, blocksize); 2674 unsigned crypt_len = roundup(ivsize, blocksize);
2665 2675
2676 req = skcipher_request_alloc(ic->journal_crypt, GFP_KERNEL);
2677 if (!req) {
2678 *error = "Could not allocate crypt request";
2679 r = -ENOMEM;
2680 goto bad;
2681 }
2682
2683 crypt_iv = kmalloc(ivsize, GFP_KERNEL);
2684 if (!crypt_iv) {
2685 *error = "Could not allocate iv";
2686 r = -ENOMEM;
2687 goto bad;
2688 }
2689
2666 crypt_data = kmalloc(crypt_len, GFP_KERNEL); 2690 crypt_data = kmalloc(crypt_len, GFP_KERNEL);
2667 if (!crypt_data) { 2691 if (!crypt_data) {
2668 *error = "Unable to allocate crypt data"; 2692 *error = "Unable to allocate crypt data";
@@ -2670,8 +2694,6 @@ static int create_journal(struct dm_integrity_c *ic, char **error)
2670 goto bad; 2694 goto bad;
2671 } 2695 }
2672 2696
2673 skcipher_request_set_tfm(req, ic->journal_crypt);
2674
2675 ic->journal_scatterlist = dm_integrity_alloc_journal_scatterlist(ic, ic->journal); 2697 ic->journal_scatterlist = dm_integrity_alloc_journal_scatterlist(ic, ic->journal);
2676 if (!ic->journal_scatterlist) { 2698 if (!ic->journal_scatterlist) {
2677 *error = "Unable to allocate sg list"; 2699 *error = "Unable to allocate sg list";
@@ -2695,12 +2717,12 @@ static int create_journal(struct dm_integrity_c *ic, char **error)
2695 struct skcipher_request *section_req; 2717 struct skcipher_request *section_req;
2696 __u32 section_le = cpu_to_le32(i); 2718 __u32 section_le = cpu_to_le32(i);
2697 2719
2698 memset(iv, 0x00, ivsize); 2720 memset(crypt_iv, 0x00, ivsize);
2699 memset(crypt_data, 0x00, crypt_len); 2721 memset(crypt_data, 0x00, crypt_len);
2700 memcpy(crypt_data, &section_le, min((size_t)crypt_len, sizeof(section_le))); 2722 memcpy(crypt_data, &section_le, min((size_t)crypt_len, sizeof(section_le)));
2701 2723
2702 sg_init_one(&sg, crypt_data, crypt_len); 2724 sg_init_one(&sg, crypt_data, crypt_len);
2703 skcipher_request_set_crypt(req, &sg, &sg, crypt_len, iv); 2725 skcipher_request_set_crypt(req, &sg, &sg, crypt_len, crypt_iv);
2704 init_completion(&comp.comp); 2726 init_completion(&comp.comp);
2705 comp.in_flight = (atomic_t)ATOMIC_INIT(1); 2727 comp.in_flight = (atomic_t)ATOMIC_INIT(1);
2706 if (do_crypt(true, req, &comp)) 2728 if (do_crypt(true, req, &comp))
@@ -2758,6 +2780,9 @@ retest_commit_id:
2758 } 2780 }
2759bad: 2781bad:
2760 kfree(crypt_data); 2782 kfree(crypt_data);
2783 kfree(crypt_iv);
2784 skcipher_request_free(req);
2785
2761 return r; 2786 return r;
2762} 2787}
2763 2788
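
The dm-integrity rework above follows the pattern used when removing variable-length arrays: the on-stack request and the iv[ivsize] array become heap allocations, and because kfree() and skcipher_request_free() both accept NULL, a single bad: label can clean up whichever allocations succeeded. A condensed, hedged sketch (synchronous tfm assumed for brevity):

    static int example_crypt(struct crypto_skcipher *tfm,
                             struct scatterlist *sg, unsigned int len)
    {
        unsigned int ivsize = crypto_skcipher_ivsize(tfm);
        struct skcipher_request *req = NULL;
        u8 *iv = NULL;
        int r = 0;

        /* the tfm is bound here, so no skcipher_request_set_tfm() call */
        req = skcipher_request_alloc(tfm, GFP_KERNEL);
        if (!req) {
            r = -ENOMEM;
            goto bad;
        }
        iv = kmalloc(ivsize, GFP_KERNEL);
        if (!iv) {
            r = -ENOMEM;
            goto bad;
        }
        memset(iv, 0x00, ivsize);
        skcipher_request_set_callback(req, 0, NULL, NULL);
        skcipher_request_set_crypt(req, sg, sg, len, iv);
        r = crypto_skcipher_encrypt(req);
    bad:
        kfree(iv);                      /* kfree(NULL) is a no-op */
        skcipher_request_free(req);     /* also NULL-safe */
        return r;
    }
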
diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
index d31d18d9727c..36ef284ad086 100644
--- a/drivers/md/dm-thin-metadata.c
+++ b/drivers/md/dm-thin-metadata.c
@@ -80,10 +80,14 @@
80#define SECTOR_TO_BLOCK_SHIFT 3 80#define SECTOR_TO_BLOCK_SHIFT 3
81 81
82/* 82/*
83 * For btree insert:
83 * 3 for btree insert + 84 * 3 for btree insert +
84 * 2 for btree lookup used within space map 85 * 2 for btree lookup used within space map
86 * For btree remove:
87 * 2 for shadow spine +
88 * 4 for rebalance 3 child node
85 */ 89 */
86#define THIN_MAX_CONCURRENT_LOCKS 5 90#define THIN_MAX_CONCURRENT_LOCKS 6
87 91
88/* This should be plenty */ 92/* This should be plenty */
89#define SPACE_MAP_ROOT_SIZE 128 93#define SPACE_MAP_ROOT_SIZE 128
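
The arithmetic behind the new bound: an insert holds at most 3 + 2 = 5 concurrent block locks, a remove holds 2 (shadow spine) + 4 (rebalancing three child nodes) = 6, and the limit must cover the worse of the two. Spelled out hypothetically:

    #define LOCKS_FOR_INSERT (3 + 2)  /* btree insert + space-map lookup */
    #define LOCKS_FOR_REMOVE (2 + 4)  /* shadow spine + 3-child rebalance */
    /* THIN_MAX_CONCURRENT_LOCKS = max(5, 6) = 6 */
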
diff --git a/drivers/md/persistent-data/dm-btree.c b/drivers/md/persistent-data/dm-btree.c
index f21ce6a3d4cf..58b319757b1e 100644
--- a/drivers/md/persistent-data/dm-btree.c
+++ b/drivers/md/persistent-data/dm-btree.c
@@ -683,23 +683,8 @@ static int btree_split_beneath(struct shadow_spine *s, uint64_t key)
683 pn->keys[1] = rn->keys[0]; 683 pn->keys[1] = rn->keys[0];
684 memcpy_disk(value_ptr(pn, 1), &val, sizeof(__le64)); 684 memcpy_disk(value_ptr(pn, 1), &val, sizeof(__le64));
685 685
686 /* 686 unlock_block(s->info, left);
687 * rejig the spine. This is ugly, since it knows too 687 unlock_block(s->info, right);
688 * much about the spine
689 */
690 if (s->nodes[0] != new_parent) {
691 unlock_block(s->info, s->nodes[0]);
692 s->nodes[0] = new_parent;
693 }
694 if (key < le64_to_cpu(rn->keys[0])) {
695 unlock_block(s->info, right);
696 s->nodes[1] = left;
697 } else {
698 unlock_block(s->info, left);
699 s->nodes[1] = right;
700 }
701 s->count = 2;
702
703 return 0; 688 return 0;
704} 689}
705 690
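
The deleted block tried to leave the shadow spine pointing at whichever new child covers the key; the replacement simply unlocks both children, the idea being that the spine still holds the freshly populated parent, so the insert loop can re-descend from it and take the correct child with normal locking. A hedged reading of the resulting control flow:

    unlock_block(s->info, left);
    unlock_block(s->info, right);
    return 0;   /* spine = { parent }; the caller walks down again
                 * on its next iteration */
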
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
index 9b9f3f99b39d..4fee3553e1a3 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
@@ -111,6 +111,9 @@ static void cxgb4_process_flow_match(struct net_device *dev,
111 ethtype_mask = 0; 111 ethtype_mask = 0;
112 } 112 }
113 113
114 if (ethtype_key == ETH_P_IPV6)
115 fs->type = 1;
116
114 fs->val.ethtype = ethtype_key; 117 fs->val.ethtype = ethtype_key;
115 fs->mask.ethtype = ethtype_mask; 118 fs->mask.ethtype = ethtype_mask;
116 fs->val.proto = key->ip_proto; 119 fs->val.proto = key->ip_proto;
@@ -205,8 +208,8 @@ static void cxgb4_process_flow_match(struct net_device *dev,
205 VLAN_PRIO_SHIFT); 208 VLAN_PRIO_SHIFT);
206 vlan_tci_mask = mask->vlan_id | (mask->vlan_priority << 209 vlan_tci_mask = mask->vlan_id | (mask->vlan_priority <<
207 VLAN_PRIO_SHIFT); 210 VLAN_PRIO_SHIFT);
208 fs->val.ivlan = cpu_to_be16(vlan_tci); 211 fs->val.ivlan = vlan_tci;
209 fs->mask.ivlan = cpu_to_be16(vlan_tci_mask); 212 fs->mask.ivlan = vlan_tci_mask;
210 213
211 /* Chelsio adapters use ivlan_vld bit to match vlan packets 214 /* Chelsio adapters use ivlan_vld bit to match vlan packets
212 * as 802.1Q. Also, when vlan tag is present in packets, 215 * as 802.1Q. Also, when vlan tag is present in packets,
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index c6e859a27ee6..e180657a02ef 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -4634,6 +4634,15 @@ int be_update_queues(struct be_adapter *adapter)
4634 4634
4635 be_schedule_worker(adapter); 4635 be_schedule_worker(adapter);
4636 4636
4637 /*
4638 * The IF was destroyed and re-created. We need to clear
4639 * all promiscuous flags valid for the destroyed IF.
4640 * Without this promisc mode is not restored during
4641 * be_open() because the driver thinks that it is
4642 * already enabled in HW.
4643 */
4644 adapter->if_flags &= ~BE_IF_FLAGS_ALL_PROMISCUOUS;
4645
4637 if (netif_running(netdev)) 4646 if (netif_running(netdev))
4638 status = be_open(netdev); 4647 status = be_open(netdev);
4639 4648
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index be2ce8dece4a..8f2a77ecf4fb 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -411,6 +411,10 @@ static int reset_rx_pools(struct ibmvnic_adapter *adapter)
411 struct ibmvnic_rx_pool *rx_pool; 411 struct ibmvnic_rx_pool *rx_pool;
412 int rx_scrqs; 412 int rx_scrqs;
413 int i, j, rc; 413 int i, j, rc;
414 u64 *size_array;
415
416 size_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
417 be32_to_cpu(adapter->login_rsp_buf->off_rxadd_buff_size));
414 418
415 rx_scrqs = be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs); 419 rx_scrqs = be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
416 for (i = 0; i < rx_scrqs; i++) { 420 for (i = 0; i < rx_scrqs; i++) {
@@ -418,7 +422,17 @@ static int reset_rx_pools(struct ibmvnic_adapter *adapter)
418 422
419 netdev_dbg(adapter->netdev, "Re-setting rx_pool[%d]\n", i); 423 netdev_dbg(adapter->netdev, "Re-setting rx_pool[%d]\n", i);
420 424
421 rc = reset_long_term_buff(adapter, &rx_pool->long_term_buff); 425 if (rx_pool->buff_size != be64_to_cpu(size_array[i])) {
426 free_long_term_buff(adapter, &rx_pool->long_term_buff);
427 rx_pool->buff_size = be64_to_cpu(size_array[i]);
428 alloc_long_term_buff(adapter, &rx_pool->long_term_buff,
429 rx_pool->size *
430 rx_pool->buff_size);
431 } else {
432 rc = reset_long_term_buff(adapter,
433 &rx_pool->long_term_buff);
434 }
435
422 if (rc) 436 if (rc)
423 return rc; 437 return rc;
424 438
@@ -440,14 +454,12 @@ static int reset_rx_pools(struct ibmvnic_adapter *adapter)
440static void release_rx_pools(struct ibmvnic_adapter *adapter) 454static void release_rx_pools(struct ibmvnic_adapter *adapter)
441{ 455{
442 struct ibmvnic_rx_pool *rx_pool; 456 struct ibmvnic_rx_pool *rx_pool;
443 int rx_scrqs;
444 int i, j; 457 int i, j;
445 458
446 if (!adapter->rx_pool) 459 if (!adapter->rx_pool)
447 return; 460 return;
448 461
449 rx_scrqs = be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs); 462 for (i = 0; i < adapter->num_active_rx_pools; i++) {
450 for (i = 0; i < rx_scrqs; i++) {
451 rx_pool = &adapter->rx_pool[i]; 463 rx_pool = &adapter->rx_pool[i];
452 464
453 netdev_dbg(adapter->netdev, "Releasing rx_pool[%d]\n", i); 465 netdev_dbg(adapter->netdev, "Releasing rx_pool[%d]\n", i);
@@ -470,6 +482,7 @@ static void release_rx_pools(struct ibmvnic_adapter *adapter)
470 482
471 kfree(adapter->rx_pool); 483 kfree(adapter->rx_pool);
472 adapter->rx_pool = NULL; 484 adapter->rx_pool = NULL;
485 adapter->num_active_rx_pools = 0;
473} 486}
474 487
475static int init_rx_pools(struct net_device *netdev) 488static int init_rx_pools(struct net_device *netdev)
@@ -494,6 +507,8 @@ static int init_rx_pools(struct net_device *netdev)
494 return -1; 507 return -1;
495 } 508 }
496 509
510 adapter->num_active_rx_pools = 0;
511
497 for (i = 0; i < rxadd_subcrqs; i++) { 512 for (i = 0; i < rxadd_subcrqs; i++) {
498 rx_pool = &adapter->rx_pool[i]; 513 rx_pool = &adapter->rx_pool[i];
499 514
@@ -537,6 +552,8 @@ static int init_rx_pools(struct net_device *netdev)
537 rx_pool->next_free = 0; 552 rx_pool->next_free = 0;
538 } 553 }
539 554
555 adapter->num_active_rx_pools = rxadd_subcrqs;
556
540 return 0; 557 return 0;
541} 558}
542 559
@@ -587,13 +604,12 @@ static void release_vpd_data(struct ibmvnic_adapter *adapter)
587static void release_tx_pools(struct ibmvnic_adapter *adapter) 604static void release_tx_pools(struct ibmvnic_adapter *adapter)
588{ 605{
589 struct ibmvnic_tx_pool *tx_pool; 606 struct ibmvnic_tx_pool *tx_pool;
590 int i, tx_scrqs; 607 int i;
591 608
592 if (!adapter->tx_pool) 609 if (!adapter->tx_pool)
593 return; 610 return;
594 611
595 tx_scrqs = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs); 612 for (i = 0; i < adapter->num_active_tx_pools; i++) {
596 for (i = 0; i < tx_scrqs; i++) {
597 netdev_dbg(adapter->netdev, "Releasing tx_pool[%d]\n", i); 613 netdev_dbg(adapter->netdev, "Releasing tx_pool[%d]\n", i);
598 tx_pool = &adapter->tx_pool[i]; 614 tx_pool = &adapter->tx_pool[i];
599 kfree(tx_pool->tx_buff); 615 kfree(tx_pool->tx_buff);
@@ -604,6 +620,7 @@ static void release_tx_pools(struct ibmvnic_adapter *adapter)
604 620
605 kfree(adapter->tx_pool); 621 kfree(adapter->tx_pool);
606 adapter->tx_pool = NULL; 622 adapter->tx_pool = NULL;
623 adapter->num_active_tx_pools = 0;
607} 624}
608 625
609static int init_tx_pools(struct net_device *netdev) 626static int init_tx_pools(struct net_device *netdev)
@@ -620,6 +637,8 @@ static int init_tx_pools(struct net_device *netdev)
620 if (!adapter->tx_pool) 637 if (!adapter->tx_pool)
621 return -1; 638 return -1;
622 639
640 adapter->num_active_tx_pools = 0;
641
623 for (i = 0; i < tx_subcrqs; i++) { 642 for (i = 0; i < tx_subcrqs; i++) {
624 tx_pool = &adapter->tx_pool[i]; 643 tx_pool = &adapter->tx_pool[i];
625 644
@@ -667,6 +686,8 @@ static int init_tx_pools(struct net_device *netdev)
667 tx_pool->producer_index = 0; 686 tx_pool->producer_index = 0;
668 } 687 }
669 688
689 adapter->num_active_tx_pools = tx_subcrqs;
690
670 return 0; 691 return 0;
671} 692}
672 693
@@ -861,7 +882,7 @@ static int ibmvnic_get_vpd(struct ibmvnic_adapter *adapter)
861 if (adapter->vpd->buff) 882 if (adapter->vpd->buff)
862 len = adapter->vpd->len; 883 len = adapter->vpd->len;
863 884
864 reinit_completion(&adapter->fw_done); 885 init_completion(&adapter->fw_done);
865 crq.get_vpd_size.first = IBMVNIC_CRQ_CMD; 886 crq.get_vpd_size.first = IBMVNIC_CRQ_CMD;
866 crq.get_vpd_size.cmd = GET_VPD_SIZE; 887 crq.get_vpd_size.cmd = GET_VPD_SIZE;
867 ibmvnic_send_crq(adapter, &crq); 888 ibmvnic_send_crq(adapter, &crq);
@@ -923,6 +944,13 @@ static int init_resources(struct ibmvnic_adapter *adapter)
923 if (!adapter->vpd) 944 if (!adapter->vpd)
924 return -ENOMEM; 945 return -ENOMEM;
925 946
947 /* Vital Product Data (VPD) */
948 rc = ibmvnic_get_vpd(adapter);
949 if (rc) {
950 netdev_err(netdev, "failed to initialize Vital Product Data (VPD)\n");
951 return rc;
952 }
953
926 adapter->map_id = 1; 954 adapter->map_id = 1;
927 adapter->napi = kcalloc(adapter->req_rx_queues, 955 adapter->napi = kcalloc(adapter->req_rx_queues,
928 sizeof(struct napi_struct), GFP_KERNEL); 956 sizeof(struct napi_struct), GFP_KERNEL);
@@ -996,7 +1024,7 @@ static int __ibmvnic_open(struct net_device *netdev)
996static int ibmvnic_open(struct net_device *netdev) 1024static int ibmvnic_open(struct net_device *netdev)
997{ 1025{
998 struct ibmvnic_adapter *adapter = netdev_priv(netdev); 1026 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
999 int rc, vpd; 1027 int rc;
1000 1028
1001 mutex_lock(&adapter->reset_lock); 1029 mutex_lock(&adapter->reset_lock);
1002 1030
@@ -1019,11 +1047,6 @@ static int ibmvnic_open(struct net_device *netdev)
1019 rc = __ibmvnic_open(netdev); 1047 rc = __ibmvnic_open(netdev);
1020 netif_carrier_on(netdev); 1048 netif_carrier_on(netdev);
1021 1049
1022 /* Vital Product Data (VPD) */
1023 vpd = ibmvnic_get_vpd(adapter);
1024 if (vpd)
1025 netdev_err(netdev, "failed to initialize Vital Product Data (VPD)\n");
1026
1027 mutex_unlock(&adapter->reset_lock); 1050 mutex_unlock(&adapter->reset_lock);
1028 1051
1029 return rc; 1052 return rc;
@@ -1553,6 +1576,7 @@ static int ibmvnic_set_mac(struct net_device *netdev, void *p)
1553static int do_reset(struct ibmvnic_adapter *adapter, 1576static int do_reset(struct ibmvnic_adapter *adapter,
1554 struct ibmvnic_rwi *rwi, u32 reset_state) 1577 struct ibmvnic_rwi *rwi, u32 reset_state)
1555{ 1578{
1579 u64 old_num_rx_queues, old_num_tx_queues;
1556 struct net_device *netdev = adapter->netdev; 1580 struct net_device *netdev = adapter->netdev;
1557 int i, rc; 1581 int i, rc;
1558 1582
@@ -1562,6 +1586,9 @@ static int do_reset(struct ibmvnic_adapter *adapter,
1562 netif_carrier_off(netdev); 1586 netif_carrier_off(netdev);
1563 adapter->reset_reason = rwi->reset_reason; 1587 adapter->reset_reason = rwi->reset_reason;
1564 1588
1589 old_num_rx_queues = adapter->req_rx_queues;
1590 old_num_tx_queues = adapter->req_tx_queues;
1591
1565 if (rwi->reset_reason == VNIC_RESET_MOBILITY) { 1592 if (rwi->reset_reason == VNIC_RESET_MOBILITY) {
1566 rc = ibmvnic_reenable_crq_queue(adapter); 1593 rc = ibmvnic_reenable_crq_queue(adapter);
1567 if (rc) 1594 if (rc)
@@ -1606,6 +1633,12 @@ static int do_reset(struct ibmvnic_adapter *adapter,
1606 rc = init_resources(adapter); 1633 rc = init_resources(adapter);
1607 if (rc) 1634 if (rc)
1608 return rc; 1635 return rc;
1636 } else if (adapter->req_rx_queues != old_num_rx_queues ||
1637 adapter->req_tx_queues != old_num_tx_queues) {
1638 release_rx_pools(adapter);
1639 release_tx_pools(adapter);
1640 init_rx_pools(netdev);
1641 init_tx_pools(netdev);
1609 } else { 1642 } else {
1610 rc = reset_tx_pools(adapter); 1643 rc = reset_tx_pools(adapter);
1611 if (rc) 1644 if (rc)
@@ -3603,7 +3636,17 @@ static void handle_request_cap_rsp(union ibmvnic_crq *crq,
3603 *req_value, 3636 *req_value,
3604 (long int)be64_to_cpu(crq->request_capability_rsp. 3637 (long int)be64_to_cpu(crq->request_capability_rsp.
3605 number), name); 3638 number), name);
3606 *req_value = be64_to_cpu(crq->request_capability_rsp.number); 3639
3640 if (be16_to_cpu(crq->request_capability_rsp.capability) ==
3641 REQ_MTU) {
3642 pr_err("mtu of %llu is not supported. Reverting.\n",
3643 *req_value);
3644 *req_value = adapter->fallback.mtu;
3645 } else {
3646 *req_value =
3647 be64_to_cpu(crq->request_capability_rsp.number);
3648 }
3649
3607 ibmvnic_send_req_caps(adapter, 1); 3650 ibmvnic_send_req_caps(adapter, 1);
3608 return; 3651 return;
3609 default: 3652 default:
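
Most of the ibmvnic churn above serves one idea: teardown must not size its loops from login_rsp_buf, which a reset can replace, so the adapter records how many pools it actually brought up. A generic, hedged sketch of that bookkeeping; example_adapter, init_pool and free_pool are illustrative names:

    struct example_pool;
    int init_pool(struct example_pool *p);
    void free_pool(struct example_pool *p);

    struct example_adapter {
        struct example_pool *pool;
        u64 num_active_pools;
    };

    static void example_release_pools(struct example_adapter *a)
    {
        u64 i;

        for (i = 0; i < a->num_active_pools; i++)
            free_pool(&a->pool[i]);
        a->num_active_pools = 0;
    }

    static int example_init_pools(struct example_adapter *a, u64 n)
    {
        u64 i;

        a->num_active_pools = 0;        /* nothing valid yet */
        for (i = 0; i < n; i++) {
            if (init_pool(&a->pool[i])) {
                a->num_active_pools = i;   /* free only what exists */
                example_release_pools(a);
                return -1;
            }
        }
        a->num_active_pools = n;        /* all n are now live */
        return 0;
    }
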
diff --git a/drivers/net/ethernet/ibm/ibmvnic.h b/drivers/net/ethernet/ibm/ibmvnic.h
index 2df79fdd800b..fe21a6e2ddae 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.h
+++ b/drivers/net/ethernet/ibm/ibmvnic.h
@@ -1091,6 +1091,8 @@ struct ibmvnic_adapter {
1091 u64 opt_rxba_entries_per_subcrq; 1091 u64 opt_rxba_entries_per_subcrq;
1092 __be64 tx_rx_desc_req; 1092 __be64 tx_rx_desc_req;
1093 u8 map_id; 1093 u8 map_id;
1094 u64 num_active_rx_pools;
1095 u64 num_active_tx_pools;
1094 1096
1095 struct tasklet_struct tasklet; 1097 struct tasklet_struct tasklet;
1096 enum vnic_state state; 1098 enum vnic_state state;
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index d56fe32bf48d..8a22ff67b026 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -457,12 +457,10 @@ static enum skb_state defer_bh(struct usbnet *dev, struct sk_buff *skb,
457void usbnet_defer_kevent (struct usbnet *dev, int work) 457void usbnet_defer_kevent (struct usbnet *dev, int work)
458{ 458{
459 set_bit (work, &dev->flags); 459 set_bit (work, &dev->flags);
460 if (!schedule_work (&dev->kevent)) { 460 if (!schedule_work (&dev->kevent))
461 if (net_ratelimit()) 461 netdev_dbg(dev->net, "kevent %d may have been dropped\n", work);
462 netdev_err(dev->net, "kevent %d may have been dropped\n", work); 462 else
463 } else {
464 netdev_dbg(dev->net, "kevent %d scheduled\n", work); 463 netdev_dbg(dev->net, "kevent %d scheduled\n", work);
465 }
466} 464}
467EXPORT_SYMBOL_GPL(usbnet_defer_kevent); 465EXPORT_SYMBOL_GPL(usbnet_defer_kevent);
468 466
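
The demotion above reflects what schedule_work() actually reports: it returns false only when the work item was already queued, and since the flag bit was set beforehand, the pending run will still handle this event, so nothing is lost. The surviving pattern, annotated:

    set_bit(work, &dev->flags);         /* record which event to handle */
    if (!schedule_work(&dev->kevent))   /* false: already queued; that
                                         * run will see the bit we set */
        netdev_dbg(dev->net, "kevent %d may have been dropped\n", work);
    else
        netdev_dbg(dev->net, "kevent %d scheduled\n", work);
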
diff --git a/drivers/scsi/libsas/sas_scsi_host.c b/drivers/scsi/libsas/sas_scsi_host.c
index 58476b728c57..c9406852c3e9 100644
--- a/drivers/scsi/libsas/sas_scsi_host.c
+++ b/drivers/scsi/libsas/sas_scsi_host.c
@@ -486,15 +486,28 @@ static int sas_queue_reset(struct domain_device *dev, int reset_type,
486 486
487int sas_eh_abort_handler(struct scsi_cmnd *cmd) 487int sas_eh_abort_handler(struct scsi_cmnd *cmd)
488{ 488{
489 int res; 489 int res = TMF_RESP_FUNC_FAILED;
490 struct sas_task *task = TO_SAS_TASK(cmd); 490 struct sas_task *task = TO_SAS_TASK(cmd);
491 struct Scsi_Host *host = cmd->device->host; 491 struct Scsi_Host *host = cmd->device->host;
492 struct domain_device *dev = cmd_to_domain_dev(cmd);
492 struct sas_internal *i = to_sas_internal(host->transportt); 493 struct sas_internal *i = to_sas_internal(host->transportt);
494 unsigned long flags;
493 495
494 if (!i->dft->lldd_abort_task) 496 if (!i->dft->lldd_abort_task)
495 return FAILED; 497 return FAILED;
496 498
497 res = i->dft->lldd_abort_task(task); 499 spin_lock_irqsave(host->host_lock, flags);
500 /* We cannot do async aborts for SATA devices */
501 if (dev_is_sata(dev) && !host->host_eh_scheduled) {
502 spin_unlock_irqrestore(host->host_lock, flags);
503 return FAILED;
504 }
505 spin_unlock_irqrestore(host->host_lock, flags);
506
507 if (task)
508 res = i->dft->lldd_abort_task(task);
509 else
510 SAS_DPRINTK("no task to abort\n");
498 if (res == TMF_RESP_FUNC_SUCC || res == TMF_RESP_FUNC_COMPLETE) 511 if (res == TMF_RESP_FUNC_SUCC || res == TMF_RESP_FUNC_COMPLETE)
499 return SUCCESS; 512 return SUCCESS;
500 513
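
Three defensive changes in the abort handler above: res starts at TMF_RESP_FUNC_FAILED so a missing task can no longer leave it uninitialized, the LLDD is only called when a task actually exists, and SATA commands are refused outside full error handling because, as the new comment says, async aborts are not possible for SATA devices. The SATA gate in isolation, with the locking spelled out:

    spin_lock_irqsave(host->host_lock, flags);
    if (dev_is_sata(dev) && !host->host_eh_scheduled) {
        /* make SCSI escalate to full EH instead */
        spin_unlock_irqrestore(host->host_lock, flags);
        return FAILED;
    }
    spin_unlock_irqrestore(host->host_lock, flags);
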
diff --git a/fs/nfsd/auth.c b/fs/nfsd/auth.c
index f650e475d8f0..fdf2aad73470 100644
--- a/fs/nfsd/auth.c
+++ b/fs/nfsd/auth.c
@@ -60,10 +60,10 @@ int nfsd_setuser(struct svc_rqst *rqstp, struct svc_export *exp)
60 gi->gid[i] = exp->ex_anon_gid; 60 gi->gid[i] = exp->ex_anon_gid;
61 else 61 else
62 gi->gid[i] = rqgi->gid[i]; 62 gi->gid[i] = rqgi->gid[i];
63
64 /* Each thread allocates its own gi, no race */
65 groups_sort(gi);
66 } 63 }
64
65 /* Each thread allocates its own gi, no race */
66 groups_sort(gi);
67 } else { 67 } else {
68 gi = get_group_info(rqgi); 68 gi = get_group_info(rqgi);
69 } 69 }
diff --git a/fs/orangefs/devorangefs-req.c b/fs/orangefs/devorangefs-req.c
index ded456f17de6..c584ad8d023c 100644
--- a/fs/orangefs/devorangefs-req.c
+++ b/fs/orangefs/devorangefs-req.c
@@ -162,7 +162,7 @@ static ssize_t orangefs_devreq_read(struct file *file,
162 struct orangefs_kernel_op_s *op, *temp; 162 struct orangefs_kernel_op_s *op, *temp;
163 __s32 proto_ver = ORANGEFS_KERNEL_PROTO_VERSION; 163 __s32 proto_ver = ORANGEFS_KERNEL_PROTO_VERSION;
164 static __s32 magic = ORANGEFS_DEVREQ_MAGIC; 164 static __s32 magic = ORANGEFS_DEVREQ_MAGIC;
165 struct orangefs_kernel_op_s *cur_op = NULL; 165 struct orangefs_kernel_op_s *cur_op;
166 unsigned long ret; 166 unsigned long ret;
167 167
168 /* We do not support blocking IO. */ 168 /* We do not support blocking IO. */
@@ -186,6 +186,7 @@ static ssize_t orangefs_devreq_read(struct file *file,
186 return -EAGAIN; 186 return -EAGAIN;
187 187
188restart: 188restart:
189 cur_op = NULL;
189 /* Get next op (if any) from top of list. */ 190 /* Get next op (if any) from top of list. */
190 spin_lock(&orangefs_request_list_lock); 191 spin_lock(&orangefs_request_list_lock);
191 list_for_each_entry_safe(op, temp, &orangefs_request_list, list) { 192 list_for_each_entry_safe(op, temp, &orangefs_request_list, list) {
diff --git a/fs/orangefs/waitqueue.c b/fs/orangefs/waitqueue.c
index 835c6e148afc..0577d6dba8c8 100644
--- a/fs/orangefs/waitqueue.c
+++ b/fs/orangefs/waitqueue.c
@@ -29,10 +29,10 @@ static void orangefs_clean_up_interrupted_operation(struct orangefs_kernel_op_s
29 */ 29 */
30void purge_waiting_ops(void) 30void purge_waiting_ops(void)
31{ 31{
32 struct orangefs_kernel_op_s *op; 32 struct orangefs_kernel_op_s *op, *tmp;
33 33
34 spin_lock(&orangefs_request_list_lock); 34 spin_lock(&orangefs_request_list_lock);
35 list_for_each_entry(op, &orangefs_request_list, list) { 35 list_for_each_entry_safe(op, tmp, &orangefs_request_list, list) {
36 gossip_debug(GOSSIP_WAIT_DEBUG, 36 gossip_debug(GOSSIP_WAIT_DEBUG,
37 "pvfs2-client-core: purging op tag %llu %s\n", 37 "pvfs2-client-core: purging op tag %llu %s\n",
38 llu(op->tag), 38 llu(op->tag),
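
The switch to list_for_each_entry_safe() above matters because purging an op removes it from the request list, and the plain iterator would then chase an unlinked (possibly freed) node. The general pattern, self-contained:

    struct item {
        struct list_head link;
    };

    static void drain(struct list_head *q)
    {
        struct item *it, *tmp;

        list_for_each_entry_safe(it, tmp, q, link) {
            list_del(&it->link);    /* safe: tmp already caches
                                     * the next node */
            kfree(it);
        }
    }
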
diff --git a/include/linux/swapops.h b/include/linux/swapops.h
index 9c5a2628d6ce..1d3877c39a00 100644
--- a/include/linux/swapops.h
+++ b/include/linux/swapops.h
@@ -124,6 +124,11 @@ static inline bool is_write_device_private_entry(swp_entry_t entry)
124 return unlikely(swp_type(entry) == SWP_DEVICE_WRITE); 124 return unlikely(swp_type(entry) == SWP_DEVICE_WRITE);
125} 125}
126 126
127static inline unsigned long device_private_entry_to_pfn(swp_entry_t entry)
128{
129 return swp_offset(entry);
130}
131
127static inline struct page *device_private_entry_to_page(swp_entry_t entry) 132static inline struct page *device_private_entry_to_page(swp_entry_t entry)
128{ 133{
129 return pfn_to_page(swp_offset(entry)); 134 return pfn_to_page(swp_offset(entry));
@@ -154,6 +159,11 @@ static inline bool is_write_device_private_entry(swp_entry_t entry)
154 return false; 159 return false;
155} 160}
156 161
162static inline unsigned long device_private_entry_to_pfn(swp_entry_t entry)
163{
164 return 0;
165}
166
157static inline struct page *device_private_entry_to_page(swp_entry_t entry) 167static inline struct page *device_private_entry_to_page(swp_entry_t entry)
158{ 168{
159 return NULL; 169 return NULL;
@@ -189,6 +199,11 @@ static inline int is_write_migration_entry(swp_entry_t entry)
189 return unlikely(swp_type(entry) == SWP_MIGRATION_WRITE); 199 return unlikely(swp_type(entry) == SWP_MIGRATION_WRITE);
190} 200}
191 201
202static inline unsigned long migration_entry_to_pfn(swp_entry_t entry)
203{
204 return swp_offset(entry);
205}
206
192static inline struct page *migration_entry_to_page(swp_entry_t entry) 207static inline struct page *migration_entry_to_page(swp_entry_t entry)
193{ 208{
194 struct page *p = pfn_to_page(swp_offset(entry)); 209 struct page *p = pfn_to_page(swp_offset(entry));
@@ -218,6 +233,12 @@ static inline int is_migration_entry(swp_entry_t swp)
218{ 233{
219 return 0; 234 return 0;
220} 235}
236
237static inline unsigned long migration_entry_to_pfn(swp_entry_t entry)
238{
239 return 0;
240}
241
221static inline struct page *migration_entry_to_page(swp_entry_t entry) 242static inline struct page *migration_entry_to_page(swp_entry_t entry)
222{ 243{
223 return NULL; 244 return NULL;
diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h
index 496e59a2738b..8fb90a0819c3 100644
--- a/include/uapi/linux/kvm.h
+++ b/include/uapi/linux/kvm.h
@@ -932,6 +932,8 @@ struct kvm_ppc_resize_hpt {
932#define KVM_CAP_HYPERV_SYNIC2 148 932#define KVM_CAP_HYPERV_SYNIC2 148
933#define KVM_CAP_HYPERV_VP_INDEX 149 933#define KVM_CAP_HYPERV_VP_INDEX 149
934#define KVM_CAP_S390_AIS_MIGRATION 150 934#define KVM_CAP_S390_AIS_MIGRATION 150
935#define KVM_CAP_PPC_GET_CPU_CHAR 151
936#define KVM_CAP_S390_BPB 152
935 937
936#ifdef KVM_CAP_IRQ_ROUTING 938#ifdef KVM_CAP_IRQ_ROUTING
937 939
@@ -1261,6 +1263,8 @@ struct kvm_s390_ucas_mapping {
1261#define KVM_PPC_CONFIGURE_V3_MMU _IOW(KVMIO, 0xaf, struct kvm_ppc_mmuv3_cfg) 1263#define KVM_PPC_CONFIGURE_V3_MMU _IOW(KVMIO, 0xaf, struct kvm_ppc_mmuv3_cfg)
1262/* Available with KVM_CAP_PPC_RADIX_MMU */ 1264/* Available with KVM_CAP_PPC_RADIX_MMU */
1263#define KVM_PPC_GET_RMMU_INFO _IOW(KVMIO, 0xb0, struct kvm_ppc_rmmu_info) 1265#define KVM_PPC_GET_RMMU_INFO _IOW(KVMIO, 0xb0, struct kvm_ppc_rmmu_info)
1266/* Available with KVM_CAP_PPC_GET_CPU_CHAR */
1267#define KVM_PPC_GET_CPU_CHAR _IOR(KVMIO, 0xb1, struct kvm_ppc_cpu_char)
1264 1268
1265/* ioctl for vm fd */ 1269/* ioctl for vm fd */
1266#define KVM_CREATE_DEVICE _IOWR(KVMIO, 0xe0, struct kvm_create_device) 1270#define KVM_CREATE_DEVICE _IOWR(KVMIO, 0xe0, struct kvm_create_device)
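
KVM_PPC_GET_CPU_CHAR is declared _IOR: a vm ioctl that only writes data back to userspace. A hedged userspace sketch, where vm_fd is an already-created VM descriptor and the KVM_CAP_PPC_GET_CPU_CHAR capability is assumed to have been checked:

    #include <linux/kvm.h>
    #include <stdio.h>
    #include <sys/ioctl.h>

    static int query_cpu_char(int vm_fd)
    {
        struct kvm_ppc_cpu_char cc;

        if (ioctl(vm_fd, KVM_PPC_GET_CPU_CHAR, &cc) < 0) {
            perror("KVM_PPC_GET_CPU_CHAR"); /* EFAULT if cc cannot
                                             * be written back */
            return -1;
        }
        return 0;
    }
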
diff --git a/kernel/irq/matrix.c b/kernel/irq/matrix.c
index 0ba0dd8863a7..5187dfe809ac 100644
--- a/kernel/irq/matrix.c
+++ b/kernel/irq/matrix.c
@@ -321,15 +321,23 @@ void irq_matrix_remove_reserved(struct irq_matrix *m)
321int irq_matrix_alloc(struct irq_matrix *m, const struct cpumask *msk, 321int irq_matrix_alloc(struct irq_matrix *m, const struct cpumask *msk,
322 bool reserved, unsigned int *mapped_cpu) 322 bool reserved, unsigned int *mapped_cpu)
323{ 323{
324 unsigned int cpu; 324 unsigned int cpu, best_cpu, maxavl = 0;
325 struct cpumap *cm;
326 unsigned int bit;
325 327
328 best_cpu = UINT_MAX;
326 for_each_cpu(cpu, msk) { 329 for_each_cpu(cpu, msk) {
327 struct cpumap *cm = per_cpu_ptr(m->maps, cpu); 330 cm = per_cpu_ptr(m->maps, cpu);
328 unsigned int bit;
329 331
330 if (!cm->online) 332 if (!cm->online || cm->available <= maxavl)
331 continue; 333 continue;
332 334
335 best_cpu = cpu;
336 maxavl = cm->available;
337 }
338
339 if (maxavl) {
340 cm = per_cpu_ptr(m->maps, best_cpu);
333 bit = matrix_alloc_area(m, cm, 1, false); 341 bit = matrix_alloc_area(m, cm, 1, false);
334 if (bit < m->alloc_end) { 342 if (bit < m->alloc_end) {
335 cm->allocated++; 343 cm->allocated++;
@@ -338,8 +346,8 @@ int irq_matrix_alloc(struct irq_matrix *m, const struct cpumask *msk,
338 m->global_available--; 346 m->global_available--;
339 if (reserved) 347 if (reserved)
340 m->global_reserved--; 348 m->global_reserved--;
341 *mapped_cpu = cpu; 349 *mapped_cpu = best_cpu;
342 trace_irq_matrix_alloc(bit, cpu, m, cm); 350 trace_irq_matrix_alloc(bit, best_cpu, m, cm);
343 return bit; 351 return bit;
344 } 352 }
345 } 353 }
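
The reshaped loop above turns allocation into two phases: a scan that remembers the online CPU with the most available vectors (best_cpu/maxavl), then a single allocation attempt on that CPU. Compared with the old first-fit walk, this spreads vectors across the mask instead of piling them onto the lowest-numbered online CPU. The selection phase, annotated:

    best_cpu = UINT_MAX;
    maxavl = 0;
    for_each_cpu(cpu, msk) {
        cm = per_cpu_ptr(m->maps, cpu);
        /* skip offline CPUs and any CPU no better than the best so far */
        if (!cm->online || cm->available <= maxavl)
            continue;
        best_cpu = cpu;
        maxavl = cm->available;
    }
    /* maxavl == 0 means no online CPU had room: fall through to -ENOSPC */
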
diff --git a/mm/page_vma_mapped.c b/mm/page_vma_mapped.c
index d22b84310f6d..ae3c2a35d61b 100644
--- a/mm/page_vma_mapped.c
+++ b/mm/page_vma_mapped.c
@@ -30,10 +30,37 @@ static bool map_pte(struct page_vma_mapped_walk *pvmw)
30 return true; 30 return true;
31} 31}
32 32
33static inline bool pfn_in_hpage(struct page *hpage, unsigned long pfn)
34{
35 unsigned long hpage_pfn = page_to_pfn(hpage);
36
37 /* THP can be referenced by any subpage */
38 return pfn >= hpage_pfn && pfn - hpage_pfn < hpage_nr_pages(hpage);
39}
40
41/**
42 * check_pte - check if @pvmw->page is mapped at the @pvmw->pte
43 *
44 * page_vma_mapped_walk() found a place where @pvmw->page is *potentially*
45 * mapped. check_pte() has to validate this.
46 *
47 * @pvmw->pte may point to empty PTE, swap PTE or PTE pointing to arbitrary
48 * page.
49 *
50 * If PVMW_MIGRATION flag is set, returns true if @pvmw->pte contains migration
51 * entry that points to @pvmw->page or any subpage in case of THP.
52 *
53 * If PVMW_MIGRATION flag is not set, returns true if @pvmw->pte points to
54 * @pvmw->page or any subpage in case of THP.
55 *
56 * Otherwise, return false.
57 *
58 */
33static bool check_pte(struct page_vma_mapped_walk *pvmw) 59static bool check_pte(struct page_vma_mapped_walk *pvmw)
34{ 60{
61 unsigned long pfn;
62
35 if (pvmw->flags & PVMW_MIGRATION) { 63 if (pvmw->flags & PVMW_MIGRATION) {
36#ifdef CONFIG_MIGRATION
37 swp_entry_t entry; 64 swp_entry_t entry;
38 if (!is_swap_pte(*pvmw->pte)) 65 if (!is_swap_pte(*pvmw->pte))
39 return false; 66 return false;
@@ -41,38 +68,25 @@ static bool check_pte(struct page_vma_mapped_walk *pvmw)
41 68
42 if (!is_migration_entry(entry)) 69 if (!is_migration_entry(entry))
43 return false; 70 return false;
44 if (migration_entry_to_page(entry) - pvmw->page >=
45 hpage_nr_pages(pvmw->page)) {
46 return false;
47 }
48 if (migration_entry_to_page(entry) < pvmw->page)
49 return false;
50#else
51 WARN_ON_ONCE(1);
52#endif
53 } else {
54 if (is_swap_pte(*pvmw->pte)) {
55 swp_entry_t entry;
56 71
57 entry = pte_to_swp_entry(*pvmw->pte); 72 pfn = migration_entry_to_pfn(entry);
58 if (is_device_private_entry(entry) && 73 } else if (is_swap_pte(*pvmw->pte)) {
59 device_private_entry_to_page(entry) == pvmw->page) 74 swp_entry_t entry;
60 return true;
61 }
62 75
63 if (!pte_present(*pvmw->pte)) 76 /* Handle un-addressable ZONE_DEVICE memory */
77 entry = pte_to_swp_entry(*pvmw->pte);
78 if (!is_device_private_entry(entry))
64 return false; 79 return false;
65 80
66 /* THP can be referenced by any subpage */ 81 pfn = device_private_entry_to_pfn(entry);
67 if (pte_page(*pvmw->pte) - pvmw->page >= 82 } else {
68 hpage_nr_pages(pvmw->page)) { 83 if (!pte_present(*pvmw->pte))
69 return false;
70 }
71 if (pte_page(*pvmw->pte) < pvmw->page)
72 return false; 84 return false;
85
86 pfn = pte_pfn(*pvmw->pte);
73 } 87 }
74 88
75 return true; 89 return pfn_in_hpage(pvmw->page, pfn);
76} 90}
77 91
78/** 92/**
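
check_pte() now reduces every accepted case to a PFN and performs one final pfn_in_hpage() test. The deleted code compared struct page pointers arithmetically (migration_entry_to_page(entry) - pvmw->page, and likewise for pte_page()), which is only safe when both memmap entries live in one contiguous array, something classic SPARSEMEM does not guarantee; comparing PFNs via the new swapops helpers handles the migration, device-private and present-pte cases identically. A hypothetical caller of the helper:

    /* does this present pte map any subpage of the (possibly huge) page? */
    if (pte_present(*pvmw->pte) &&
        pfn_in_hpage(pvmw->page, pte_pfn(*pvmw->pte)))
        return true;
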
diff --git a/net/core/dev.c b/net/core/dev.c
index 77795f66c246..4670ccabe23a 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -3166,10 +3166,21 @@ static void qdisc_pkt_len_init(struct sk_buff *skb)
3166 hdr_len = skb_transport_header(skb) - skb_mac_header(skb); 3166 hdr_len = skb_transport_header(skb) - skb_mac_header(skb);
3167 3167
3168 /* + transport layer */ 3168 /* + transport layer */
3169 if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))) 3169 if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))) {
3170 hdr_len += tcp_hdrlen(skb); 3170 const struct tcphdr *th;
3171 else 3171 struct tcphdr _tcphdr;
3172 hdr_len += sizeof(struct udphdr); 3172
3173 th = skb_header_pointer(skb, skb_transport_offset(skb),
3174 sizeof(_tcphdr), &_tcphdr);
3175 if (likely(th))
3176 hdr_len += __tcp_hdrlen(th);
3177 } else {
3178 struct udphdr _udphdr;
3179
3180 if (skb_header_pointer(skb, skb_transport_offset(skb),
3181 sizeof(_udphdr), &_udphdr))
3182 hdr_len += sizeof(struct udphdr);
3183 }
3173 3184
3174 if (shinfo->gso_type & SKB_GSO_DODGY) 3185 if (shinfo->gso_type & SKB_GSO_DODGY)
3175 gso_segs = DIV_ROUND_UP(skb->len - hdr_len, 3186 gso_segs = DIV_ROUND_UP(skb->len - hdr_len,
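
The hardening above relies on skb_header_pointer(): it returns a pointer straight into the skb when the requested bytes are linear, copies them into the caller-supplied buffer when they are paged, and returns NULL when the packet is too short, so a forged gso packet can no longer push hdr_len past the data that is actually present. The TCP arm in isolation:

    struct tcphdr _tcphdr;
    const struct tcphdr *th;

    th = skb_header_pointer(skb, skb_transport_offset(skb),
                            sizeof(_tcphdr), &_tcphdr);
    if (likely(th))
        hdr_len += __tcp_hdrlen(th);    /* th->doff * 4 */
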
diff --git a/net/ipv4/esp4_offload.c b/net/ipv4/esp4_offload.c
index 32fbd9ba3609..da5635fc52c2 100644
--- a/net/ipv4/esp4_offload.c
+++ b/net/ipv4/esp4_offload.c
@@ -118,6 +118,9 @@ static struct sk_buff *esp4_gso_segment(struct sk_buff *skb,
118 if (!xo) 118 if (!xo)
119 return ERR_PTR(-EINVAL); 119 return ERR_PTR(-EINVAL);
120 120
121 if (!(skb_shinfo(skb)->gso_type & SKB_GSO_ESP))
122 return ERR_PTR(-EINVAL);
123
121 x = skb->sp->xvec[skb->sp->len - 1]; 124 x = skb->sp->xvec[skb->sp->len - 1];
122 aead = x->data; 125 aead = x->data;
123 esph = ip_esp_hdr(skb); 126 esph = ip_esp_hdr(skb);
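
This guard is one instance of a pattern repeated in the tcp4, udp4, esp6, tcp6, udp6 and sctp hunks below: every *_gso_segment() handler first verifies that the packet's declared gso_type actually names its own protocol, since a gso_type supplied from outside the stack (for example via a virtio_net header) can otherwise steer a mislabelled packet into a handler that parses it as the wrong protocol. The shape of the check, per protocol:

    /* in esp4_gso_segment()-style handlers: */
    if (!(skb_shinfo(skb)->gso_type & SKB_GSO_ESP))
        return ERR_PTR(-EINVAL);
    /* the tcp4 handler tests SKB_GSO_TCPV4, udp4 tests SKB_GSO_UDP,
     * and so on for the v6 and sctp variants */
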
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index 02f00be12bb0..10f7f74a0831 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -332,7 +332,7 @@ static __be32 igmpv3_get_srcaddr(struct net_device *dev,
332 return htonl(INADDR_ANY); 332 return htonl(INADDR_ANY);
333 333
334 for_ifa(in_dev) { 334 for_ifa(in_dev) {
335 if (inet_ifa_match(fl4->saddr, ifa)) 335 if (fl4->saddr == ifa->ifa_local)
336 return fl4->saddr; 336 return fl4->saddr;
337 } endfor_ifa(in_dev); 337 } endfor_ifa(in_dev);
338 338
diff --git a/net/ipv4/tcp_offload.c b/net/ipv4/tcp_offload.c
index b6a2aa1dcf56..4d58e2ce0b5b 100644
--- a/net/ipv4/tcp_offload.c
+++ b/net/ipv4/tcp_offload.c
@@ -32,6 +32,9 @@ static void tcp_gso_tstamp(struct sk_buff *skb, unsigned int ts_seq,
32static struct sk_buff *tcp4_gso_segment(struct sk_buff *skb, 32static struct sk_buff *tcp4_gso_segment(struct sk_buff *skb,
33 netdev_features_t features) 33 netdev_features_t features)
34{ 34{
35 if (!(skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4))
36 return ERR_PTR(-EINVAL);
37
35 if (!pskb_may_pull(skb, sizeof(struct tcphdr))) 38 if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
36 return ERR_PTR(-EINVAL); 39 return ERR_PTR(-EINVAL);
37 40
diff --git a/net/ipv4/udp_offload.c b/net/ipv4/udp_offload.c
index 01801b77bd0d..ea6e6e7df0ee 100644
--- a/net/ipv4/udp_offload.c
+++ b/net/ipv4/udp_offload.c
@@ -203,6 +203,9 @@ static struct sk_buff *udp4_ufo_fragment(struct sk_buff *skb,
203 goto out; 203 goto out;
204 } 204 }
205 205
206 if (!(skb_shinfo(skb)->gso_type & SKB_GSO_UDP))
207 goto out;
208
206 if (!pskb_may_pull(skb, sizeof(struct udphdr))) 209 if (!pskb_may_pull(skb, sizeof(struct udphdr)))
207 goto out; 210 goto out;
208 211
diff --git a/net/ipv6/esp6_offload.c b/net/ipv6/esp6_offload.c
index 44d109c435bc..3fd1ec775dc2 100644
--- a/net/ipv6/esp6_offload.c
+++ b/net/ipv6/esp6_offload.c
@@ -145,6 +145,9 @@ static struct sk_buff *esp6_gso_segment(struct sk_buff *skb,
145 if (!xo) 145 if (!xo)
146 return ERR_PTR(-EINVAL); 146 return ERR_PTR(-EINVAL);
147 147
148 if (!(skb_shinfo(skb)->gso_type & SKB_GSO_ESP))
149 return ERR_PTR(-EINVAL);
150
148 x = skb->sp->xvec[skb->sp->len - 1]; 151 x = skb->sp->xvec[skb->sp->len - 1];
149 aead = x->data; 152 aead = x->data;
150 esph = ip_esp_hdr(skb); 153 esph = ip_esp_hdr(skb);
diff --git a/net/ipv6/tcpv6_offload.c b/net/ipv6/tcpv6_offload.c
index d883c9204c01..278e49cd67d4 100644
--- a/net/ipv6/tcpv6_offload.c
+++ b/net/ipv6/tcpv6_offload.c
@@ -46,6 +46,9 @@ static struct sk_buff *tcp6_gso_segment(struct sk_buff *skb,
46{ 46{
47 struct tcphdr *th; 47 struct tcphdr *th;
48 48
49 if (!(skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6))
50 return ERR_PTR(-EINVAL);
51
49 if (!pskb_may_pull(skb, sizeof(*th))) 52 if (!pskb_may_pull(skb, sizeof(*th)))
50 return ERR_PTR(-EINVAL); 53 return ERR_PTR(-EINVAL);
51 54
diff --git a/net/ipv6/udp_offload.c b/net/ipv6/udp_offload.c
index a0f89ad76f9d..2a04dc9c781b 100644
--- a/net/ipv6/udp_offload.c
+++ b/net/ipv6/udp_offload.c
@@ -42,6 +42,9 @@ static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb,
42 const struct ipv6hdr *ipv6h; 42 const struct ipv6hdr *ipv6h;
43 struct udphdr *uh; 43 struct udphdr *uh;
44 44
45 if (!(skb_shinfo(skb)->gso_type & SKB_GSO_UDP))
46 goto out;
47
45 if (!pskb_may_pull(skb, sizeof(struct udphdr))) 48 if (!pskb_may_pull(skb, sizeof(struct udphdr)))
46 goto out; 49 goto out;
47 50
diff --git a/net/rds/tcp.c b/net/rds/tcp.c
index 2e554ef6d75f..9920d2f84eff 100644
--- a/net/rds/tcp.c
+++ b/net/rds/tcp.c
@@ -90,9 +90,10 @@ void rds_tcp_nonagle(struct socket *sock)
90 sizeof(val)); 90 sizeof(val));
91} 91}
92 92
93u32 rds_tcp_snd_nxt(struct rds_tcp_connection *tc) 93u32 rds_tcp_write_seq(struct rds_tcp_connection *tc)
94{ 94{
95 return tcp_sk(tc->t_sock->sk)->snd_nxt; 95 /* seq# of the last byte of data in tcp send buffer */
96 return tcp_sk(tc->t_sock->sk)->write_seq;
96} 97}
97 98
98u32 rds_tcp_snd_una(struct rds_tcp_connection *tc) 99u32 rds_tcp_snd_una(struct rds_tcp_connection *tc)
diff --git a/net/rds/tcp.h b/net/rds/tcp.h
index e7858ee8ed8b..c6fa080e9b6d 100644
--- a/net/rds/tcp.h
+++ b/net/rds/tcp.h
@@ -55,7 +55,7 @@ void rds_tcp_set_callbacks(struct socket *sock, struct rds_conn_path *cp);
55void rds_tcp_reset_callbacks(struct socket *sock, struct rds_conn_path *cp); 55void rds_tcp_reset_callbacks(struct socket *sock, struct rds_conn_path *cp);
56void rds_tcp_restore_callbacks(struct socket *sock, 56void rds_tcp_restore_callbacks(struct socket *sock,
57 struct rds_tcp_connection *tc); 57 struct rds_tcp_connection *tc);
58u32 rds_tcp_snd_nxt(struct rds_tcp_connection *tc); 58u32 rds_tcp_write_seq(struct rds_tcp_connection *tc);
59u32 rds_tcp_snd_una(struct rds_tcp_connection *tc); 59u32 rds_tcp_snd_una(struct rds_tcp_connection *tc);
60u64 rds_tcp_map_seq(struct rds_tcp_connection *tc, u32 seq); 60u64 rds_tcp_map_seq(struct rds_tcp_connection *tc, u32 seq);
61extern struct rds_transport rds_tcp_transport; 61extern struct rds_transport rds_tcp_transport;
diff --git a/net/rds/tcp_send.c b/net/rds/tcp_send.c
index 73c74763ca72..16f65744d984 100644
--- a/net/rds/tcp_send.c
+++ b/net/rds/tcp_send.c
@@ -86,7 +86,7 @@ int rds_tcp_xmit(struct rds_connection *conn, struct rds_message *rm,
86 * m_ack_seq is set to the sequence number of the last byte of 86 * m_ack_seq is set to the sequence number of the last byte of
87 * header and data. see rds_tcp_is_acked(). 87 * header and data. see rds_tcp_is_acked().
88 */ 88 */
89 tc->t_last_sent_nxt = rds_tcp_snd_nxt(tc); 89 tc->t_last_sent_nxt = rds_tcp_write_seq(tc);
90 rm->m_ack_seq = tc->t_last_sent_nxt + 90 rm->m_ack_seq = tc->t_last_sent_nxt +
91 sizeof(struct rds_header) + 91 sizeof(struct rds_header) +
92 be32_to_cpu(rm->m_inc.i_hdr.h_len) - 1; 92 be32_to_cpu(rm->m_inc.i_hdr.h_len) - 1;
@@ -98,7 +98,7 @@ int rds_tcp_xmit(struct rds_connection *conn, struct rds_message *rm,
98 rm->m_inc.i_hdr.h_flags |= RDS_FLAG_RETRANSMITTED; 98 rm->m_inc.i_hdr.h_flags |= RDS_FLAG_RETRANSMITTED;
99 99
100 rdsdebug("rm %p tcp nxt %u ack_seq %llu\n", 100 rdsdebug("rm %p tcp nxt %u ack_seq %llu\n",
101 rm, rds_tcp_snd_nxt(tc), 101 rm, rds_tcp_write_seq(tc),
102 (unsigned long long)rm->m_ack_seq); 102 (unsigned long long)rm->m_ack_seq);
103 } 103 }
104 104
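
The rename above is the fix: tcp_sk(sk)->snd_nxt is the next sequence TCP will transmit, which lags behind data that is queued but not yet sent, while write_seq marks the tail of the send buffer. Since m_ack_seq must identify the last byte of this message as queued into the stream, it has to be derived from write_seq, as the updated rds_tcp_xmit() does:

    /* last byte of header + payload for this message in TCP's stream */
    rm->m_ack_seq = rds_tcp_write_seq(tc) +
                    sizeof(struct rds_header) +
                    be32_to_cpu(rm->m_inc.i_hdr.h_len) - 1;
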
diff --git a/net/sctp/offload.c b/net/sctp/offload.c
index 275925b93b29..35bc7106d182 100644
--- a/net/sctp/offload.c
+++ b/net/sctp/offload.c
@@ -45,6 +45,9 @@ static struct sk_buff *sctp_gso_segment(struct sk_buff *skb,
45 struct sk_buff *segs = ERR_PTR(-EINVAL); 45 struct sk_buff *segs = ERR_PTR(-EINVAL);
46 struct sctphdr *sh; 46 struct sctphdr *sh;
47 47
48 if (!(skb_shinfo(skb)->gso_type & SKB_GSO_SCTP))
49 goto out;
50
48 sh = sctp_hdr(skb); 51 sh = sctp_hdr(skb);
49 if (!pskb_may_pull(skb, sizeof(*sh))) 52 if (!pskb_may_pull(skb, sizeof(*sh)))
50 goto out; 53 goto out;
diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c
index 61f394d369bf..0a9b72fbd761 100644
--- a/net/tls/tls_sw.c
+++ b/net/tls/tls_sw.c
@@ -577,6 +577,8 @@ alloc_payload:
577 get_page(page); 577 get_page(page);
578 sg = ctx->sg_plaintext_data + ctx->sg_plaintext_num_elem; 578 sg = ctx->sg_plaintext_data + ctx->sg_plaintext_num_elem;
579 sg_set_page(sg, page, copy, offset); 579 sg_set_page(sg, page, copy, offset);
580 sg_unmark_end(sg);
581
580 ctx->sg_plaintext_num_elem++; 582 ctx->sg_plaintext_num_elem++;
581 583
582 sk_mem_charge(sk, copy); 584 sk_mem_charge(sk, copy);
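
sg_set_page() deliberately preserves an entry's chain/end bits, so when tls_sw reuses scatterlist slots a previous end marker can survive into the middle of the list and make iteration stop early; the added sg_unmark_end() clears it right after the page is installed:

    sg_set_page(sg, page, copy, offset);    /* keeps any old SG_END bit */
    sg_unmark_end(sg);                      /* so clear it explicitly */
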
diff --git a/virt/kvm/arm/mmu.c b/virt/kvm/arm/mmu.c
index b4b69c2d1012..9dea96380339 100644
--- a/virt/kvm/arm/mmu.c
+++ b/virt/kvm/arm/mmu.c
@@ -1310,7 +1310,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
1310 return -EFAULT; 1310 return -EFAULT;
1311 } 1311 }
1312 1312
1313 if (is_vm_hugetlb_page(vma) && !logging_active) { 1313 if (vma_kernel_pagesize(vma) == PMD_SIZE && !logging_active) {
1314 hugetlb = true; 1314 hugetlb = true;
1315 gfn = (fault_ipa & PMD_MASK) >> PAGE_SHIFT; 1315 gfn = (fault_ipa & PMD_MASK) >> PAGE_SHIFT;
1316 } else { 1316 } else {
diff --git a/virt/kvm/arm/vgic/vgic-init.c b/virt/kvm/arm/vgic/vgic-init.c
index 62310122ee78..743ca5cb05ef 100644
--- a/virt/kvm/arm/vgic/vgic-init.c
+++ b/virt/kvm/arm/vgic/vgic-init.c
@@ -285,9 +285,11 @@ int vgic_init(struct kvm *kvm)
285 if (ret) 285 if (ret)
286 goto out; 286 goto out;
287 287
288 ret = vgic_v4_init(kvm); 288 if (vgic_has_its(kvm)) {
289 if (ret) 289 ret = vgic_v4_init(kvm);
290 goto out; 290 if (ret)
291 goto out;
292 }
291 293
292 kvm_for_each_vcpu(i, vcpu, kvm) 294 kvm_for_each_vcpu(i, vcpu, kvm)
293 kvm_vgic_vcpu_enable(vcpu); 295 kvm_vgic_vcpu_enable(vcpu);
diff --git a/virt/kvm/arm/vgic/vgic-v4.c b/virt/kvm/arm/vgic/vgic-v4.c
index 4a37292855bc..bc4265154bac 100644
--- a/virt/kvm/arm/vgic/vgic-v4.c
+++ b/virt/kvm/arm/vgic/vgic-v4.c
@@ -118,7 +118,7 @@ int vgic_v4_init(struct kvm *kvm)
118 struct kvm_vcpu *vcpu; 118 struct kvm_vcpu *vcpu;
119 int i, nr_vcpus, ret; 119 int i, nr_vcpus, ret;
120 120
121 if (!vgic_supports_direct_msis(kvm)) 121 if (!kvm_vgic_global_state.has_gicv4)
122 return 0; /* Nothing to see here... move along. */ 122 return 0; /* Nothing to see here... move along. */
123 123
124 if (dist->its_vm.vpes) 124 if (dist->its_vm.vpes)