-rw-r--r--  Documentation/x86/intel_mpx.txt  6
-rw-r--r--  Documentation/x86/tlb.txt  4
-rw-r--r--  Documentation/x86/x86_64/machinecheck  2
-rw-r--r--  arch/arm64/include/asm/cputype.h  2
-rw-r--r--  arch/arm64/include/asm/ptrace.h  2
-rw-r--r--  arch/arm64/kernel/asm-offsets.c  1
-rw-r--r--  arch/arm64/kernel/cpu_errata.c  6
-rw-r--r--  arch/arm64/kernel/entry.S  19
-rw-r--r--  arch/arm64/mm/fault.c  3
-rw-r--r--  arch/x86/events/core.c  11
-rw-r--r--  arch/x86/events/intel/Makefile  4
-rw-r--r--  arch/x86/events/intel/core.c  29
-rw-r--r--  arch/x86/kernel/amd_nb.c  4
-rw-r--r--  arch/x86/pci/acpi.c  1
-rw-r--r--  arch/x86/power/hibernate_64.c  97
-rw-r--r--  arch/x86/power/hibernate_asm_64.S  55
-rw-r--r--  block/ioprio.c  2
-rw-r--r--  drivers/acpi/acpi_dbg.c  4
-rw-r--r--  drivers/acpi/acpica/nsload.c  7
-rw-r--r--  drivers/acpi/acpica/nsparse.c  9
-rw-r--r--  drivers/acpi/pci_link.c  63
-rw-r--r--  drivers/block/xen-blkfront.c  91
-rw-r--r--  drivers/cpuidle/cpuidle.c  12
-rw-r--r--  drivers/gpio/Kconfig  2
-rw-r--r--  drivers/gpio/gpio-sch.c  21
-rw-r--r--  drivers/gpio/gpiolib-legacy.c  8
-rw-r--r--  drivers/gpio/gpiolib.c  52
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.c  32
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c  4
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.h  2
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.c  2
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/tonga_processpptables.c  2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgf119.c  3
-rw-r--r--  drivers/gpu/drm/sun4i/sun4i_crtc.c  8
-rw-r--r--  drivers/gpu/drm/sun4i/sun4i_drv.c  3
-rw-r--r--  drivers/irqchip/irq-mips-gic.c  4
-rw-r--r--  drivers/net/bonding/bond_3ad.c  11
-rw-r--r--  drivers/net/bonding/bond_alb.c  7
-rw-r--r--  drivers/net/bonding/bond_main.c  1
-rw-r--r--  drivers/net/ethernet/broadcom/bcmsysport.c  2
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h  12
-rw-r--r--  drivers/net/ethernet/intel/e1000e/netdev.c  21
-rw-r--r--  drivers/net/ethernet/intel/ixgbevf/mbx.c  4
-rw-r--r--  drivers/net/ethernet/marvell/mvneta.c  2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/cmd.c  129
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en.h  11
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c  8
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_main.c  99
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_rx.c  41
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_tx.c  52
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/health.c  11
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/main.c  41
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c  63
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/vport.c  3
-rw-r--r--  drivers/net/ethernet/microchip/enc28j60.c  7
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c  2
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_main.c  2
-rw-r--r--  drivers/net/geneve.c  9
-rw-r--r--  drivers/net/macsec.c  1
-rw-r--r--  drivers/net/phy/dp83867.c  13
-rw-r--r--  drivers/net/usb/cdc_ncm.c  7
-rw-r--r--  drivers/net/usb/r8152.c  35
-rw-r--r--  drivers/net/usb/usbnet.c  10
-rw-r--r--  drivers/platform/chrome/cros_ec_dev.c  8
-rw-r--r--  drivers/s390/net/qeth_l2_main.c  1
-rw-r--r--  drivers/s390/net/qeth_l3_main.c  1
-rw-r--r--  drivers/xen/xen-acpi-processor.c  35
-rw-r--r--  drivers/xen/xenbus/xenbus_dev_frontend.c  14
-rw-r--r--  drivers/xen/xenbus/xenbus_xs.c  10
-rw-r--r--  fs/configfs/file.c  2
-rw-r--r--  fs/fs-writeback.c  2
-rw-r--r--  include/acpi/acpi_drivers.h  1
-rw-r--r--  include/linux/mlx5/driver.h  1
-rw-r--r--  include/linux/skbuff.h  20
-rw-r--r--  include/net/bonding.h  7
-rw-r--r--  include/net/ip.h  5
-rw-r--r--  init/Kconfig  1
-rw-r--r--  kernel/events/core.c  23
-rw-r--r--  kernel/sched/fair.c  42
-rw-r--r--  net/bridge/br_netfilter_hooks.c  2
-rw-r--r--  net/core/flow_dissector.c  43
-rw-r--r--  net/core/skbuff.c  18
-rw-r--r--  net/decnet/dn_fib.c  21
-rw-r--r--  net/ipv4/ip_output.c  4
-rw-r--r--  net/ipv6/ip6_fib.c  1
-rw-r--r--  net/packet/af_packet.c  2
-rw-r--r--  net/rds/tcp.c  5
-rw-r--r--  net/sched/act_mirred.c  2
-rw-r--r--  net/tipc/netlink_compat.c  2
-rw-r--r--  security/apparmor/lsm.c  36
-rw-r--r--  sound/core/timer.c  2
-rw-r--r--  sound/pci/au88x0/au88x0_core.c  5
-rw-r--r--  sound/pci/echoaudio/echoaudio.c  4
-rw-r--r--  sound/pci/hda/hda_generic.c  2
-rw-r--r--  sound/pci/hda/hda_intel.c  6
-rw-r--r--  sound/pci/hda/patch_realtek.c  1
-rw-r--r--  sound/soc/codecs/Kconfig  7
-rw-r--r--  sound/soc/codecs/ak4613.c  2
-rw-r--r--  sound/soc/codecs/cx20442.c  1
-rw-r--r--  sound/soc/codecs/hdac_hdmi.c  20
-rw-r--r--  sound/soc/codecs/rt5645.c  2
-rw-r--r--  sound/soc/codecs/rt5670.c  2
-rw-r--r--  sound/soc/codecs/wm5102.c  2
-rw-r--r--  sound/soc/codecs/wm5110.c  1
-rw-r--r--  sound/soc/codecs/wm8940.c  1
-rw-r--r--  sound/soc/davinci/davinci-mcasp.c  56
-rw-r--r--  sound/soc/davinci/davinci-mcasp.h  4
-rw-r--r--  sound/soc/fsl/fsl_ssi.c  12
-rw-r--r--  sound/soc/intel/atom/sst-mfld-platform-compress.c  9
-rw-r--r--  sound/soc/intel/skylake/bxt-sst.c  1
-rw-r--r--  sound/soc/sh/rcar/adg.c  2
111 files changed, 1079 insertions(+), 553 deletions(-)
diff --git a/Documentation/x86/intel_mpx.txt b/Documentation/x86/intel_mpx.txt
index 1a5a12184a35..85d0549ad846 100644
--- a/Documentation/x86/intel_mpx.txt
+++ b/Documentation/x86/intel_mpx.txt
@@ -45,7 +45,7 @@ is how we expect the compiler, application and kernel to work together.
    MPX-instrumented.
 3) The kernel detects that the CPU has MPX, allows the new prctl() to
    succeed, and notes the location of the bounds directory. Userspace is
-   expected to keep the bounds directory at that locationWe note it
+   expected to keep the bounds directory at that location. We note it
    instead of reading it each time because the 'xsave' operation needed
    to access the bounds directory register is an expensive operation.
 4) If the application needs to spill bounds out of the 4 registers, it
@@ -167,7 +167,7 @@ If a #BR is generated due to a bounds violation caused by MPX.
 We need to decode MPX instructions to get violation address and
 set this address into extended struct siginfo.
 
-The _sigfault feild of struct siginfo is extended as follow:
+The _sigfault field of struct siginfo is extended as follow:
 
 87	/* SIGILL, SIGFPE, SIGSEGV, SIGBUS */
 88	struct {
@@ -240,5 +240,5 @@ them at the same bounds table.
 This is allowed architecturally. See more information "Intel(R) Architecture
 Instruction Set Extensions Programming Reference" (9.3.4).
 
-However, if users did this, the kernel might be fooled in to unmaping an
+However, if users did this, the kernel might be fooled in to unmapping an
 in-use bounds table since it does not recognize sharing.
diff --git a/Documentation/x86/tlb.txt b/Documentation/x86/tlb.txt
index 39d172326703..6a0607b99ed8 100644
--- a/Documentation/x86/tlb.txt
+++ b/Documentation/x86/tlb.txt
@@ -5,7 +5,7 @@ memory, it has two choices:
     from areas other than the one we are trying to flush will be
     destroyed and must be refilled later, at some cost.
  2. Use the invlpg instruction to invalidate a single page at a
-    time. This could potentialy cost many more instructions, but
+    time. This could potentially cost many more instructions, but
     it is a much more precise operation, causing no collateral
     damage to other TLB entries.
 
@@ -19,7 +19,7 @@ Which method to do depends on a few things:
     work.
  3. The size of the TLB. The larger the TLB, the more collateral
     damage we do with a full flush. So, the larger the TLB, the
-    more attrative an individual flush looks. Data and
+    more attractive an individual flush looks. Data and
     instructions have separate TLBs, as do different page sizes.
  4. The microarchitecture. The TLB has become a multi-level
     cache on modern CPUs, and the global flushes have become more
diff --git a/Documentation/x86/x86_64/machinecheck b/Documentation/x86/x86_64/machinecheck
index b1fb30273286..d0648a74fceb 100644
--- a/Documentation/x86/x86_64/machinecheck
+++ b/Documentation/x86/x86_64/machinecheck
@@ -36,7 +36,7 @@ between all CPUs.
 
 check_interval
 	How often to poll for corrected machine check errors, in seconds
-	(Note output is hexademical). Default 5 minutes. When the poller
+	(Note output is hexadecimal). Default 5 minutes. When the poller
 	finds MCEs it triggers an exponential speedup (poll more often) on
 	the polling interval. When the poller stops finding MCEs, it
 	triggers an exponential backoff (poll less often) on the polling
diff --git a/arch/arm64/include/asm/cputype.h b/arch/arm64/include/asm/cputype.h
index 87e1985f3be8..9d9fd4b9a72e 100644
--- a/arch/arm64/include/asm/cputype.h
+++ b/arch/arm64/include/asm/cputype.h
@@ -80,12 +80,14 @@
 #define APM_CPU_PART_POTENZA		0x000
 
 #define CAVIUM_CPU_PART_THUNDERX	0x0A1
+#define CAVIUM_CPU_PART_THUNDERX_81XX	0x0A2
 
 #define BRCM_CPU_PART_VULCAN		0x516
 
 #define MIDR_CORTEX_A53 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A53)
 #define MIDR_CORTEX_A57 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A57)
 #define MIDR_THUNDERX	MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX)
+#define MIDR_THUNDERX_81XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_81XX)
 
 #ifndef __ASSEMBLY__
 
diff --git a/arch/arm64/include/asm/ptrace.h b/arch/arm64/include/asm/ptrace.h
index a307eb6e7fa8..7f94755089e2 100644
--- a/arch/arm64/include/asm/ptrace.h
+++ b/arch/arm64/include/asm/ptrace.h
@@ -117,6 +117,8 @@ struct pt_regs {
 	};
 	u64 orig_x0;
 	u64 syscallno;
+	u64 orig_addr_limit;
+	u64 unused;	// maintain 16 byte alignment
 };
 
 #define arch_has_single_step()	(1)
diff --git a/arch/arm64/kernel/asm-offsets.c b/arch/arm64/kernel/asm-offsets.c
index f8e5d47f0880..2f4ba774488a 100644
--- a/arch/arm64/kernel/asm-offsets.c
+++ b/arch/arm64/kernel/asm-offsets.c
@@ -60,6 +60,7 @@ int main(void)
   DEFINE(S_PC,			offsetof(struct pt_regs, pc));
   DEFINE(S_ORIG_X0,		offsetof(struct pt_regs, orig_x0));
   DEFINE(S_SYSCALLNO,		offsetof(struct pt_regs, syscallno));
+  DEFINE(S_ORIG_ADDR_LIMIT,	offsetof(struct pt_regs, orig_addr_limit));
   DEFINE(S_FRAME_SIZE,		sizeof(struct pt_regs));
   BLANK();
   DEFINE(MM_CONTEXT_ID,		offsetof(struct mm_struct, context.id.counter));
diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
index d42789499f17..af716b65110d 100644
--- a/arch/arm64/kernel/cpu_errata.c
+++ b/arch/arm64/kernel/cpu_errata.c
@@ -98,6 +98,12 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
 		MIDR_RANGE(MIDR_THUNDERX, 0x00,
 			   (1 << MIDR_VARIANT_SHIFT) | 1),
 	},
+	{
+	/* Cavium ThunderX, T81 pass 1.0 */
+		.desc = "Cavium erratum 27456",
+		.capability = ARM64_WORKAROUND_CAVIUM_27456,
+		MIDR_RANGE(MIDR_THUNDERX_81XX, 0x00, 0x00),
+	},
 #endif
 	{
 	}
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index 12e8d2bcb3f9..6c3b7345a6c4 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -28,6 +28,7 @@
 #include <asm/errno.h>
 #include <asm/esr.h>
 #include <asm/irq.h>
+#include <asm/memory.h>
 #include <asm/thread_info.h>
 #include <asm/unistd.h>
 
@@ -97,7 +98,14 @@
 	mov	x29, xzr			// fp pointed to user-space
 	.else
 	add	x21, sp, #S_FRAME_SIZE
-	.endif
+	get_thread_info tsk
+	/* Save the task's original addr_limit and set USER_DS (TASK_SIZE_64) */
+	ldr	x20, [tsk, #TI_ADDR_LIMIT]
+	str	x20, [sp, #S_ORIG_ADDR_LIMIT]
+	mov	x20, #TASK_SIZE_64
+	str	x20, [tsk, #TI_ADDR_LIMIT]
+	ALTERNATIVE(nop, SET_PSTATE_UAO(0), ARM64_HAS_UAO, CONFIG_ARM64_UAO)
+	.endif /* \el == 0 */
 	mrs	x22, elr_el1
 	mrs	x23, spsr_el1
 	stp	lr, x21, [sp, #S_LR]
@@ -128,6 +136,14 @@
 	.endm
 
 	.macro	kernel_exit, el
+	.if	\el != 0
+	/* Restore the task's original addr_limit. */
+	ldr	x20, [sp, #S_ORIG_ADDR_LIMIT]
+	str	x20, [tsk, #TI_ADDR_LIMIT]
+
+	/* No need to restore UAO, it will be restored from SPSR_EL1 */
+	.endif
+
 	ldp	x21, x22, [sp, #S_PC]		// load ELR, SPSR
 	.if	\el == 0
 	ct_user_enter
@@ -406,7 +422,6 @@ el1_irq:
 	bl	trace_hardirqs_off
 #endif
 
-	get_thread_info tsk
 	irq_handler
 
 #ifdef CONFIG_PREEMPT
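The ptrace.h, asm-offsets.c and entry.S hunks above implement one idea: every exception entry stashes the task's current addr_limit in pt_regs and forces it back to USER_DS, and every return to a kernel context restores the saved value, so a fault taken inside a set_fs(KERNEL_DS) region can no longer leak KERNEL_DS into unrelated code. A rough self-contained C rendering of the assembly; all names here are mock-ups shadowing the real kernel ones, not a kernel API:

	/* Sketch only: mock types standing in for the arm64 internals. */
	typedef unsigned long mm_segment_t;

	#define TASK_SIZE_64	0x0000ffffffffffffUL
	#define USER_DS		TASK_SIZE_64

	struct thread_info { mm_segment_t addr_limit; };
	struct pt_regs { unsigned long orig_addr_limit; };

	static struct thread_info *current_thread_info(void)
	{
		static struct thread_info ti = { .addr_limit = USER_DS };
		return &ti;
	}

	/* kernel_entry: ldr/str pair saves addr_limit, then forces USER_DS */
	static void kernel_entry_save_limit(struct pt_regs *regs)
	{
		regs->orig_addr_limit = current_thread_info()->addr_limit;
		current_thread_info()->addr_limit = USER_DS;
	}

	/* kernel_exit, \el != 0 only: put the saved limit back */
	static void kernel_exit_restore_limit(const struct pt_regs *regs)
	{
		current_thread_info()->addr_limit = regs->orig_addr_limit;
	}

This is also why the fault.c hunk below can compare regs->orig_addr_limit instead of get_fs(): the live limit has already been reset on entry, so only the saved copy still records what the interrupted context was using.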
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index 013e2cbe7924..b1166d1e5955 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -280,7 +280,8 @@ static int __kprobes do_page_fault(unsigned long addr, unsigned int esr,
 	}
 
 	if (permission_fault(esr) && (addr < USER_DS)) {
-		if (get_fs() == KERNEL_DS)
+		/* regs->orig_addr_limit may be 0 if we entered from EL0 */
+		if (regs->orig_addr_limit == KERNEL_DS)
 			die("Accessing user space memory with fs=KERNEL_DS", regs, esr);
 
 		if (!search_exception_tables(regs->pc))
diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
index 33787ee817f0..26ced536005a 100644
--- a/arch/x86/events/core.c
+++ b/arch/x86/events/core.c
@@ -2319,7 +2319,7 @@ void
 perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs)
 {
 	struct stack_frame frame;
-	const void __user *fp;
+	const unsigned long __user *fp;
 
 	if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
 		/* TODO: We don't support guest os callchain now */
@@ -2332,7 +2332,7 @@ perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs
 	if (regs->flags & (X86_VM_MASK | PERF_EFLAGS_VM))
 		return;
 
-	fp = (void __user *)regs->bp;
+	fp = (unsigned long __user *)regs->bp;
 
 	perf_callchain_store(entry, regs->ip);
 
@@ -2345,16 +2345,17 @@ perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs
 	pagefault_disable();
 	while (entry->nr < entry->max_stack) {
 		unsigned long bytes;
+
 		frame.next_frame = NULL;
 		frame.return_address = 0;
 
-		if (!access_ok(VERIFY_READ, fp, 16))
+		if (!access_ok(VERIFY_READ, fp, sizeof(*fp) * 2))
			break;
 
-		bytes = __copy_from_user_nmi(&frame.next_frame, fp, 8);
+		bytes = __copy_from_user_nmi(&frame.next_frame, fp, sizeof(*fp));
 		if (bytes != 0)
 			break;
-		bytes = __copy_from_user_nmi(&frame.return_address, fp+8, 8);
+		bytes = __copy_from_user_nmi(&frame.return_address, fp + 1, sizeof(*fp));
 		if (bytes != 0)
 			break;
 
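The core.c change above is a typing cleanup: once fp is an unsigned long __user *, the two reads per frame are sized with sizeof(*fp) and the return-address slot is reached with pointer arithmetic (fp + 1) instead of hard-coded 8s. A minimal userspace sketch of the same walk, assuming the classic x86-64 frame layout (saved parent frame pointer at *fp, return address at *(fp + 1)); plain dereferences stand in for __copy_from_user_nmi(), which the real code uses with pagefaults disabled:

	#include <stdio.h>

	struct stack_frame {
		unsigned long next_frame;
		unsigned long return_address;
	};

	static int walk_frames(const unsigned long *fp, int max_depth)
	{
		int depth = 0;

		while (fp && depth < max_depth) {
			struct stack_frame frame;

			/* two word-sized reads, like sizeof(*fp) * 2 above */
			frame.next_frame = fp[0];
			frame.return_address = fp[1];	/* fp + 1, not fp + 8 */
			if (!frame.return_address)
				break;

			printf("frame %d: ret=%#lx\n", depth++,
			       frame.return_address);
			fp = (const unsigned long *)frame.next_frame;
		}
		return depth;
	}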
diff --git a/arch/x86/events/intel/Makefile b/arch/x86/events/intel/Makefile
index 3660b2cf245a..06c2baa51814 100644
--- a/arch/x86/events/intel/Makefile
+++ b/arch/x86/events/intel/Makefile
@@ -1,8 +1,8 @@
 obj-$(CONFIG_CPU_SUP_INTEL)		+= core.o bts.o cqm.o
 obj-$(CONFIG_CPU_SUP_INTEL)		+= ds.o knc.o
 obj-$(CONFIG_CPU_SUP_INTEL)		+= lbr.o p4.o p6.o pt.o
-obj-$(CONFIG_PERF_EVENTS_INTEL_RAPL)	+= intel-rapl.o
-intel-rapl-objs				:= rapl.o
+obj-$(CONFIG_PERF_EVENTS_INTEL_RAPL)	+= intel-rapl-perf.o
+intel-rapl-perf-objs			:= rapl.o
 obj-$(CONFIG_PERF_EVENTS_INTEL_UNCORE)	+= intel-uncore.o
 intel-uncore-objs			:= uncore.o uncore_nhmex.o uncore_snb.o uncore_snbep.o
 obj-$(CONFIG_PERF_EVENTS_INTEL_CSTATE)	+= intel-cstate.o
diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
index 7c666958a625..9b4f9d3ce465 100644
--- a/arch/x86/events/intel/core.c
+++ b/arch/x86/events/intel/core.c
@@ -115,6 +115,10 @@ static struct event_constraint intel_snb_event_constraints[] __read_mostly =
 	INTEL_UEVENT_CONSTRAINT(0x04a3, 0xf), /* CYCLE_ACTIVITY.CYCLES_NO_DISPATCH */
 	INTEL_UEVENT_CONSTRAINT(0x02a3, 0x4), /* CYCLE_ACTIVITY.CYCLES_L1D_PENDING */
 
+	/*
+	 * When HT is off these events can only run on the bottom 4 counters
+	 * When HT is on, they are impacted by the HT bug and require EXCL access
+	 */
 	INTEL_EXCLEVT_CONSTRAINT(0xd0, 0xf), /* MEM_UOPS_RETIRED.* */
 	INTEL_EXCLEVT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_UOPS_RETIRED.* */
 	INTEL_EXCLEVT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */
@@ -139,6 +143,10 @@ static struct event_constraint intel_ivb_event_constraints[] __read_mostly =
 	INTEL_UEVENT_CONSTRAINT(0x0ca3, 0x4),	/* CYCLE_ACTIVITY.STALLS_L1D_PENDING */
 	INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2),	/* INST_RETIRED.PREC_DIST */
 
+	/*
+	 * When HT is off these events can only run on the bottom 4 counters
+	 * When HT is on, they are impacted by the HT bug and require EXCL access
+	 */
 	INTEL_EXCLEVT_CONSTRAINT(0xd0, 0xf), /* MEM_UOPS_RETIRED.* */
 	INTEL_EXCLEVT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_UOPS_RETIRED.* */
 	INTEL_EXCLEVT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */
@@ -182,6 +190,16 @@ struct event_constraint intel_skl_event_constraints[] = {
 	FIXED_EVENT_CONSTRAINT(0x003c, 1),	/* CPU_CLK_UNHALTED.CORE */
 	FIXED_EVENT_CONSTRAINT(0x0300, 2),	/* CPU_CLK_UNHALTED.REF */
 	INTEL_UEVENT_CONSTRAINT(0x1c0, 0x2),	/* INST_RETIRED.PREC_DIST */
+
+	/*
+	 * when HT is off, these can only run on the bottom 4 counters
+	 */
+	INTEL_EVENT_CONSTRAINT(0xd0, 0xf),	/* MEM_INST_RETIRED.* */
+	INTEL_EVENT_CONSTRAINT(0xd1, 0xf),	/* MEM_LOAD_RETIRED.* */
+	INTEL_EVENT_CONSTRAINT(0xd2, 0xf),	/* MEM_LOAD_L3_HIT_RETIRED.* */
+	INTEL_EVENT_CONSTRAINT(0xcd, 0xf),	/* MEM_TRANS_RETIRED.* */
+	INTEL_EVENT_CONSTRAINT(0xc6, 0xf),	/* FRONTEND_RETIRED.* */
+
 	EVENT_CONSTRAINT_END
 };
 
@@ -250,6 +268,10 @@ static struct event_constraint intel_hsw_event_constraints[] = {
 	/* CYCLE_ACTIVITY.CYCLES_NO_EXECUTE */
 	INTEL_UEVENT_CONSTRAINT(0x04a3, 0xf),
 
+	/*
+	 * When HT is off these events can only run on the bottom 4 counters
+	 * When HT is on, they are impacted by the HT bug and require EXCL access
+	 */
 	INTEL_EXCLEVT_CONSTRAINT(0xd0, 0xf), /* MEM_UOPS_RETIRED.* */
 	INTEL_EXCLEVT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_UOPS_RETIRED.* */
 	INTEL_EXCLEVT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */
@@ -264,6 +286,13 @@ struct event_constraint intel_bdw_event_constraints[] = {
 	FIXED_EVENT_CONSTRAINT(0x0300, 2),	/* CPU_CLK_UNHALTED.REF */
 	INTEL_UEVENT_CONSTRAINT(0x148, 0x4),	/* L1D_PEND_MISS.PENDING */
 	INTEL_UBIT_EVENT_CONSTRAINT(0x8a3, 0x4),	/* CYCLE_ACTIVITY.CYCLES_L1D_MISS */
+	/*
+	 * when HT is off, these can only run on the bottom 4 counters
+	 */
+	INTEL_EVENT_CONSTRAINT(0xd0, 0xf),	/* MEM_INST_RETIRED.* */
+	INTEL_EVENT_CONSTRAINT(0xd1, 0xf),	/* MEM_LOAD_RETIRED.* */
+	INTEL_EVENT_CONSTRAINT(0xd2, 0xf),	/* MEM_LOAD_L3_HIT_RETIRED.* */
+	INTEL_EVENT_CONSTRAINT(0xcd, 0xf),	/* MEM_TRANS_RETIRED.* */
 	EVENT_CONSTRAINT_END
 };
 
diff --git a/arch/x86/kernel/amd_nb.c b/arch/x86/kernel/amd_nb.c
index a147e676fc7b..e991d5c8bb3a 100644
--- a/arch/x86/kernel/amd_nb.c
+++ b/arch/x86/kernel/amd_nb.c
@@ -71,8 +71,8 @@ int amd_cache_northbridges(void)
 	while ((misc = next_northbridge(misc, amd_nb_misc_ids)) != NULL)
 		i++;
 
-	if (i == 0)
-		return 0;
+	if (!i)
+		return -ENODEV;
 
 	nb = kzalloc(i * sizeof(struct amd_northbridge), GFP_KERNEL);
 	if (!nb)
diff --git a/arch/x86/pci/acpi.c b/arch/x86/pci/acpi.c
index b2a4e2a61f6b..3cd69832d7f4 100644
--- a/arch/x86/pci/acpi.c
+++ b/arch/x86/pci/acpi.c
@@ -396,6 +396,7 @@ int __init pci_acpi_init(void)
 		return -ENODEV;
 
 	printk(KERN_INFO "PCI: Using ACPI for IRQ routing\n");
+	acpi_irq_penalty_init();
 	pcibios_enable_irq = acpi_pci_irq_enable;
 	pcibios_disable_irq = acpi_pci_irq_disable;
 	x86_init.pci.init_irq = x86_init_noop;
diff --git a/arch/x86/power/hibernate_64.c b/arch/x86/power/hibernate_64.c
index 009947d419a6..f2b5e6a5cf95 100644
--- a/arch/x86/power/hibernate_64.c
+++ b/arch/x86/power/hibernate_64.c
@@ -19,6 +19,7 @@
 #include <asm/mtrr.h>
 #include <asm/sections.h>
 #include <asm/suspend.h>
+#include <asm/tlbflush.h>
 
 /* Defined in hibernate_asm_64.S */
 extern asmlinkage __visible int restore_image(void);
@@ -28,6 +29,7 @@ extern asmlinkage __visible int restore_image(void);
  * kernel's text (this value is passed in the image header).
  */
 unsigned long restore_jump_address __visible;
+unsigned long jump_address_phys;
 
 /*
  * Value of the cr3 register from before the hibernation (this value is passed
@@ -37,7 +39,43 @@ unsigned long restore_cr3 __visible;
 
 pgd_t *temp_level4_pgt __visible;
 
-void *relocated_restore_code __visible;
+unsigned long relocated_restore_code __visible;
+
+static int set_up_temporary_text_mapping(void)
+{
+	pmd_t *pmd;
+	pud_t *pud;
+
+	/*
+	 * The new mapping only has to cover the page containing the image
+	 * kernel's entry point (jump_address_phys), because the switch over to
+	 * it is carried out by relocated code running from a page allocated
+	 * specifically for this purpose and covered by the identity mapping, so
+	 * the temporary kernel text mapping is only needed for the final jump.
+	 * Moreover, in that mapping the virtual address of the image kernel's
+	 * entry point must be the same as its virtual address in the image
+	 * kernel (restore_jump_address), so the image kernel's
+	 * restore_registers() code doesn't find itself in a different area of
+	 * the virtual address space after switching over to the original page
+	 * tables used by the image kernel.
+	 */
+	pud = (pud_t *)get_safe_page(GFP_ATOMIC);
+	if (!pud)
+		return -ENOMEM;
+
+	pmd = (pmd_t *)get_safe_page(GFP_ATOMIC);
+	if (!pmd)
+		return -ENOMEM;
+
+	set_pmd(pmd + pmd_index(restore_jump_address),
+		__pmd((jump_address_phys & PMD_MASK) | __PAGE_KERNEL_LARGE_EXEC));
+	set_pud(pud + pud_index(restore_jump_address),
+		__pud(__pa(pmd) | _KERNPG_TABLE));
+	set_pgd(temp_level4_pgt + pgd_index(restore_jump_address),
+		__pgd(__pa(pud) | _KERNPG_TABLE));
+
+	return 0;
+}
 
 static void *alloc_pgt_page(void *context)
 {
@@ -59,9 +97,10 @@ static int set_up_temporary_mappings(void)
 	if (!temp_level4_pgt)
 		return -ENOMEM;
 
-	/* It is safe to reuse the original kernel mapping */
-	set_pgd(temp_level4_pgt + pgd_index(__START_KERNEL_map),
-		init_level4_pgt[pgd_index(__START_KERNEL_map)]);
+	/* Prepare a temporary mapping for the kernel text */
+	result = set_up_temporary_text_mapping();
+	if (result)
+		return result;
 
 	/* Set up the direct mapping from scratch */
 	for (i = 0; i < nr_pfn_mapped; i++) {
@@ -78,19 +117,50 @@ static int set_up_temporary_mappings(void)
 	return 0;
 }
 
+static int relocate_restore_code(void)
+{
+	pgd_t *pgd;
+	pud_t *pud;
+
+	relocated_restore_code = get_safe_page(GFP_ATOMIC);
+	if (!relocated_restore_code)
+		return -ENOMEM;
+
+	memcpy((void *)relocated_restore_code, &core_restore_code, PAGE_SIZE);
+
+	/* Make the page containing the relocated code executable */
+	pgd = (pgd_t *)__va(read_cr3()) + pgd_index(relocated_restore_code);
+	pud = pud_offset(pgd, relocated_restore_code);
+	if (pud_large(*pud)) {
+		set_pud(pud, __pud(pud_val(*pud) & ~_PAGE_NX));
+	} else {
+		pmd_t *pmd = pmd_offset(pud, relocated_restore_code);
+
+		if (pmd_large(*pmd)) {
+			set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_NX));
+		} else {
+			pte_t *pte = pte_offset_kernel(pmd, relocated_restore_code);
+
+			set_pte(pte, __pte(pte_val(*pte) & ~_PAGE_NX));
+		}
+	}
+	__flush_tlb_all();
+
+	return 0;
+}
+
 int swsusp_arch_resume(void)
 {
 	int error;
 
 	/* We have got enough memory and from now on we cannot recover */
-	if ((error = set_up_temporary_mappings()))
+	error = set_up_temporary_mappings();
+	if (error)
 		return error;
 
-	relocated_restore_code = (void *)get_safe_page(GFP_ATOMIC);
-	if (!relocated_restore_code)
-		return -ENOMEM;
-	memcpy(relocated_restore_code, &core_restore_code,
-	       &restore_registers - &core_restore_code);
+	error = relocate_restore_code();
+	if (error)
+		return error;
 
 	restore_image();
 	return 0;
@@ -109,11 +179,12 @@ int pfn_is_nosave(unsigned long pfn)
 
 struct restore_data_record {
 	unsigned long jump_address;
+	unsigned long jump_address_phys;
 	unsigned long cr3;
 	unsigned long magic;
 };
 
-#define RESTORE_MAGIC	0x0123456789ABCDEFUL
+#define RESTORE_MAGIC	0x123456789ABCDEF0UL
 
 /**
  * arch_hibernation_header_save - populate the architecture specific part
@@ -126,7 +197,8 @@ int arch_hibernation_header_save(void *addr, unsigned int max_size)
 
 	if (max_size < sizeof(struct restore_data_record))
 		return -EOVERFLOW;
-	rdr->jump_address = restore_jump_address;
+	rdr->jump_address = (unsigned long)&restore_registers;
+	rdr->jump_address_phys = __pa_symbol(&restore_registers);
 	rdr->cr3 = restore_cr3;
 	rdr->magic = RESTORE_MAGIC;
 	return 0;
@@ -142,6 +214,7 @@ int arch_hibernation_header_restore(void *addr)
 	struct restore_data_record *rdr = addr;
 
 	restore_jump_address = rdr->jump_address;
+	jump_address_phys = rdr->jump_address_phys;
 	restore_cr3 = rdr->cr3;
 	return (rdr->magic == RESTORE_MAGIC) ? 0 : -EINVAL;
 }
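The record layout change above is also why RESTORE_MAGIC moves from 0x0123456789ABCDEF to 0x123456789ABCDEF0: the header gained jump_address_phys, so an image written with the old layout must fail the magic check rather than be misparsed. A standalone mock-up of that round trip, using the same field names but none of the kernel machinery:

	#include <string.h>

	struct restore_data_record {
		unsigned long jump_address;      /* virtual entry point */
		unsigned long jump_address_phys; /* physical entry point */
		unsigned long cr3;
		unsigned long magic;
	};

	#define RESTORE_MAGIC 0x123456789ABCDEF0UL  /* bumped with the layout */

	/* Illustrative stand-in for arch_hibernation_header_restore(). */
	static int header_restore(const void *addr, unsigned long *jump,
				  unsigned long *jump_phys, unsigned long *cr3)
	{
		struct restore_data_record rdr;

		memcpy(&rdr, addr, sizeof(rdr));
		if (rdr.magic != RESTORE_MAGIC)	/* image uses another layout */
			return -1;

		*jump = rdr.jump_address;
		*jump_phys = rdr.jump_address_phys;
		*cr3 = rdr.cr3;
		return 0;
	}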
diff --git a/arch/x86/power/hibernate_asm_64.S b/arch/x86/power/hibernate_asm_64.S
index 4400a43b9e28..3177c2bc26f6 100644
--- a/arch/x86/power/hibernate_asm_64.S
+++ b/arch/x86/power/hibernate_asm_64.S
@@ -44,9 +44,6 @@ ENTRY(swsusp_arch_suspend)
 	pushfq
 	popq	pt_regs_flags(%rax)
 
-	/* save the address of restore_registers */
-	movq	$restore_registers, %rax
-	movq	%rax, restore_jump_address(%rip)
 	/* save cr3 */
 	movq	%cr3, %rax
 	movq	%rax, restore_cr3(%rip)
@@ -57,31 +54,34 @@
 ENDPROC(swsusp_arch_suspend)
 
 ENTRY(restore_image)
-	/* switch to temporary page tables */
-	movq	$__PAGE_OFFSET, %rdx
-	movq	temp_level4_pgt(%rip), %rax
-	subq	%rdx, %rax
-	movq	%rax, %cr3
-	/* Flush TLB */
-	movq	mmu_cr4_features(%rip), %rax
-	movq	%rax, %rdx
-	andq	$~(X86_CR4_PGE), %rdx
-	movq	%rdx, %cr4;  # turn off PGE
-	movq	%cr3, %rcx;  # flush TLB
-	movq	%rcx, %cr3;
-	movq	%rax, %cr4;  # turn PGE back on
-
 	/* prepare to jump to the image kernel */
-	movq	restore_jump_address(%rip), %rax
-	movq	restore_cr3(%rip), %rbx
+	movq	restore_jump_address(%rip), %r8
+	movq	restore_cr3(%rip), %r9
+
+	/* prepare to switch to temporary page tables */
+	movq	temp_level4_pgt(%rip), %rax
+	movq	mmu_cr4_features(%rip), %rbx
 
 	/* prepare to copy image data to their original locations */
 	movq	restore_pblist(%rip), %rdx
+
+	/* jump to relocated restore code */
 	movq	relocated_restore_code(%rip), %rcx
 	jmpq	*%rcx
 
 	/* code below has been relocated to a safe page */
 ENTRY(core_restore_code)
+	/* switch to temporary page tables */
+	movq	$__PAGE_OFFSET, %rcx
+	subq	%rcx, %rax
+	movq	%rax, %cr3
+	/* flush TLB */
+	movq	%rbx, %rcx
+	andq	$~(X86_CR4_PGE), %rcx
+	movq	%rcx, %cr4;  # turn off PGE
+	movq	%cr3, %rcx;  # flush TLB
+	movq	%rcx, %cr3;
+	movq	%rbx, %cr4;  # turn PGE back on
 .Lloop:
 	testq	%rdx, %rdx
 	jz	.Ldone
@@ -96,24 +96,17 @@ ENTRY(core_restore_code)
 	/* progress to the next pbe */
 	movq	pbe_next(%rdx), %rdx
 	jmp	.Lloop
+
 .Ldone:
 	/* jump to the restore_registers address from the image header */
-	jmpq	*%rax
-	/*
-	 * NOTE: This assumes that the boot kernel's text mapping covers the
-	 * image kernel's page containing restore_registers and the address of
-	 * this page is the same as in the image kernel's text mapping (it
-	 * should always be true, because the text mapping is linear, starting
-	 * from 0, and is supposed to cover the entire kernel text for every
-	 * kernel).
-	 *
-	 * code below belongs to the image kernel
-	 */
+	jmpq	*%r8
 
+	/* code below belongs to the image kernel */
+	.align PAGE_SIZE
 ENTRY(restore_registers)
 	FRAME_BEGIN
 	/* go back to the original page tables */
-	movq	%rbx, %cr3
+	movq	%r9, %cr3
 
 	/* Flush TLB, including "global" things (vmalloc) */
 	movq	mmu_cr4_features(%rip), %rax
diff --git a/block/ioprio.c b/block/ioprio.c
index cc7800e9eb44..01b8116298a1 100644
--- a/block/ioprio.c
+++ b/block/ioprio.c
@@ -150,8 +150,10 @@ static int get_task_ioprio(struct task_struct *p)
 	if (ret)
 		goto out;
 	ret = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_NONE, IOPRIO_NORM);
+	task_lock(p);
 	if (p->io_context)
 		ret = p->io_context->ioprio;
+	task_unlock(p);
 out:
 	return ret;
 }
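The two added lines above close a race: p->io_context can be freed while another task is inspecting it, so the NULL check and the dereference must both sit under task_lock(). The same pattern as a hedged standalone sketch; get_ioprio_locked() is an illustrative name, and a pthread mutex plays the role of task_lock():

	#include <pthread.h>

	struct io_context { int ioprio; };

	struct task {
		pthread_mutex_t alloc_lock;	/* stands in for task_lock() */
		struct io_context *io_context;	/* freed under alloc_lock at exit */
	};

	static int get_ioprio_locked(struct task *p, int fallback)
	{
		int ret = fallback;

		pthread_mutex_lock(&p->alloc_lock);	/* task_lock(p) */
		if (p->io_context)
			ret = p->io_context->ioprio;	/* can't be freed here */
		pthread_mutex_unlock(&p->alloc_lock);	/* task_unlock(p) */

		return ret;
	}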
diff --git a/drivers/acpi/acpi_dbg.c b/drivers/acpi/acpi_dbg.c
index 1f4128487dd4..dee86925a9a1 100644
--- a/drivers/acpi/acpi_dbg.c
+++ b/drivers/acpi/acpi_dbg.c
@@ -602,7 +602,7 @@ static int acpi_aml_read_user(char __user *buf, int len)
 	crc->tail = (crc->tail + n) & (ACPI_AML_BUF_SIZE - 1);
 	ret = n;
 out:
-	acpi_aml_unlock_fifo(ACPI_AML_OUT_USER, !ret);
+	acpi_aml_unlock_fifo(ACPI_AML_OUT_USER, ret >= 0);
 	return ret;
 }
 
@@ -672,7 +672,7 @@ static int acpi_aml_write_user(const char __user *buf, int len)
 	crc->head = (crc->head + n) & (ACPI_AML_BUF_SIZE - 1);
 	ret = n;
 out:
-	acpi_aml_unlock_fifo(ACPI_AML_IN_USER, !ret);
+	acpi_aml_unlock_fifo(ACPI_AML_IN_USER, ret >= 0);
 	return n;
 }
 
diff --git a/drivers/acpi/acpica/nsload.c b/drivers/acpi/acpica/nsload.c
index b5e2b0ada0ab..297f6aacd7d4 100644
--- a/drivers/acpi/acpica/nsload.c
+++ b/drivers/acpi/acpica/nsload.c
@@ -46,6 +46,7 @@
 #include "acnamesp.h"
 #include "acdispat.h"
 #include "actables.h"
+#include "acinterp.h"
 
 #define _COMPONENT          ACPI_NAMESPACE
 ACPI_MODULE_NAME("nsload")
@@ -78,6 +79,8 @@ acpi_ns_load_table(u32 table_index, struct acpi_namespace_node *node)
 
 	ACPI_FUNCTION_TRACE(ns_load_table);
 
+	acpi_ex_enter_interpreter();
+
 	/*
 	 * Parse the table and load the namespace with all named
 	 * objects found within. Control methods are NOT parsed
@@ -89,7 +92,7 @@ acpi_ns_load_table(u32 table_index, struct acpi_namespace_node *node)
 	 */
 	status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
 	if (ACPI_FAILURE(status)) {
-		return_ACPI_STATUS(status);
+		goto unlock_interp;
 	}
 
 	/* If table already loaded into namespace, just return */
@@ -130,6 +133,8 @@ acpi_ns_load_table(u32 table_index, struct acpi_namespace_node *node)
 
 unlock:
 	(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
+unlock_interp:
+	(void)acpi_ex_exit_interpreter();
 
 	if (ACPI_FAILURE(status)) {
 		return_ACPI_STATUS(status);
diff --git a/drivers/acpi/acpica/nsparse.c b/drivers/acpi/acpica/nsparse.c
index 1783cd7e1446..f631a47724f0 100644
--- a/drivers/acpi/acpica/nsparse.c
+++ b/drivers/acpi/acpica/nsparse.c
@@ -47,7 +47,6 @@
 #include "acparser.h"
 #include "acdispat.h"
 #include "actables.h"
-#include "acinterp.h"
 
 #define _COMPONENT          ACPI_NAMESPACE
 ACPI_MODULE_NAME("nsparse")
@@ -171,8 +170,6 @@ acpi_ns_parse_table(u32 table_index, struct acpi_namespace_node *start_node)
 
 	ACPI_FUNCTION_TRACE(ns_parse_table);
 
-	acpi_ex_enter_interpreter();
-
 	/*
 	 * AML Parse, pass 1
 	 *
@@ -188,7 +185,7 @@ acpi_ns_parse_table(u32 table_index, struct acpi_namespace_node *start_node)
 	status = acpi_ns_one_complete_parse(ACPI_IMODE_LOAD_PASS1,
 					    table_index, start_node);
 	if (ACPI_FAILURE(status)) {
-		goto error_exit;
+		return_ACPI_STATUS(status);
 	}
 
 	/*
@@ -204,10 +201,8 @@ acpi_ns_parse_table(u32 table_index, struct acpi_namespace_node *start_node)
 	status = acpi_ns_one_complete_parse(ACPI_IMODE_LOAD_PASS2,
 					    table_index, start_node);
 	if (ACPI_FAILURE(status)) {
-		goto error_exit;
+		return_ACPI_STATUS(status);
 	}
 
-error_exit:
-	acpi_ex_exit_interpreter();
 	return_ACPI_STATUS(status);
 }
diff --git a/drivers/acpi/pci_link.c b/drivers/acpi/pci_link.c
index 4ed4061813e6..c983bf733ad3 100644
--- a/drivers/acpi/pci_link.c
+++ b/drivers/acpi/pci_link.c
@@ -470,6 +470,7 @@ static int acpi_irq_pci_sharing_penalty(int irq)
 {
 	struct acpi_pci_link *link;
 	int penalty = 0;
+	int i;
 
 	list_for_each_entry(link, &acpi_link_list, list) {
 		/*
@@ -478,18 +479,14 @@
 		 */
 		if (link->irq.active && link->irq.active == irq)
 			penalty += PIRQ_PENALTY_PCI_USING;
-		else {
-			int i;
-
-			/*
-			 * If a link is inactive, penalize the IRQs it
-			 * might use, but not as severely.
-			 */
-			for (i = 0; i < link->irq.possible_count; i++)
-				if (link->irq.possible[i] == irq)
-					penalty += PIRQ_PENALTY_PCI_POSSIBLE /
-						link->irq.possible_count;
-		}
+
+		/*
+		 * penalize the IRQs PCI might use, but not as severely.
+		 */
+		for (i = 0; i < link->irq.possible_count; i++)
+			if (link->irq.possible[i] == irq)
+				penalty += PIRQ_PENALTY_PCI_POSSIBLE /
+					   link->irq.possible_count;
 	}
 
 	return penalty;
@@ -499,9 +496,6 @@
 {
 	int penalty = 0;
 
-	if (irq < ACPI_MAX_ISA_IRQS)
-		penalty += acpi_isa_irq_penalty[irq];
-
 	/*
 	 * Penalize IRQ used by ACPI SCI. If ACPI SCI pin attributes conflict
 	 * with PCI IRQ attributes, mark ACPI SCI as ISA_ALWAYS so it won't be
@@ -516,10 +510,49 @@
 		penalty += PIRQ_PENALTY_PCI_USING;
 	}
 
+	if (irq < ACPI_MAX_ISA_IRQS)
+		return penalty + acpi_isa_irq_penalty[irq];
+
 	penalty += acpi_irq_pci_sharing_penalty(irq);
 	return penalty;
 }
 
+int __init acpi_irq_penalty_init(void)
+{
+	struct acpi_pci_link *link;
+	int i;
+
+	/*
+	 * Update penalties to facilitate IRQ balancing.
+	 */
+	list_for_each_entry(link, &acpi_link_list, list) {
+
+		/*
+		 * reflect the possible and active irqs in the penalty table --
+		 * useful for breaking ties.
+		 */
+		if (link->irq.possible_count) {
+			int penalty =
+			    PIRQ_PENALTY_PCI_POSSIBLE /
+			    link->irq.possible_count;
+
+			for (i = 0; i < link->irq.possible_count; i++) {
+				if (link->irq.possible[i] < ACPI_MAX_ISA_IRQS)
+					acpi_isa_irq_penalty[link->irq.
+							     possible[i]] +=
+					    penalty;
+			}
+
+		} else if (link->irq.active &&
+			   (link->irq.active < ACPI_MAX_ISA_IRQS)) {
+			acpi_isa_irq_penalty[link->irq.active] +=
+			    PIRQ_PENALTY_PCI_POSSIBLE;
+		}
+	}
+
+	return 0;
+}
+
 static int acpi_irq_balance = -1;	/* 0: static, 1: balance */
 
 static int acpi_pci_link_allocate(struct acpi_pci_link *link)
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index 2e6d1e9c3345..fcc5b4e0aef2 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -207,6 +207,9 @@ struct blkfront_info
 	struct blk_mq_tag_set tag_set;
 	struct blkfront_ring_info *rinfo;
 	unsigned int nr_rings;
+	/* Save uncomplete reqs and bios for migration. */
+	struct list_head requests;
+	struct bio_list bio_list;
 };
 
 static unsigned int nr_minors;
@@ -2002,69 +2005,22 @@ static int blkif_recover(struct blkfront_info *info)
 {
 	unsigned int i, r_index;
 	struct request *req, *n;
-	struct blk_shadow *copy;
 	int rc;
 	struct bio *bio, *cloned_bio;
-	struct bio_list bio_list, merge_bio;
 	unsigned int segs, offset;
 	int pending, size;
 	struct split_bio *split_bio;
-	struct list_head requests;
 
 	blkfront_gather_backend_features(info);
 	segs = info->max_indirect_segments ? : BLKIF_MAX_SEGMENTS_PER_REQUEST;
 	blk_queue_max_segments(info->rq, segs);
-	bio_list_init(&bio_list);
-	INIT_LIST_HEAD(&requests);
 
 	for (r_index = 0; r_index < info->nr_rings; r_index++) {
-		struct blkfront_ring_info *rinfo;
-
-		rinfo = &info->rinfo[r_index];
-		/* Stage 1: Make a safe copy of the shadow state. */
-		copy = kmemdup(rinfo->shadow, sizeof(rinfo->shadow),
-			       GFP_NOIO | __GFP_REPEAT | __GFP_HIGH);
-		if (!copy)
-			return -ENOMEM;
-
-		/* Stage 2: Set up free list. */
-		memset(&rinfo->shadow, 0, sizeof(rinfo->shadow));
-		for (i = 0; i < BLK_RING_SIZE(info); i++)
-			rinfo->shadow[i].req.u.rw.id = i+1;
-		rinfo->shadow_free = rinfo->ring.req_prod_pvt;
-		rinfo->shadow[BLK_RING_SIZE(info)-1].req.u.rw.id = 0x0fffffff;
+		struct blkfront_ring_info *rinfo = &info->rinfo[r_index];
 
 		rc = blkfront_setup_indirect(rinfo);
-		if (rc) {
-			kfree(copy);
+		if (rc)
 			return rc;
-		}
-
-		for (i = 0; i < BLK_RING_SIZE(info); i++) {
-			/* Not in use? */
-			if (!copy[i].request)
-				continue;
-
-			/*
-			 * Get the bios in the request so we can re-queue them.
-			 */
-			if (copy[i].request->cmd_flags &
-			    (REQ_FLUSH | REQ_FUA | REQ_DISCARD | REQ_SECURE)) {
-				/*
-				 * Flush operations don't contain bios, so
-				 * we need to requeue the whole request
-				 */
-				list_add(&copy[i].request->queuelist, &requests);
-				continue;
-			}
-			merge_bio.head = copy[i].request->bio;
-			merge_bio.tail = copy[i].request->biotail;
-			bio_list_merge(&bio_list, &merge_bio);
-			copy[i].request->bio = NULL;
-			blk_end_request_all(copy[i].request, 0);
-		}
-
-		kfree(copy);
 	}
 	xenbus_switch_state(info->xbdev, XenbusStateConnected);
 
@@ -2079,7 +2035,7 @@ static int blkif_recover(struct blkfront_info *info)
 		kick_pending_request_queues(rinfo);
 	}
 
-	list_for_each_entry_safe(req, n, &requests, queuelist) {
+	list_for_each_entry_safe(req, n, &info->requests, queuelist) {
 		/* Requeue pending requests (flush or discard) */
 		list_del_init(&req->queuelist);
 		BUG_ON(req->nr_phys_segments > segs);
@@ -2087,7 +2043,7 @@ static int blkif_recover(struct blkfront_info *info)
 	}
 	blk_mq_kick_requeue_list(info->rq);
 
-	while ((bio = bio_list_pop(&bio_list)) != NULL) {
+	while ((bio = bio_list_pop(&info->bio_list)) != NULL) {
 		/* Traverse the list of pending bios and re-queue them */
 		if (bio_segments(bio) > segs) {
 			/*
@@ -2133,9 +2089,42 @@ static int blkfront_resume(struct xenbus_device *dev)
 {
 	struct blkfront_info *info = dev_get_drvdata(&dev->dev);
 	int err = 0;
+	unsigned int i, j;
 
 	dev_dbg(&dev->dev, "blkfront_resume: %s\n", dev->nodename);
 
+	bio_list_init(&info->bio_list);
+	INIT_LIST_HEAD(&info->requests);
+	for (i = 0; i < info->nr_rings; i++) {
+		struct blkfront_ring_info *rinfo = &info->rinfo[i];
+		struct bio_list merge_bio;
+		struct blk_shadow *shadow = rinfo->shadow;
+
+		for (j = 0; j < BLK_RING_SIZE(info); j++) {
+			/* Not in use? */
+			if (!shadow[j].request)
+				continue;
+
+			/*
+			 * Get the bios in the request so we can re-queue them.
+			 */
+			if (shadow[j].request->cmd_flags &
+			    (REQ_FLUSH | REQ_FUA | REQ_DISCARD | REQ_SECURE)) {
+				/*
+				 * Flush operations don't contain bios, so
+				 * we need to requeue the whole request
+				 */
+				list_add(&shadow[j].request->queuelist, &info->requests);
+				continue;
+			}
+			merge_bio.head = shadow[j].request->bio;
+			merge_bio.tail = shadow[j].request->biotail;
+			bio_list_merge(&info->bio_list, &merge_bio);
+			shadow[j].request->bio = NULL;
+			blk_mq_end_request(shadow[j].request, 0);
+		}
+	}
+
 	blkif_free(info, info->connected == BLKIF_STATE_CONNECTED);
 
 	err = negotiate_mq(info);
diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c
index a4d0059e232c..c73207abb5a4 100644
--- a/drivers/cpuidle/cpuidle.c
+++ b/drivers/cpuidle/cpuidle.c
@@ -173,7 +173,7 @@ int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv,
 
 	struct cpuidle_state *target_state = &drv->states[index];
 	bool broadcast = !!(target_state->flags & CPUIDLE_FLAG_TIMER_STOP);
-	u64 time_start, time_end;
+	ktime_t time_start, time_end;
 	s64 diff;
 
 	/*
@@ -195,13 +195,13 @@ int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv,
 	sched_idle_set_state(target_state);
 
 	trace_cpu_idle_rcuidle(index, dev->cpu);
-	time_start = local_clock();
+	time_start = ns_to_ktime(local_clock());
 
 	stop_critical_timings();
 	entered_state = target_state->enter(dev, drv, index);
 	start_critical_timings();
 
-	time_end = local_clock();
+	time_end = ns_to_ktime(local_clock());
 	trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, dev->cpu);
 
 	/* The cpu is no longer idle or about to enter idle. */
@@ -217,11 +217,7 @@ int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv,
 	if (!cpuidle_state_is_coupled(drv, index))
 		local_irq_enable();
 
-	/*
-	 * local_clock() returns the time in nanosecond, let's shift
-	 * by 10 (divide by 1024) to have microsecond based time.
-	 */
-	diff = (time_end - time_start) >> 10;
+	diff = ktime_us_delta(time_end, time_start);
 	if (diff > INT_MAX)
 		diff = INT_MAX;
 
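The deleted comment names the old trick: shifting a nanosecond delta right by 10 divides by 1024 rather than 1000, so every measured idle residency came out about 2.3% short, while ktime_us_delta() performs the exact division. A standalone illustration of the size of that error, with a made-up 5 ms residency:

	#include <stdio.h>

	int main(void)
	{
		long long delta_ns = 5000000;		/* 5 ms in an idle state */

		long long approx_us = delta_ns >> 10;	/* old: /1024 -> 4882 */
		long long exact_us  = delta_ns / 1000;	/* new: /1000 -> 5000 */

		printf("approx=%lld exact=%lld error=%lld us\n",
		       approx_us, exact_us, exact_us - approx_us);
		return 0;
	}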
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index cebcb405812e..536112fd2466 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -49,7 +49,7 @@ config GPIO_DEVRES
 
 config OF_GPIO
 	def_bool y
-	depends on OF || COMPILE_TEST
+	depends on OF
 
 config GPIO_ACPI
 	def_bool y
diff --git a/drivers/gpio/gpio-sch.c b/drivers/gpio/gpio-sch.c
index e85e7539cf5d..eb43ae4835c1 100644
--- a/drivers/gpio/gpio-sch.c
+++ b/drivers/gpio/gpio-sch.c
@@ -61,9 +61,8 @@ static unsigned sch_gpio_bit(struct sch_gpio *sch, unsigned gpio)
 	return gpio % 8;
 }
 
-static int sch_gpio_reg_get(struct gpio_chip *gc, unsigned gpio, unsigned reg)
+static int sch_gpio_reg_get(struct sch_gpio *sch, unsigned gpio, unsigned reg)
 {
-	struct sch_gpio *sch = gpiochip_get_data(gc);
 	unsigned short offset, bit;
 	u8 reg_val;
 
@@ -75,10 +74,9 @@ static int sch_gpio_reg_get(struct gpio_chip *gc, unsigned gpio, unsigned reg)
 	return reg_val;
 }
 
-static void sch_gpio_reg_set(struct gpio_chip *gc, unsigned gpio, unsigned reg,
+static void sch_gpio_reg_set(struct sch_gpio *sch, unsigned gpio, unsigned reg,
 			     int val)
 {
-	struct sch_gpio *sch = gpiochip_get_data(gc);
 	unsigned short offset, bit;
 	u8 reg_val;
 
@@ -98,14 +96,15 @@ static int sch_gpio_direction_in(struct gpio_chip *gc, unsigned gpio_num)
 	struct sch_gpio *sch = gpiochip_get_data(gc);
 
 	spin_lock(&sch->lock);
-	sch_gpio_reg_set(gc, gpio_num, GIO, 1);
+	sch_gpio_reg_set(sch, gpio_num, GIO, 1);
 	spin_unlock(&sch->lock);
 	return 0;
 }
 
 static int sch_gpio_get(struct gpio_chip *gc, unsigned gpio_num)
 {
-	return sch_gpio_reg_get(gc, gpio_num, GLV);
+	struct sch_gpio *sch = gpiochip_get_data(gc);
+	return sch_gpio_reg_get(sch, gpio_num, GLV);
 }
 
 static void sch_gpio_set(struct gpio_chip *gc, unsigned gpio_num, int val)
@@ -113,7 +112,7 @@ static void sch_gpio_set(struct gpio_chip *gc, unsigned gpio_num, int val)
 	struct sch_gpio *sch = gpiochip_get_data(gc);
 
 	spin_lock(&sch->lock);
-	sch_gpio_reg_set(gc, gpio_num, GLV, val);
+	sch_gpio_reg_set(sch, gpio_num, GLV, val);
 	spin_unlock(&sch->lock);
 }
 
@@ -123,7 +122,7 @@ static int sch_gpio_direction_out(struct gpio_chip *gc, unsigned gpio_num,
 	struct sch_gpio *sch = gpiochip_get_data(gc);
 
 	spin_lock(&sch->lock);
-	sch_gpio_reg_set(gc, gpio_num, GIO, 0);
+	sch_gpio_reg_set(sch, gpio_num, GIO, 0);
 	spin_unlock(&sch->lock);
 
 	/*
@@ -182,13 +181,13 @@ static int sch_gpio_probe(struct platform_device *pdev)
 		 * GPIO7 is configured by the CMC as SLPIOVR
 		 * Enable GPIO[9:8] core powered gpios explicitly
 		 */
-		sch_gpio_reg_set(&sch->chip, 8, GEN, 1);
-		sch_gpio_reg_set(&sch->chip, 9, GEN, 1);
+		sch_gpio_reg_set(sch, 8, GEN, 1);
+		sch_gpio_reg_set(sch, 9, GEN, 1);
 		/*
 		 * SUS_GPIO[2:0] enabled by default
 		 * Enable SUS_GPIO3 resume powered gpio explicitly
 		 */
-		sch_gpio_reg_set(&sch->chip, 13, GEN, 1);
+		sch_gpio_reg_set(sch, 13, GEN, 1);
 		break;
 
 	case PCI_DEVICE_ID_INTEL_ITC_LPC:
diff --git a/drivers/gpio/gpiolib-legacy.c b/drivers/gpio/gpiolib-legacy.c
index 3a5c7011ad3b..8b830996fe02 100644
--- a/drivers/gpio/gpiolib-legacy.c
+++ b/drivers/gpio/gpiolib-legacy.c
@@ -28,6 +28,10 @@ int gpio_request_one(unsigned gpio, unsigned long flags, const char *label)
         if (!desc && gpio_is_valid(gpio))
                 return -EPROBE_DEFER;
 
+        err = gpiod_request(desc, label);
+        if (err)
+                return err;
+
         if (flags & GPIOF_OPEN_DRAIN)
                 set_bit(FLAG_OPEN_DRAIN, &desc->flags);
 
@@ -37,10 +41,6 @@ int gpio_request_one(unsigned gpio, unsigned long flags, const char *label)
         if (flags & GPIOF_ACTIVE_LOW)
                 set_bit(FLAG_ACTIVE_LOW, &desc->flags);
 
-        err = gpiod_request(desc, label);
-        if (err)
-                return err;
-
         if (flags & GPIOF_DIR_IN)
                 err = gpiod_direction_input(desc);
         else
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index 570771ed19e6..be74bd370f1f 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -1352,14 +1352,6 @@ static int __gpiod_request(struct gpio_desc *desc, const char *label)
                 spin_lock_irqsave(&gpio_lock, flags);
         }
 done:
-        if (status < 0) {
-                /* Clear flags that might have been set by the caller before
-                 * requesting the GPIO.
-                 */
-                clear_bit(FLAG_ACTIVE_LOW, &desc->flags);
-                clear_bit(FLAG_OPEN_DRAIN, &desc->flags);
-                clear_bit(FLAG_OPEN_SOURCE, &desc->flags);
-        }
         spin_unlock_irqrestore(&gpio_lock, flags);
         return status;
 }
@@ -2587,28 +2579,13 @@ struct gpio_desc *__must_check gpiod_get_optional(struct device *dev,
 }
 EXPORT_SYMBOL_GPL(gpiod_get_optional);
 
-/**
- * gpiod_parse_flags - helper function to parse GPIO lookup flags
- * @desc:   gpio to be setup
- * @lflags: gpio_lookup_flags - returned from of_find_gpio() or
- *          of_get_gpio_hog()
- *
- * Set the GPIO descriptor flags based on the given GPIO lookup flags.
- */
-static void gpiod_parse_flags(struct gpio_desc *desc, unsigned long lflags)
-{
-        if (lflags & GPIO_ACTIVE_LOW)
-                set_bit(FLAG_ACTIVE_LOW, &desc->flags);
-        if (lflags & GPIO_OPEN_DRAIN)
-                set_bit(FLAG_OPEN_DRAIN, &desc->flags);
-        if (lflags & GPIO_OPEN_SOURCE)
-                set_bit(FLAG_OPEN_SOURCE, &desc->flags);
-}
 
 /**
  * gpiod_configure_flags - helper function to configure a given GPIO
  * @desc:   gpio whose value will be assigned
  * @con_id: function within the GPIO consumer
+ * @lflags: gpio_lookup_flags - returned from of_find_gpio() or
+ *          of_get_gpio_hog()
  * @dflags: gpiod_flags - optional GPIO initialization flags
  *
  * Return 0 on success, -ENOENT if no GPIO has been assigned to the
@@ -2616,10 +2593,17 @@ static void gpiod_parse_flags(struct gpio_desc *desc, unsigned long lflags)
  * occurred while trying to acquire the GPIO.
  */
 static int gpiod_configure_flags(struct gpio_desc *desc, const char *con_id,
-                                 enum gpiod_flags dflags)
+                                 unsigned long lflags, enum gpiod_flags dflags)
 {
         int status;
 
+        if (lflags & GPIO_ACTIVE_LOW)
+                set_bit(FLAG_ACTIVE_LOW, &desc->flags);
+        if (lflags & GPIO_OPEN_DRAIN)
+                set_bit(FLAG_OPEN_DRAIN, &desc->flags);
+        if (lflags & GPIO_OPEN_SOURCE)
+                set_bit(FLAG_OPEN_SOURCE, &desc->flags);
+
         /* No particular flag request, return here... */
         if (!(dflags & GPIOD_FLAGS_BIT_DIR_SET)) {
                 pr_debug("no flags found for %s\n", con_id);
@@ -2686,13 +2670,11 @@ struct gpio_desc *__must_check gpiod_get_index(struct device *dev,
                 return desc;
         }
 
-        gpiod_parse_flags(desc, lookupflags);
-
         status = gpiod_request(desc, con_id);
         if (status < 0)
                 return ERR_PTR(status);
 
-        status = gpiod_configure_flags(desc, con_id, flags);
+        status = gpiod_configure_flags(desc, con_id, lookupflags, flags);
         if (status < 0) {
                 dev_dbg(dev, "setup of GPIO %s failed\n", con_id);
                 gpiod_put(desc);
@@ -2748,6 +2730,10 @@ struct gpio_desc *fwnode_get_named_gpiod(struct fwnode_handle *fwnode,
         if (IS_ERR(desc))
                 return desc;
 
+        ret = gpiod_request(desc, NULL);
+        if (ret)
+                return ERR_PTR(ret);
+
         if (active_low)
                 set_bit(FLAG_ACTIVE_LOW, &desc->flags);
 
@@ -2758,10 +2744,6 @@ struct gpio_desc *fwnode_get_named_gpiod(struct fwnode_handle *fwnode,
                         set_bit(FLAG_OPEN_SOURCE, &desc->flags);
         }
 
-        ret = gpiod_request(desc, NULL);
-        if (ret)
-                return ERR_PTR(ret);
-
         return desc;
 }
 EXPORT_SYMBOL_GPL(fwnode_get_named_gpiod);
@@ -2814,8 +2796,6 @@ int gpiod_hog(struct gpio_desc *desc, const char *name,
         chip = gpiod_to_chip(desc);
         hwnum = gpio_chip_hwgpio(desc);
 
-        gpiod_parse_flags(desc, lflags);
-
         local_desc = gpiochip_request_own_desc(chip, hwnum, name);
         if (IS_ERR(local_desc)) {
                 status = PTR_ERR(local_desc);
@@ -2824,7 +2804,7 @@ int gpiod_hog(struct gpio_desc *desc, const char *name,
                 return status;
         }
 
-        status = gpiod_configure_flags(desc, name, dflags);
+        status = gpiod_configure_flags(desc, name, lflags, dflags);
         if (status < 0) {
                 pr_err("setup of hog GPIO %s (chip %s, offset %d) failed, %d\n",
                        name, chip->label, hwnum, status);
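Both gpiolib changes above move gpiod_request() ahead of any flag writes, so a failed request never leaves stale flag bits behind and the old done-label cleanup in __gpiod_request() becomes unnecessary. A hedged sketch of that invariant with hypothetical names (struct foo_desc, foo_request), not gpiolib's real API:

#include <linux/bitops.h>
#include <linux/err.h>

struct foo_desc {
        unsigned long flags;            /* caller-visible flag bits */
};

static int foo_request(struct foo_desc *d, const char *label)
{
        return 0;       /* stand-in: the real request can fail, e.g. -EBUSY */
}

static struct foo_desc *foo_get(struct foo_desc *d, const char *label,
                                unsigned long lflags)
{
        int status = foo_request(d, label);     /* take ownership first */

        if (status < 0)
                return ERR_PTR(status);         /* nothing to roll back */

        if (lflags & BIT(0))                    /* configure only on success */
                set_bit(0, &d->flags);

        return d;
}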
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.c
index ec2a7ada346a..91e25f942d90 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.c
@@ -98,7 +98,6 @@
 #define PCIE_BUS_CLK                10000
 #define TCLK                        (PCIE_BUS_CLK / 10)
 
-#define CEILING_UCHAR(double) ((double-(uint8_t)(double)) > 0 ? (uint8_t)(double+1) : (uint8_t)(double))
 
 static const uint16_t polaris10_clock_stretcher_lookup_table[2][4] =
 { {600, 1050, 3, 0}, {600, 1050, 6, 1} };
@@ -733,7 +732,7 @@ static int polaris10_populate_smc_mvdd_table(struct pp_hwmgr *hwmgr,
                         table->Smio[level] |=
                                 data->mvdd_voltage_table.entries[level].smio_low;
                 }
-                table->SmioMask2 = data->vddci_voltage_table.mask_low;
+                table->SmioMask2 = data->mvdd_voltage_table.mask_low;
 
                 table->MvddLevelCount = (uint32_t) PP_HOST_TO_SMC_UL(count);
         }
@@ -1807,27 +1806,25 @@ static int polaris10_populate_clock_stretcher_data_table(struct pp_hwmgr *hwmgr)
 
         ro = efuse * (max -min)/255 + min;
 
-        /* Populate Sclk_CKS_masterEn0_7 and Sclk_voltageOffset
-         * there is a little difference in calculating
-         * volt_with_cks with windows */
+        /* Populate Sclk_CKS_masterEn0_7 and Sclk_voltageOffset */
         for (i = 0; i < sclk_table->count; i++) {
                 data->smc_state_table.Sclk_CKS_masterEn0_7 |=
                         sclk_table->entries[i].cks_enable << i;
                 if (hwmgr->chip_id == CHIP_POLARIS10) {
-                        volt_without_cks = (uint32_t)((2753594000 + (sclk_table->entries[i].clk/100) * 136418 -(ro - 70) * 1000000) / \
+                        volt_without_cks = (uint32_t)((2753594000U + (sclk_table->entries[i].clk/100) * 136418 -(ro - 70) * 1000000) / \
                                                 (2424180 - (sclk_table->entries[i].clk/100) * 1132925/1000));
-                        volt_with_cks = (uint32_t)((279720200 + sclk_table->entries[i].clk * 3232 - (ro - 65) * 100000000) / \
-                                        (252248000 - sclk_table->entries[i].clk/100 * 115764));
+                        volt_with_cks = (uint32_t)((2797202000U + sclk_table->entries[i].clk/100 * 3232 - (ro - 65) * 1000000) / \
+                                        (2522480 - sclk_table->entries[i].clk/100 * 115764/100));
                 } else {
-                        volt_without_cks = (uint32_t)((2416794800 + (sclk_table->entries[i].clk/100) * 1476925/10 -(ro - 50) * 1000000) / \
-                                        (2625416 - (sclk_table->entries[i].clk/100) * 12586807/10000));
-                        volt_with_cks = (uint32_t)((2999656000 + sclk_table->entries[i].clk * 392803/100 - (ro - 44) * 1000000) / \
-                                        (3422454 - sclk_table->entries[i].clk/100 * 18886376/10000));
+                        volt_without_cks = (uint32_t)((2416794800U + (sclk_table->entries[i].clk/100) * 1476925/10 -(ro - 50) * 1000000) / \
+                                        (2625416 - (sclk_table->entries[i].clk/100) * (12586807/10000)));
+                        volt_with_cks = (uint32_t)((2999656000U - sclk_table->entries[i].clk/100 * 392803 - (ro - 44) * 1000000) / \
+                                        (3422454 - sclk_table->entries[i].clk/100 * (18886376/10000)));
                 }
 
                 if (volt_without_cks >= volt_with_cks)
-                        volt_offset = (uint8_t)CEILING_UCHAR((volt_without_cks - volt_with_cks +
-                                        sclk_table->entries[i].cks_voffset) * 100 / 625);
+                        volt_offset = (uint8_t)(((volt_without_cks - volt_with_cks +
+                                        sclk_table->entries[i].cks_voffset) * 100 + 624) / 625);
 
                 data->smc_state_table.Sclk_voltageOffset[i] = volt_offset;
         }
@@ -2685,7 +2682,7 @@ static int polaris10_get_evv_voltages(struct pp_hwmgr *hwmgr)
 {
         struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
         uint16_t vv_id;
-        uint16_t vddc = 0;
+        uint32_t vddc = 0;
         uint16_t i, j;
         uint32_t sclk = 0;
         struct phm_ppt_v1_information *table_info =
@@ -2716,8 +2713,9 @@ static int polaris10_get_evv_voltages(struct pp_hwmgr *hwmgr)
                                 continue);
 
 
-                /* need to make sure vddc is less than 2v or else, it could burn the ASIC. */
-                PP_ASSERT_WITH_CODE((vddc < 2000 && vddc != 0),
+                /* need to make sure vddc is less than 2v or else, it could burn the ASIC.
+                 * real voltage level in unit of 0.01mv */
+                PP_ASSERT_WITH_CODE((vddc < 200000 && vddc != 0),
                                 "Invalid VDDC value", result = -EINVAL;);
 
                 /* the voltage should not be zero nor equal to leakage ID */
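The volt_offset hunk above drops the float-based CEILING_UCHAR macro in favor of pure integer math: adding divisor-minus-one before dividing rounds up, i.e. (x * 100 + 624) / 625 computes ceil(x * 100 / 625). A small standalone demonstration (sample values made up for illustration):

#include <stdint.h>
#include <stdio.h>

static uint8_t volt_offset_ceil(uint32_t delta)
{
        /* (a + b - 1) / b == ceil(a / b) for unsigned integers */
        return (uint8_t)((delta * 100 + 624) / 625);
}

int main(void)
{
        const uint32_t samples[] = { 0, 1, 6, 7, 625, 626 };
        unsigned int i;

        for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
                printf("delta=%u -> offset=%u\n",
                       (unsigned int)samples[i], volt_offset_ceil(samples[i]));
        return 0;
}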
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c
index bf4e18fd3872..90b35c5c10a4 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c
@@ -1256,7 +1256,7 @@ int atomctrl_set_ac_timing_ai(struct pp_hwmgr *hwmgr, uint32_t memory_clock,
 }
 
 int atomctrl_get_voltage_evv_on_sclk_ai(struct pp_hwmgr *hwmgr, uint8_t voltage_type,
-                uint32_t sclk, uint16_t virtual_voltage_Id, uint16_t *voltage)
+                uint32_t sclk, uint16_t virtual_voltage_Id, uint32_t *voltage)
 {
 
         int result;
@@ -1274,7 +1274,7 @@ int atomctrl_get_voltage_evv_on_sclk_ai(struct pp_hwmgr *hwmgr, uint8_t voltage_
         if (0 != result)
                 return result;
 
-        *voltage = get_voltage_info_param_space.usVoltageLevel;
+        *voltage = ((GET_EVV_VOLTAGE_INFO_OUTPUT_PARAMETER_V1_3 *)(&get_voltage_info_param_space))->ulVoltageLevel;
 
         return result;
 }
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.h b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.h
index 248c5db5f380..1e35a9625baf 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.h
@@ -305,7 +305,7 @@ extern int atomctrl_get_engine_pll_dividers_ai(struct pp_hwmgr *hwmgr, uint32_t
 extern int atomctrl_set_ac_timing_ai(struct pp_hwmgr *hwmgr, uint32_t memory_clock,
                                                 uint8_t level);
 extern int atomctrl_get_voltage_evv_on_sclk_ai(struct pp_hwmgr *hwmgr, uint8_t voltage_type,
-                uint32_t sclk, uint16_t virtual_voltage_Id, uint16_t *voltage);
+                uint32_t sclk, uint16_t virtual_voltage_Id, uint32_t *voltage);
 extern int atomctrl_get_smc_sclk_range_table(struct pp_hwmgr *hwmgr, struct pp_atom_ctrl_sclk_range_table *table);
 
 extern int atomctrl_get_avfs_information(struct pp_hwmgr *hwmgr, struct pp_atom_ctrl__avfs_parameters *param);
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.c
index 233eb7f36c1d..5d0f655bf160 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.c
@@ -1302,7 +1302,7 @@ static int tonga_populate_smc_mvdd_table(struct pp_hwmgr *hwmgr,
                         table->Smio[count] |=
                                 data->mvdd_voltage_table.entries[count].smio_low;
                 }
-                table->SmioMask2 = data->vddci_voltage_table.mask_low;
+                table->SmioMask2 = data->mvdd_voltage_table.mask_low;
 
                 CONVERT_FROM_HOST_TO_SMC_UL(table->MvddLevelCount);
         }
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_processpptables.c b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_processpptables.c
index 671fdb4d615a..dccc859f638c 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_processpptables.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_processpptables.c
@@ -302,7 +302,7 @@ static int init_dpm_2_parameters(
                 (((unsigned long)powerplay_table) + le16_to_cpu(powerplay_table->usPPMTableOffset));
 
         if (0 != powerplay_table->usPPMTableOffset) {
-                if (1 == get_platform_power_management_table(hwmgr, atom_ppm_table)) {
+                if (get_platform_power_management_table(hwmgr, atom_ppm_table) == 0) {
                         phm_cap_set(hwmgr->platform_descriptor.platformCaps,
                                         PHM_PlatformCaps_EnablePlatformPowerManagement);
                 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgf119.c
index 22706c0a54b5..49bd5da194e1 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgf119.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgf119.c
@@ -40,7 +40,8 @@ static int
 gf119_sor_dp_pattern(struct nvkm_output_dp *outp, int pattern)
 {
         struct nvkm_device *device = outp->base.disp->engine.subdev.device;
-        nvkm_mask(device, 0x61c110, 0x0f0f0f0f, 0x01010101 * pattern);
+        const u32 soff = gf119_sor_soff(outp);
+        nvkm_mask(device, 0x61c110 + soff, 0x0f0f0f0f, 0x01010101 * pattern);
         return 0;
 }
 
diff --git a/drivers/gpu/drm/sun4i/sun4i_crtc.c b/drivers/gpu/drm/sun4i/sun4i_crtc.c
index 4182a21f5923..41cacecbea9a 100644
--- a/drivers/gpu/drm/sun4i/sun4i_crtc.c
+++ b/drivers/gpu/drm/sun4i/sun4i_crtc.c
@@ -65,6 +65,14 @@ static void sun4i_crtc_disable(struct drm_crtc *crtc)
         DRM_DEBUG_DRIVER("Disabling the CRTC\n");
 
         sun4i_tcon_disable(drv->tcon);
+
+        if (crtc->state->event && !crtc->state->active) {
+                spin_lock_irq(&crtc->dev->event_lock);
+                drm_crtc_send_vblank_event(crtc, crtc->state->event);
+                spin_unlock_irq(&crtc->dev->event_lock);
+
+                crtc->state->event = NULL;
+        }
 }
 
 static void sun4i_crtc_enable(struct drm_crtc *crtc)
diff --git a/drivers/gpu/drm/sun4i/sun4i_drv.c b/drivers/gpu/drm/sun4i/sun4i_drv.c
index 257d2b4f3645..937394cbc241 100644
--- a/drivers/gpu/drm/sun4i/sun4i_drv.c
+++ b/drivers/gpu/drm/sun4i/sun4i_drv.c
@@ -92,7 +92,7 @@ static struct drm_driver sun4i_drv_driver = {
         /* Frame Buffer Operations */
 
         /* VBlank Operations */
-        .get_vblank_counter     = drm_vblank_count,
+        .get_vblank_counter     = drm_vblank_no_hw_counter,
         .enable_vblank          = sun4i_drv_enable_vblank,
         .disable_vblank         = sun4i_drv_disable_vblank,
 };
@@ -310,6 +310,7 @@ static int sun4i_drv_probe(struct platform_device *pdev)
 
                 count += sun4i_drv_add_endpoints(&pdev->dev, &match,
                                                 pipeline);
+                of_node_put(pipeline);
 
                 DRM_DEBUG_DRIVER("Queued %d outputs on pipeline %d\n",
                                  count, i);
diff --git a/drivers/irqchip/irq-mips-gic.c b/drivers/irqchip/irq-mips-gic.c
index 8a4adbeb2b8c..70ed1d0151b8 100644
--- a/drivers/irqchip/irq-mips-gic.c
+++ b/drivers/irqchip/irq-mips-gic.c
@@ -718,7 +718,7 @@ static int gic_shared_irq_domain_map(struct irq_domain *d, unsigned int virq,
 
         spin_lock_irqsave(&gic_lock, flags);
         gic_map_to_pin(intr, gic_cpu_pin);
-        gic_map_to_vpe(intr, vpe);
+        gic_map_to_vpe(intr, mips_cm_vp_id(vpe));
         for (i = 0; i < min(gic_vpes, NR_CPUS); i++)
                 clear_bit(intr, pcpu_masks[i].pcpu_mask);
         set_bit(intr, pcpu_masks[vpe].pcpu_mask);
@@ -959,7 +959,7 @@ int gic_ipi_domain_match(struct irq_domain *d, struct device_node *node,
         switch (bus_token) {
         case DOMAIN_BUS_IPI:
                 is_ipi = d->bus_token == bus_token;
-                return to_of_node(d->fwnode) == node && is_ipi;
+                return (!node || to_of_node(d->fwnode) == node) && is_ipi;
                 break;
         default:
                 return 0;
diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c
index ca81f46ea1aa..edc70ffad660 100644
--- a/drivers/net/bonding/bond_3ad.c
+++ b/drivers/net/bonding/bond_3ad.c
@@ -101,11 +101,14 @@ enum ad_link_speed_type {
 #define MAC_ADDRESS_EQUAL(A, B)        \
         ether_addr_equal_64bits((const u8 *)A, (const u8 *)B)
 
-static struct mac_addr null_mac_addr = { { 0, 0, 0, 0, 0, 0 } };
+static const u8 null_mac_addr[ETH_ALEN + 2] __long_aligned = {
+        0, 0, 0, 0, 0, 0
+};
 static u16 ad_ticks_per_sec;
 static const int ad_delta_in_ticks = (AD_TIMER_INTERVAL * HZ) / 1000;
 
-static const u8 lacpdu_mcast_addr[ETH_ALEN] = MULTICAST_LACPDU_ADDR;
+static const u8 lacpdu_mcast_addr[ETH_ALEN + 2] __long_aligned =
+        MULTICAST_LACPDU_ADDR;
 
 /* ================= main 802.3ad protocol functions ================== */
 static int ad_lacpdu_send(struct port *port);
@@ -1739,7 +1742,7 @@ static void ad_clear_agg(struct aggregator *aggregator)
         aggregator->is_individual = false;
         aggregator->actor_admin_aggregator_key = 0;
         aggregator->actor_oper_aggregator_key = 0;
-        aggregator->partner_system = null_mac_addr;
+        eth_zero_addr(aggregator->partner_system.mac_addr_value);
         aggregator->partner_system_priority = 0;
         aggregator->partner_oper_aggregator_key = 0;
         aggregator->receive_state = 0;
@@ -1761,7 +1764,7 @@ static void ad_initialize_agg(struct aggregator *aggregator)
         if (aggregator) {
                 ad_clear_agg(aggregator);
 
-                aggregator->aggregator_mac_address = null_mac_addr;
+                eth_zero_addr(aggregator->aggregator_mac_address.mac_addr_value);
                 aggregator->aggregator_identifier = 0;
                 aggregator->slave = NULL;
         }
diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c
index c5ac160a8ae9..551f0f8dead3 100644
--- a/drivers/net/bonding/bond_alb.c
+++ b/drivers/net/bonding/bond_alb.c
@@ -42,13 +42,10 @@
 
 
 
-#ifndef __long_aligned
-#define __long_aligned __attribute__((aligned((sizeof(long)))))
-#endif
-static const u8 mac_bcast[ETH_ALEN] __long_aligned = {
+static const u8 mac_bcast[ETH_ALEN + 2] __long_aligned = {
         0xff, 0xff, 0xff, 0xff, 0xff, 0xff
 };
-static const u8 mac_v6_allmcast[ETH_ALEN] __long_aligned = {
+static const u8 mac_v6_allmcast[ETH_ALEN + 2] __long_aligned = {
         0x33, 0x33, 0x00, 0x00, 0x00, 0x01
 };
 static const int alb_delta_in_ticks = HZ / ALB_TIMER_TICKS_PER_SEC;
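The bonding hunks above pad every address that reaches MAC_ADDRESS_EQUAL / ether_addr_equal_64bits() to ETH_ALEN + 2 bytes and align it, because that helper is permitted to compare the 6-byte address with a single 8-byte-wide load. A sketch of the declaration pattern, spelling out the alignment attribute rather than relying on where this series defines __long_aligned; the address itself is hypothetical:

#include <linux/etherdevice.h>

/* two bytes of tail padding so an 8-byte read stays in bounds */
static const u8 foo_mcast_addr[ETH_ALEN + 2]
        __attribute__((aligned(sizeof(long)))) = {
        0x01, 0x80, 0xc2, 0x00, 0x00, 0x02
};

static bool foo_frame_is_ours(const u8 *dest)
{
        /* both operands must tolerate an 8-byte-wide read */
        return ether_addr_equal_64bits(foo_mcast_addr, dest);
}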
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 941ec99cd3b6..a2afa3be17a4 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -1584,6 +1584,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
         }
 
         /* check for initial state */
+        new_slave->link = BOND_LINK_NOCHANGE;
         if (bond->params.miimon) {
                 if (bond_check_dev_link(bond, slave_dev, 0) == BMSR_LSTATUS) {
                         if (bond->params.updelay) {
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
index 543bf38105c9..bfa26a2590c9 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.c
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
@@ -392,7 +392,7 @@ static void bcm_sysport_get_stats(struct net_device *dev,
                 else
                         p = (char *)priv;
                 p += s->stat_offset;
-                data[i] = *(u32 *)p;
+                data[i] = *(unsigned long *)p;
         }
 }
 
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h
index c4b262ca7d43..2accab386323 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h
@@ -36,8 +36,8 @@
 #define __T4FW_VERSION_H__
 
 #define T4FW_VERSION_MAJOR 0x01
-#define T4FW_VERSION_MINOR 0x0E
-#define T4FW_VERSION_MICRO 0x04
+#define T4FW_VERSION_MINOR 0x0F
+#define T4FW_VERSION_MICRO 0x25
 #define T4FW_VERSION_BUILD 0x00
 
 #define T4FW_MIN_VERSION_MAJOR 0x01
@@ -45,8 +45,8 @@
 #define T4FW_MIN_VERSION_MICRO 0x00
 
 #define T5FW_VERSION_MAJOR 0x01
-#define T5FW_VERSION_MINOR 0x0E
-#define T5FW_VERSION_MICRO 0x04
+#define T5FW_VERSION_MINOR 0x0F
+#define T5FW_VERSION_MICRO 0x25
 #define T5FW_VERSION_BUILD 0x00
 
 #define T5FW_MIN_VERSION_MAJOR 0x00
@@ -54,8 +54,8 @@
 #define T5FW_MIN_VERSION_MICRO 0x00
 
 #define T6FW_VERSION_MAJOR 0x01
-#define T6FW_VERSION_MINOR 0x0E
-#define T6FW_VERSION_MICRO 0x04
+#define T6FW_VERSION_MINOR 0x0F
+#define T6FW_VERSION_MICRO 0x25
 #define T6FW_VERSION_BUILD 0x00
 
 #define T6FW_MIN_VERSION_MAJOR 0x00
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index 73f745205a1c..2b2e2f8c6369 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -154,16 +154,6 @@ void __ew32(struct e1000_hw *hw, unsigned long reg, u32 val)
                 writel(val, hw->hw_addr + reg);
 }
 
-static bool e1000e_vlan_used(struct e1000_adapter *adapter)
-{
-        u16 vid;
-
-        for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
-                return true;
-
-        return false;
-}
-
 /**
  * e1000_regdump - register printout routine
  * @hw: pointer to the HW structure
@@ -3453,8 +3443,7 @@ static void e1000e_set_rx_mode(struct net_device *netdev)
 
         ew32(RCTL, rctl);
 
-        if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX ||
-            e1000e_vlan_used(adapter))
+        if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX)
                 e1000e_vlan_strip_enable(adapter);
         else
                 e1000e_vlan_strip_disable(adapter);
@@ -6926,6 +6915,14 @@ static netdev_features_t e1000_fix_features(struct net_device *netdev,
         if ((hw->mac.type >= e1000_pch2lan) && (netdev->mtu > ETH_DATA_LEN))
                 features &= ~NETIF_F_RXFCS;
 
+        /* Since there is no support for separate Rx/Tx vlan accel
+         * enable/disable make sure Tx flag is always in same state as Rx.
+         */
+        if (features & NETIF_F_HW_VLAN_CTAG_RX)
+                features |= NETIF_F_HW_VLAN_CTAG_TX;
+        else
+                features &= ~NETIF_F_HW_VLAN_CTAG_TX;
+
         return features;
 }
 
diff --git a/drivers/net/ethernet/intel/ixgbevf/mbx.c b/drivers/net/ethernet/intel/ixgbevf/mbx.c
index 61a80da8b6f0..2819abc454c7 100644
--- a/drivers/net/ethernet/intel/ixgbevf/mbx.c
+++ b/drivers/net/ethernet/intel/ixgbevf/mbx.c
@@ -85,7 +85,7 @@ static s32 ixgbevf_poll_for_ack(struct ixgbe_hw *hw)
 static s32 ixgbevf_read_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size)
 {
         struct ixgbe_mbx_info *mbx = &hw->mbx;
-        s32 ret_val = -IXGBE_ERR_MBX;
+        s32 ret_val = IXGBE_ERR_MBX;
 
         if (!mbx->ops.read)
                 goto out;
@@ -111,7 +111,7 @@ out:
 static s32 ixgbevf_write_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size)
 {
         struct ixgbe_mbx_info *mbx = &hw->mbx;
-        s32 ret_val = -IXGBE_ERR_MBX;
+        s32 ret_val = IXGBE_ERR_MBX;
 
         /* exit if either we can't write or there isn't a defined timeout */
         if (!mbx->ops.write || !mbx->timeout)
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index a6d26d351dfc..d5d263bda333 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -3458,6 +3458,8 @@ static int mvneta_open(struct net_device *dev)
         return 0;
 
 err_free_irq:
+        unregister_cpu_notifier(&pp->cpu_notifier);
+        on_each_cpu(mvneta_percpu_disable, pp, true);
         free_percpu_irq(pp->dev->irq, pp->ports);
 err_cleanup_txqs:
         mvneta_cleanup_txqs(pp);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
index 0b4986268cc9..d6e2a1cae19a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
@@ -295,6 +295,12 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op,
         case MLX5_CMD_OP_DESTROY_FLOW_GROUP:
         case MLX5_CMD_OP_DELETE_FLOW_TABLE_ENTRY:
         case MLX5_CMD_OP_DEALLOC_FLOW_COUNTER:
+        case MLX5_CMD_OP_2ERR_QP:
+        case MLX5_CMD_OP_2RST_QP:
+        case MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT:
+        case MLX5_CMD_OP_MODIFY_FLOW_TABLE:
+        case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY:
+        case MLX5_CMD_OP_SET_FLOW_TABLE_ROOT:
                 return MLX5_CMD_STAT_OK;
 
         case MLX5_CMD_OP_QUERY_HCA_CAP:
@@ -321,8 +327,6 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op,
         case MLX5_CMD_OP_RTR2RTS_QP:
         case MLX5_CMD_OP_RTS2RTS_QP:
         case MLX5_CMD_OP_SQERR2RTS_QP:
-        case MLX5_CMD_OP_2ERR_QP:
-        case MLX5_CMD_OP_2RST_QP:
         case MLX5_CMD_OP_QUERY_QP:
         case MLX5_CMD_OP_SQD_RTS_QP:
         case MLX5_CMD_OP_INIT2INIT_QP:
@@ -342,7 +346,6 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op,
         case MLX5_CMD_OP_QUERY_ESW_VPORT_CONTEXT:
         case MLX5_CMD_OP_MODIFY_ESW_VPORT_CONTEXT:
         case MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT:
-        case MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT:
         case MLX5_CMD_OP_QUERY_ROCE_ADDRESS:
         case MLX5_CMD_OP_SET_ROCE_ADDRESS:
         case MLX5_CMD_OP_QUERY_HCA_VPORT_CONTEXT:
@@ -390,11 +393,12 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op,
         case MLX5_CMD_OP_CREATE_RQT:
         case MLX5_CMD_OP_MODIFY_RQT:
         case MLX5_CMD_OP_QUERY_RQT:
+
         case MLX5_CMD_OP_CREATE_FLOW_TABLE:
         case MLX5_CMD_OP_QUERY_FLOW_TABLE:
         case MLX5_CMD_OP_CREATE_FLOW_GROUP:
         case MLX5_CMD_OP_QUERY_FLOW_GROUP:
-        case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY:
+
         case MLX5_CMD_OP_QUERY_FLOW_TABLE_ENTRY:
         case MLX5_CMD_OP_ALLOC_FLOW_COUNTER:
         case MLX5_CMD_OP_QUERY_FLOW_COUNTER:
@@ -602,11 +606,36 @@ static void dump_command(struct mlx5_core_dev *dev,
         pr_debug("\n");
 }
 
+static u16 msg_to_opcode(struct mlx5_cmd_msg *in)
+{
+        struct mlx5_inbox_hdr *hdr = (struct mlx5_inbox_hdr *)(in->first.data);
+
+        return be16_to_cpu(hdr->opcode);
+}
+
+static void cb_timeout_handler(struct work_struct *work)
+{
+        struct delayed_work *dwork = container_of(work, struct delayed_work,
+                                                  work);
+        struct mlx5_cmd_work_ent *ent = container_of(dwork,
+                                                     struct mlx5_cmd_work_ent,
+                                                     cb_timeout_work);
+        struct mlx5_core_dev *dev = container_of(ent->cmd, struct mlx5_core_dev,
+                                                 cmd);
+
+        ent->ret = -ETIMEDOUT;
+        mlx5_core_warn(dev, "%s(0x%x) timeout. Will cause a leak of a command resource\n",
+                       mlx5_command_str(msg_to_opcode(ent->in)),
+                       msg_to_opcode(ent->in));
+        mlx5_cmd_comp_handler(dev, 1UL << ent->idx);
+}
+
 static void cmd_work_handler(struct work_struct *work)
 {
         struct mlx5_cmd_work_ent *ent = container_of(work, struct mlx5_cmd_work_ent, work);
         struct mlx5_cmd *cmd = ent->cmd;
         struct mlx5_core_dev *dev = container_of(cmd, struct mlx5_core_dev, cmd);
+        unsigned long cb_timeout = msecs_to_jiffies(MLX5_CMD_TIMEOUT_MSEC);
         struct mlx5_cmd_layout *lay;
         struct semaphore *sem;
         unsigned long flags;
@@ -647,6 +676,9 @@ static void cmd_work_handler(struct work_struct *work)
         dump_command(dev, ent, 1);
         ent->ts1 = ktime_get_ns();
 
+        if (ent->callback)
+                schedule_delayed_work(&ent->cb_timeout_work, cb_timeout);
+
         /* ring doorbell after the descriptor is valid */
         mlx5_core_dbg(dev, "writing 0x%x to command doorbell\n", 1 << ent->idx);
         wmb();
@@ -691,13 +723,6 @@ static const char *deliv_status_to_str(u8 status)
         }
 }
 
-static u16 msg_to_opcode(struct mlx5_cmd_msg *in)
-{
-        struct mlx5_inbox_hdr *hdr = (struct mlx5_inbox_hdr *)(in->first.data);
-
-        return be16_to_cpu(hdr->opcode);
-}
-
 static int wait_func(struct mlx5_core_dev *dev, struct mlx5_cmd_work_ent *ent)
 {
         unsigned long timeout = msecs_to_jiffies(MLX5_CMD_TIMEOUT_MSEC);
@@ -706,13 +731,13 @@ static int wait_func(struct mlx5_core_dev *dev, struct mlx5_cmd_work_ent *ent)
 
         if (cmd->mode == CMD_MODE_POLLING) {
                 wait_for_completion(&ent->done);
-                err = ent->ret;
-        } else {
-                if (!wait_for_completion_timeout(&ent->done, timeout))
-                        err = -ETIMEDOUT;
-                else
-                        err = 0;
+        } else if (!wait_for_completion_timeout(&ent->done, timeout)) {
+                ent->ret = -ETIMEDOUT;
+                mlx5_cmd_comp_handler(dev, 1UL << ent->idx);
         }
+
+        err = ent->ret;
+
         if (err == -ETIMEDOUT) {
                 mlx5_core_warn(dev, "%s(0x%x) timeout. Will cause a leak of a command resource\n",
                                mlx5_command_str(msg_to_opcode(ent->in)),
@@ -761,6 +786,7 @@ static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in,
         if (!callback)
                 init_completion(&ent->done);
 
+        INIT_DELAYED_WORK(&ent->cb_timeout_work, cb_timeout_handler);
         INIT_WORK(&ent->work, cmd_work_handler);
         if (page_queue) {
                 cmd_work_handler(&ent->work);
@@ -770,28 +796,26 @@ static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in,
                 goto out_free;
         }
 
-        if (!callback) {
-                err = wait_func(dev, ent);
-                if (err == -ETIMEDOUT)
-                        goto out;
-
-                ds = ent->ts2 - ent->ts1;
-                op = be16_to_cpu(((struct mlx5_inbox_hdr *)in->first.data)->opcode);
-                if (op < ARRAY_SIZE(cmd->stats)) {
-                        stats = &cmd->stats[op];
-                        spin_lock_irq(&stats->lock);
-                        stats->sum += ds;
-                        ++stats->n;
-                        spin_unlock_irq(&stats->lock);
-                }
-                mlx5_core_dbg_mask(dev, 1 << MLX5_CMD_TIME,
-                                   "fw exec time for %s is %lld nsec\n",
-                                   mlx5_command_str(op), ds);
-                *status = ent->status;
-                free_cmd(ent);
-        }
+        if (callback)
+                goto out;
 
-        return err;
+        err = wait_func(dev, ent);
+        if (err == -ETIMEDOUT)
+                goto out_free;
+
+        ds = ent->ts2 - ent->ts1;
+        op = be16_to_cpu(((struct mlx5_inbox_hdr *)in->first.data)->opcode);
+        if (op < ARRAY_SIZE(cmd->stats)) {
+                stats = &cmd->stats[op];
+                spin_lock_irq(&stats->lock);
+                stats->sum += ds;
+                ++stats->n;
+                spin_unlock_irq(&stats->lock);
+        }
+        mlx5_core_dbg_mask(dev, 1 << MLX5_CMD_TIME,
+                           "fw exec time for %s is %lld nsec\n",
+                           mlx5_command_str(op), ds);
+        *status = ent->status;
 
 out_free:
         free_cmd(ent);
@@ -1181,41 +1205,30 @@ err_dbg:
         return err;
 }
 
-void mlx5_cmd_use_events(struct mlx5_core_dev *dev)
+static void mlx5_cmd_change_mod(struct mlx5_core_dev *dev, int mode)
 {
         struct mlx5_cmd *cmd = &dev->cmd;
         int i;
 
         for (i = 0; i < cmd->max_reg_cmds; i++)
                 down(&cmd->sem);
-
         down(&cmd->pages_sem);
 
-        flush_workqueue(cmd->wq);
-
-        cmd->mode = CMD_MODE_EVENTS;
+        cmd->mode = mode;
 
         up(&cmd->pages_sem);
         for (i = 0; i < cmd->max_reg_cmds; i++)
                 up(&cmd->sem);
 }
 
-void mlx5_cmd_use_polling(struct mlx5_core_dev *dev)
+void mlx5_cmd_use_events(struct mlx5_core_dev *dev)
 {
-        struct mlx5_cmd *cmd = &dev->cmd;
-        int i;
-
-        for (i = 0; i < cmd->max_reg_cmds; i++)
-                down(&cmd->sem);
-
-        down(&cmd->pages_sem);
-
-        flush_workqueue(cmd->wq);
-        cmd->mode = CMD_MODE_POLLING;
+        mlx5_cmd_change_mod(dev, CMD_MODE_EVENTS);
+}
 
-        up(&cmd->pages_sem);
-        for (i = 0; i < cmd->max_reg_cmds; i++)
-                up(&cmd->sem);
+void mlx5_cmd_use_polling(struct mlx5_core_dev *dev)
+{
+        mlx5_cmd_change_mod(dev, CMD_MODE_POLLING);
 }
 
 static void free_msg(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *msg)
@@ -1251,6 +1264,8 @@ void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec)
                         struct semaphore *sem;
 
                         ent = cmd->ent_arr[i];
+                        if (ent->callback)
+                                cancel_delayed_work(&ent->cb_timeout_work);
                         if (ent->page_queue)
                                 sem = &cmd->pages_sem;
                         else
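The cmd.c changes above arm one delayed work per asynchronous command and cancel it from the completion handler; if the timer ever fires, it pushes the stuck command through the normal completion path with -ETIMEDOUT. The generic shape of that scheme, as a hedged sketch with hypothetical names (struct foo_req, foo_req_complete), not the mlx5 API:

#include <linux/workqueue.h>
#include <linux/errno.h>

struct foo_req {                        /* hypothetical request */
        struct delayed_work timeout_work;
        int ret;
};

static void foo_req_complete(struct foo_req *r)
{
        /* shared completion path (wake waiter, run callback, ...) */
}

static void foo_req_timeout(struct work_struct *work)
{
        struct foo_req *r = container_of(to_delayed_work(work),
                                         struct foo_req, timeout_work);

        r->ret = -ETIMEDOUT;
        foo_req_complete(r);    /* reuse the ordinary completion path */
}

static void foo_req_post(struct foo_req *r, unsigned long tmo_jiffies)
{
        INIT_DELAYED_WORK(&r->timeout_work, foo_req_timeout);
        schedule_delayed_work(&r->timeout_work, tmo_jiffies);
        /* ... submit the request to hardware ... */
}

static void foo_req_done(struct foo_req *r)     /* e.g. from the EQ handler */
{
        cancel_delayed_work(&r->timeout_work);
        foo_req_complete(r);
}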
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index baa991a23475..943b1bd434bf 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -145,7 +145,6 @@ struct mlx5e_umr_wqe {
 
 #ifdef CONFIG_MLX5_CORE_EN_DCB
 #define MLX5E_MAX_BW_ALLOC  100 /* Max percentage of BW allocation */
-#define MLX5E_MIN_BW_ALLOC    1 /* Min percentage of BW allocation */
 #endif
 
 struct mlx5e_params {
@@ -191,6 +190,7 @@ struct mlx5e_tstamp {
 enum {
         MLX5E_RQ_STATE_POST_WQES_ENABLE,
         MLX5E_RQ_STATE_UMR_WQE_IN_PROGRESS,
+        MLX5E_RQ_STATE_FLUSH_TIMEOUT,
 };
 
 struct mlx5e_cq {
@@ -220,6 +220,8 @@ typedef void (*mlx5e_fp_handle_rx_cqe)(struct mlx5e_rq *rq,
 typedef int (*mlx5e_fp_alloc_wqe)(struct mlx5e_rq *rq, struct mlx5e_rx_wqe *wqe,
                                   u16 ix);
 
+typedef void (*mlx5e_fp_dealloc_wqe)(struct mlx5e_rq *rq, u16 ix);
+
 struct mlx5e_dma_info {
         struct page     *page;
         dma_addr_t      addr;
@@ -241,6 +243,7 @@ struct mlx5e_rq {
         struct mlx5e_cq        cq;
         mlx5e_fp_handle_rx_cqe handle_rx_cqe;
         mlx5e_fp_alloc_wqe     alloc_wqe;
+        mlx5e_fp_dealloc_wqe   dealloc_wqe;
 
         unsigned long          state;
         int                    ix;
@@ -305,6 +308,7 @@ struct mlx5e_sq_dma {
 enum {
         MLX5E_SQ_STATE_WAKE_TXQ_ENABLE,
         MLX5E_SQ_STATE_BF_ENABLE,
+        MLX5E_SQ_STATE_TX_TIMEOUT,
 };
 
 struct mlx5e_ico_wqe_info {
@@ -538,6 +542,7 @@ struct mlx5e_priv {
         struct workqueue_struct    *wq;
         struct work_struct         update_carrier_work;
         struct work_struct         set_rx_mode_work;
+        struct work_struct         tx_timeout_work;
         struct delayed_work        update_stats_work;
 
         struct mlx5_core_dev      *mdev;
@@ -589,12 +594,16 @@ void mlx5e_cq_error_event(struct mlx5_core_cq *mcq, enum mlx5_event event);
 int mlx5e_napi_poll(struct napi_struct *napi, int budget);
 bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget);
 int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget);
+void mlx5e_free_tx_descs(struct mlx5e_sq *sq);
+void mlx5e_free_rx_descs(struct mlx5e_rq *rq);
 
 void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
 void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
 bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq);
 int mlx5e_alloc_rx_wqe(struct mlx5e_rq *rq, struct mlx5e_rx_wqe *wqe, u16 ix);
 int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, struct mlx5e_rx_wqe *wqe, u16 ix);
+void mlx5e_dealloc_rx_wqe(struct mlx5e_rq *rq, u16 ix);
+void mlx5e_dealloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix);
 void mlx5e_post_rx_fragmented_mpwqe(struct mlx5e_rq *rq);
 void mlx5e_complete_rx_linear_mpwqe(struct mlx5e_rq *rq,
                                     struct mlx5_cqe64 *cqe,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
index b2db180ae2a5..c585349e05c3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
@@ -96,7 +96,7 @@ static void mlx5e_build_tc_tx_bw(struct ieee_ets *ets, u8 *tc_tx_bw,
                         tc_tx_bw[i] = MLX5E_MAX_BW_ALLOC;
                         break;
                 case IEEE_8021QAZ_TSA_ETS:
-                        tc_tx_bw[i] = ets->tc_tx_bw[i] ?: MLX5E_MIN_BW_ALLOC;
+                        tc_tx_bw[i] = ets->tc_tx_bw[i];
                         break;
                 }
         }
@@ -140,8 +140,12 @@ static int mlx5e_dbcnl_validate_ets(struct ieee_ets *ets)
 
         /* Validate Bandwidth Sum */
         for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
-                if (ets->tc_tsa[i] == IEEE_8021QAZ_TSA_ETS)
+                if (ets->tc_tsa[i] == IEEE_8021QAZ_TSA_ETS) {
+                        if (!ets->tc_tx_bw[i])
+                                return -EINVAL;
+
                         bw_sum += ets->tc_tx_bw[i];
+                }
         }
 
         if (bw_sum != 0 && bw_sum != 100)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index cb6defd71fc1..7a0dca29c642 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -39,6 +39,13 @@
39#include "eswitch.h" 39#include "eswitch.h"
40#include "vxlan.h" 40#include "vxlan.h"
41 41
42enum {
43 MLX5_EN_QP_FLUSH_TIMEOUT_MS = 5000,
44 MLX5_EN_QP_FLUSH_MSLEEP_QUANT = 20,
45 MLX5_EN_QP_FLUSH_MAX_ITER = MLX5_EN_QP_FLUSH_TIMEOUT_MS /
46 MLX5_EN_QP_FLUSH_MSLEEP_QUANT,
47};
48
42struct mlx5e_rq_param { 49struct mlx5e_rq_param {
43 u32 rqc[MLX5_ST_SZ_DW(rqc)]; 50 u32 rqc[MLX5_ST_SZ_DW(rqc)];
44 struct mlx5_wq_param wq; 51 struct mlx5_wq_param wq;
@@ -74,10 +81,13 @@ static void mlx5e_update_carrier(struct mlx5e_priv *priv)
74 port_state = mlx5_query_vport_state(mdev, 81 port_state = mlx5_query_vport_state(mdev,
75 MLX5_QUERY_VPORT_STATE_IN_OP_MOD_VNIC_VPORT, 0); 82 MLX5_QUERY_VPORT_STATE_IN_OP_MOD_VNIC_VPORT, 0);
76 83
77 if (port_state == VPORT_STATE_UP) 84 if (port_state == VPORT_STATE_UP) {
85 netdev_info(priv->netdev, "Link up\n");
78 netif_carrier_on(priv->netdev); 86 netif_carrier_on(priv->netdev);
79 else 87 } else {
88 netdev_info(priv->netdev, "Link down\n");
80 netif_carrier_off(priv->netdev); 89 netif_carrier_off(priv->netdev);
90 }
81} 91}
82 92
83static void mlx5e_update_carrier_work(struct work_struct *work) 93static void mlx5e_update_carrier_work(struct work_struct *work)
@@ -91,6 +101,26 @@ static void mlx5e_update_carrier_work(struct work_struct *work)
91 mutex_unlock(&priv->state_lock); 101 mutex_unlock(&priv->state_lock);
92} 102}
93 103
104static void mlx5e_tx_timeout_work(struct work_struct *work)
105{
106 struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
107 tx_timeout_work);
108 int err;
109
110 rtnl_lock();
111 mutex_lock(&priv->state_lock);
112 if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
113 goto unlock;
114 mlx5e_close_locked(priv->netdev);
115 err = mlx5e_open_locked(priv->netdev);
116 if (err)
117 netdev_err(priv->netdev, "mlx5e_open_locked failed recovering from a tx_timeout, err(%d).\n",
118 err);
119unlock:
120 mutex_unlock(&priv->state_lock);
121 rtnl_unlock();
122}
123
94static void mlx5e_update_sw_counters(struct mlx5e_priv *priv) 124static void mlx5e_update_sw_counters(struct mlx5e_priv *priv)
95{ 125{
96 struct mlx5e_sw_stats *s = &priv->stats.sw; 126 struct mlx5e_sw_stats *s = &priv->stats.sw;
@@ -305,6 +335,7 @@ static int mlx5e_create_rq(struct mlx5e_channel *c,
305 } 335 }
306 rq->handle_rx_cqe = mlx5e_handle_rx_cqe_mpwrq; 336 rq->handle_rx_cqe = mlx5e_handle_rx_cqe_mpwrq;
307 rq->alloc_wqe = mlx5e_alloc_rx_mpwqe; 337 rq->alloc_wqe = mlx5e_alloc_rx_mpwqe;
338 rq->dealloc_wqe = mlx5e_dealloc_rx_mpwqe;
308 339
309 rq->mpwqe_stride_sz = BIT(priv->params.mpwqe_log_stride_sz); 340 rq->mpwqe_stride_sz = BIT(priv->params.mpwqe_log_stride_sz);
310 rq->mpwqe_num_strides = BIT(priv->params.mpwqe_log_num_strides); 341 rq->mpwqe_num_strides = BIT(priv->params.mpwqe_log_num_strides);
@@ -320,6 +351,7 @@ static int mlx5e_create_rq(struct mlx5e_channel *c,
320 } 351 }
321 rq->handle_rx_cqe = mlx5e_handle_rx_cqe; 352 rq->handle_rx_cqe = mlx5e_handle_rx_cqe;
322 rq->alloc_wqe = mlx5e_alloc_rx_wqe; 353 rq->alloc_wqe = mlx5e_alloc_rx_wqe;
354 rq->dealloc_wqe = mlx5e_dealloc_rx_wqe;
323 355
324 rq->wqe_sz = (priv->params.lro_en) ? 356 rq->wqe_sz = (priv->params.lro_en) ?
325 priv->params.lro_wqe_sz : 357 priv->params.lro_wqe_sz :
@@ -525,17 +557,25 @@ err_destroy_rq:
525 557
526static void mlx5e_close_rq(struct mlx5e_rq *rq) 558static void mlx5e_close_rq(struct mlx5e_rq *rq)
527{ 559{
560 int tout = 0;
561 int err;
562
528 clear_bit(MLX5E_RQ_STATE_POST_WQES_ENABLE, &rq->state); 563 clear_bit(MLX5E_RQ_STATE_POST_WQES_ENABLE, &rq->state);
529 napi_synchronize(&rq->channel->napi); /* prevent mlx5e_post_rx_wqes */ 564 napi_synchronize(&rq->channel->napi); /* prevent mlx5e_post_rx_wqes */
530 565
531 mlx5e_modify_rq_state(rq, MLX5_RQC_STATE_RDY, MLX5_RQC_STATE_ERR); 566 err = mlx5e_modify_rq_state(rq, MLX5_RQC_STATE_RDY, MLX5_RQC_STATE_ERR);
532 while (!mlx5_wq_ll_is_empty(&rq->wq)) 567 while (!mlx5_wq_ll_is_empty(&rq->wq) && !err &&
533 msleep(20); 568 tout++ < MLX5_EN_QP_FLUSH_MAX_ITER)
569 msleep(MLX5_EN_QP_FLUSH_MSLEEP_QUANT);
570
571 if (err || tout == MLX5_EN_QP_FLUSH_MAX_ITER)
572 set_bit(MLX5E_RQ_STATE_FLUSH_TIMEOUT, &rq->state);
534 573
535 /* avoid destroying rq before mlx5e_poll_rx_cq() is done with it */ 574 /* avoid destroying rq before mlx5e_poll_rx_cq() is done with it */
536 napi_synchronize(&rq->channel->napi); 575 napi_synchronize(&rq->channel->napi);
537 576
538 mlx5e_disable_rq(rq); 577 mlx5e_disable_rq(rq);
578 mlx5e_free_rx_descs(rq);
539 mlx5e_destroy_rq(rq); 579 mlx5e_destroy_rq(rq);
540} 580}
541 581
@@ -782,6 +822,9 @@ static inline void netif_tx_disable_queue(struct netdev_queue *txq)
782 822
783static void mlx5e_close_sq(struct mlx5e_sq *sq) 823static void mlx5e_close_sq(struct mlx5e_sq *sq)
784{ 824{
825 int tout = 0;
826 int err;
827
785 if (sq->txq) { 828 if (sq->txq) {
786 clear_bit(MLX5E_SQ_STATE_WAKE_TXQ_ENABLE, &sq->state); 829 clear_bit(MLX5E_SQ_STATE_WAKE_TXQ_ENABLE, &sq->state);
787 /* prevent netif_tx_wake_queue */ 830 /* prevent netif_tx_wake_queue */
@@ -792,15 +835,24 @@ static void mlx5e_close_sq(struct mlx5e_sq *sq)
792 if (mlx5e_sq_has_room_for(sq, 1)) 835 if (mlx5e_sq_has_room_for(sq, 1))
793 mlx5e_send_nop(sq, true); 836 mlx5e_send_nop(sq, true);
794 837
795 mlx5e_modify_sq(sq, MLX5_SQC_STATE_RDY, MLX5_SQC_STATE_ERR); 838 err = mlx5e_modify_sq(sq, MLX5_SQC_STATE_RDY,
839 MLX5_SQC_STATE_ERR);
840 if (err)
841 set_bit(MLX5E_SQ_STATE_TX_TIMEOUT, &sq->state);
796 } 842 }
797 843
798 while (sq->cc != sq->pc) /* wait till sq is empty */ 844 /* wait till sq is empty, unless a TX timeout occurred on this SQ */
799 msleep(20); 845 while (sq->cc != sq->pc &&
846 !test_bit(MLX5E_SQ_STATE_TX_TIMEOUT, &sq->state)) {
847 msleep(MLX5_EN_QP_FLUSH_MSLEEP_QUANT);
848 if (tout++ > MLX5_EN_QP_FLUSH_MAX_ITER)
849 set_bit(MLX5E_SQ_STATE_TX_TIMEOUT, &sq->state);
850 }
800 851
801 /* avoid destroying sq before mlx5e_poll_tx_cq() is done with it */ 852 /* avoid destroying sq before mlx5e_poll_tx_cq() is done with it */
802 napi_synchronize(&sq->channel->napi); 853 napi_synchronize(&sq->channel->napi);
803 854
855 mlx5e_free_tx_descs(sq);
804 mlx5e_disable_sq(sq); 856 mlx5e_disable_sq(sq);
805 mlx5e_destroy_sq(sq); 857 mlx5e_destroy_sq(sq);
806} 858}
@@ -1658,8 +1710,11 @@ static void mlx5e_netdev_set_tcs(struct net_device *netdev)
1658 1710
1659 netdev_set_num_tc(netdev, ntc); 1711 netdev_set_num_tc(netdev, ntc);
1660 1712
1713 /* Map netdev TCs to offset 0
1714 * We have our own UP to TXQ mapping for QoS
1715 */
1661 for (tc = 0; tc < ntc; tc++) 1716 for (tc = 0; tc < ntc; tc++)
1662 netdev_set_tc_queue(netdev, tc, nch, tc * nch); 1717 netdev_set_tc_queue(netdev, tc, nch, 0);
1663} 1718}
1664 1719
1665int mlx5e_open_locked(struct net_device *netdev) 1720int mlx5e_open_locked(struct net_device *netdev)
@@ -2590,6 +2645,29 @@ static netdev_features_t mlx5e_features_check(struct sk_buff *skb,
2590 return features; 2645 return features;
2591} 2646}
2592 2647
2648static void mlx5e_tx_timeout(struct net_device *dev)
2649{
2650 struct mlx5e_priv *priv = netdev_priv(dev);
2651 bool sched_work = false;
2652 int i;
2653
2654 netdev_err(dev, "TX timeout detected\n");
2655
2656 for (i = 0; i < priv->params.num_channels * priv->params.num_tc; i++) {
2657 struct mlx5e_sq *sq = priv->txq_to_sq_map[i];
2658
2659 if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, i)))
2660 continue;
2661 sched_work = true;
2662 set_bit(MLX5E_SQ_STATE_TX_TIMEOUT, &sq->state);
2663 netdev_err(dev, "TX timeout on queue: %d, SQ: 0x%x, CQ: 0x%x, SQ Cons: 0x%x SQ Prod: 0x%x\n",
2664 i, sq->sqn, sq->cq.mcq.cqn, sq->cc, sq->pc);
2665 }
2666
2667 if (sched_work && test_bit(MLX5E_STATE_OPENED, &priv->state))
2668 schedule_work(&priv->tx_timeout_work);
2669}
2670
2593static const struct net_device_ops mlx5e_netdev_ops_basic = { 2671static const struct net_device_ops mlx5e_netdev_ops_basic = {
2594 .ndo_open = mlx5e_open, 2672 .ndo_open = mlx5e_open,
2595 .ndo_stop = mlx5e_close, 2673 .ndo_stop = mlx5e_close,
@@ -2607,6 +2685,7 @@ static const struct net_device_ops mlx5e_netdev_ops_basic = {
2607#ifdef CONFIG_RFS_ACCEL 2685#ifdef CONFIG_RFS_ACCEL
2608 .ndo_rx_flow_steer = mlx5e_rx_flow_steer, 2686 .ndo_rx_flow_steer = mlx5e_rx_flow_steer,
2609#endif 2687#endif
2688 .ndo_tx_timeout = mlx5e_tx_timeout,
2610}; 2689};
2611 2690
2612static const struct net_device_ops mlx5e_netdev_ops_sriov = { 2691static const struct net_device_ops mlx5e_netdev_ops_sriov = {
@@ -2636,6 +2715,7 @@ static const struct net_device_ops mlx5e_netdev_ops_sriov = {
2636 .ndo_get_vf_config = mlx5e_get_vf_config, 2715 .ndo_get_vf_config = mlx5e_get_vf_config,
2637 .ndo_set_vf_link_state = mlx5e_set_vf_link_state, 2716 .ndo_set_vf_link_state = mlx5e_set_vf_link_state,
2638 .ndo_get_vf_stats = mlx5e_get_vf_stats, 2717 .ndo_get_vf_stats = mlx5e_get_vf_stats,
2718 .ndo_tx_timeout = mlx5e_tx_timeout,
2639}; 2719};
2640 2720
2641static int mlx5e_check_required_hca_cap(struct mlx5_core_dev *mdev) 2721static int mlx5e_check_required_hca_cap(struct mlx5_core_dev *mdev)
@@ -2838,6 +2918,7 @@ static void mlx5e_build_netdev_priv(struct mlx5_core_dev *mdev,
2838 2918
2839 INIT_WORK(&priv->update_carrier_work, mlx5e_update_carrier_work); 2919 INIT_WORK(&priv->update_carrier_work, mlx5e_update_carrier_work);
2840 INIT_WORK(&priv->set_rx_mode_work, mlx5e_set_rx_mode_work); 2920 INIT_WORK(&priv->set_rx_mode_work, mlx5e_set_rx_mode_work);
2921 INIT_WORK(&priv->tx_timeout_work, mlx5e_tx_timeout_work);
2841 INIT_DELAYED_WORK(&priv->update_stats_work, mlx5e_update_stats_work); 2922 INIT_DELAYED_WORK(&priv->update_stats_work, mlx5e_update_stats_work);
2842} 2923}
2843 2924
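The tx-timeout path added above deliberately splits detection from recovery: the ndo_tx_timeout callback only marks the stopped SQs and defers the actual reset to a workqueue. A minimal userspace sketch of that mark-then-defer shape; all names here (NUM_TXQ, struct sq, txq_stopped) are invented for illustration, not the driver's real types.

    /* Userspace model of the mark-then-defer TX timeout pattern above. */
    #include <stdbool.h>
    #include <stdio.h>

    #define NUM_TXQ 8
    #define SQ_STATE_TX_TIMEOUT 0x1UL

    struct sq { unsigned long state; };

    static struct sq sqs[NUM_TXQ];
    static bool txq_stopped[NUM_TXQ];

    static void tx_timeout(void)
    {
        bool sched_work = false;
        int i;

        for (i = 0; i < NUM_TXQ; i++) {
            if (!txq_stopped[i])    /* only queues the stack stopped */
                continue;
            sched_work = true;
            sqs[i].state |= SQ_STATE_TX_TIMEOUT; /* stop completion polling */
            printf("TX timeout on queue %d\n", i);
        }

        if (sched_work)             /* recovery runs once, in process context */
            printf("scheduling recovery work\n");
    }

    int main(void)
    {
        txq_stopped[3] = true;
        tx_timeout();
        return 0;
    }
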
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index 022acc2e8922..9f2a16a507e0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -212,6 +212,20 @@ err_free_skb:
212 return -ENOMEM; 212 return -ENOMEM;
213} 213}
214 214
215void mlx5e_dealloc_rx_wqe(struct mlx5e_rq *rq, u16 ix)
216{
217 struct sk_buff *skb = rq->skb[ix];
218
219 if (skb) {
220 rq->skb[ix] = NULL;
221 dma_unmap_single(rq->pdev,
222 *((dma_addr_t *)skb->cb),
223 rq->wqe_sz,
224 DMA_FROM_DEVICE);
225 dev_kfree_skb(skb);
226 }
227}
228
215static inline int mlx5e_mpwqe_strides_per_page(struct mlx5e_rq *rq) 229static inline int mlx5e_mpwqe_strides_per_page(struct mlx5e_rq *rq)
216{ 230{
217 return rq->mpwqe_num_strides >> MLX5_MPWRQ_WQE_PAGE_ORDER; 231 return rq->mpwqe_num_strides >> MLX5_MPWRQ_WQE_PAGE_ORDER;
@@ -574,6 +588,30 @@ int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, struct mlx5e_rx_wqe *wqe, u16 ix)
574 return 0; 588 return 0;
575} 589}
576 590
591void mlx5e_dealloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
592{
593 struct mlx5e_mpw_info *wi = &rq->wqe_info[ix];
594
595 wi->free_wqe(rq, wi);
596}
597
598void mlx5e_free_rx_descs(struct mlx5e_rq *rq)
599{
600 struct mlx5_wq_ll *wq = &rq->wq;
601 struct mlx5e_rx_wqe *wqe;
602 __be16 wqe_ix_be;
603 u16 wqe_ix;
604
605 while (!mlx5_wq_ll_is_empty(wq)) {
606 wqe_ix_be = *wq->tail_next;
607 wqe_ix = be16_to_cpu(wqe_ix_be);
608 wqe = mlx5_wq_ll_get_wqe(&rq->wq, wqe_ix);
609 rq->dealloc_wqe(rq, wqe_ix);
610 mlx5_wq_ll_pop(&rq->wq, wqe_ix_be,
611 &wqe->next.next_wqe_index);
612 }
613}
614
577#define RQ_CANNOT_POST(rq) \ 615#define RQ_CANNOT_POST(rq) \
578 (!test_bit(MLX5E_RQ_STATE_POST_WQES_ENABLE, &rq->state) || \ 616 (!test_bit(MLX5E_RQ_STATE_POST_WQES_ENABLE, &rq->state) || \
579 test_bit(MLX5E_RQ_STATE_UMR_WQE_IN_PROGRESS, &rq->state)) 617 test_bit(MLX5E_RQ_STATE_UMR_WQE_IN_PROGRESS, &rq->state))
@@ -878,6 +916,9 @@ int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget)
878 struct mlx5e_rq *rq = container_of(cq, struct mlx5e_rq, cq); 916 struct mlx5e_rq *rq = container_of(cq, struct mlx5e_rq, cq);
879 int work_done = 0; 917 int work_done = 0;
880 918
919 if (unlikely(test_bit(MLX5E_RQ_STATE_FLUSH_TIMEOUT, &rq->state)))
920 return 0;
921
881 if (cq->decmprs_left) 922 if (cq->decmprs_left)
882 work_done += mlx5e_decompress_cqes_cont(rq, cq, 0, budget); 923 work_done += mlx5e_decompress_cqes_cont(rq, cq, 0, budget);
883 924
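mlx5e_free_rx_descs() above drains a linked WQ by repeatedly reading the tail index and popping entries until the list is empty. A compilable toy model of that walk, with the chain reduced to a plain next-index array; the mlx5_wq_ll helpers and per-WQE DMA unmapping are stubbed out.

    /* Userspace model of draining a linked WQE list; -1 terminates. */
    #include <stdio.h>

    #define WQ_SZ 8

    static int next_wqe[WQ_SZ];
    static int head = -1;       /* oldest posted entry, -1 when empty */

    static void dealloc_wqe(int ix)
    {
        printf("freeing rx buffer of wqe %d\n", ix);
    }

    static void free_rx_descs(void)
    {
        while (head != -1) {    /* keep popping until the queue is empty */
            int ix = head;
            dealloc_wqe(ix);
            head = next_wqe[ix];    /* pop: follow the embedded next index */
        }
    }

    int main(void)
    {
        next_wqe[0] = 1; next_wqe[1] = 2; next_wqe[2] = -1;
        head = 0;
        free_rx_descs();
        return 0;
    }
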
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
index 5a750b9cd006..5740b465ef84 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
@@ -110,8 +110,20 @@ u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
110{ 110{
111 struct mlx5e_priv *priv = netdev_priv(dev); 111 struct mlx5e_priv *priv = netdev_priv(dev);
112 int channel_ix = fallback(dev, skb); 112 int channel_ix = fallback(dev, skb);
113 int up = (netdev_get_num_tc(dev) && skb_vlan_tag_present(skb)) ? 113 int up = 0;
114 skb->vlan_tci >> VLAN_PRIO_SHIFT : 0; 114
115 if (!netdev_get_num_tc(dev))
116 return channel_ix;
117
118 if (skb_vlan_tag_present(skb))
119 up = skb->vlan_tci >> VLAN_PRIO_SHIFT;
120
121 /* channel_ix can be larger than num_channels since
122 * dev->num_real_tx_queues = num_channels * num_tc
123 */
124 if (channel_ix >= priv->params.num_channels)
125 channel_ix = reciprocal_scale(channel_ix,
126 priv->params.num_channels);
115 127
116 return priv->channeltc_to_txq_map[channel_ix][up]; 128 return priv->channeltc_to_txq_map[channel_ix][up];
117} 129}
@@ -123,7 +135,7 @@ static inline u16 mlx5e_get_inline_hdr_size(struct mlx5e_sq *sq,
123 * headers and occur before the data gather. 135 * headers and occur before the data gather.
124 * Therefore these headers must be copied into the WQE 136 * Therefore these headers must be copied into the WQE
125 */ 137 */
126#define MLX5E_MIN_INLINE ETH_HLEN 138#define MLX5E_MIN_INLINE (ETH_HLEN + VLAN_HLEN)
127 139
128 if (bf) { 140 if (bf) {
129 u16 ihs = skb_headlen(skb); 141 u16 ihs = skb_headlen(skb);
@@ -135,7 +147,7 @@ static inline u16 mlx5e_get_inline_hdr_size(struct mlx5e_sq *sq,
135 return skb_headlen(skb); 147 return skb_headlen(skb);
136 } 148 }
137 149
138 return MLX5E_MIN_INLINE; 150 return max(skb_network_offset(skb), MLX5E_MIN_INLINE);
139} 151}
140 152
141static inline void mlx5e_tx_skb_pull_inline(unsigned char **skb_data, 153static inline void mlx5e_tx_skb_pull_inline(unsigned char **skb_data,
@@ -341,6 +353,35 @@ netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev)
341 return mlx5e_sq_xmit(sq, skb); 353 return mlx5e_sq_xmit(sq, skb);
342} 354}
343 355
356void mlx5e_free_tx_descs(struct mlx5e_sq *sq)
357{
358 struct mlx5e_tx_wqe_info *wi;
359 struct sk_buff *skb;
360 u16 ci;
361 int i;
362
363 while (sq->cc != sq->pc) {
364 ci = sq->cc & sq->wq.sz_m1;
365 skb = sq->skb[ci];
366 wi = &sq->wqe_info[ci];
367
368 if (!skb) { /* nop */
369 sq->cc++;
370 continue;
371 }
372
373 for (i = 0; i < wi->num_dma; i++) {
374 struct mlx5e_sq_dma *dma =
375 mlx5e_dma_get(sq, sq->dma_fifo_cc++);
376
377 mlx5e_tx_dma_unmap(sq->pdev, dma);
378 }
379
380 dev_kfree_skb_any(skb);
381 sq->cc += wi->num_wqebbs;
382 }
383}
384
344bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget) 385bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget)
345{ 386{
346 struct mlx5e_sq *sq; 387 struct mlx5e_sq *sq;
@@ -352,6 +393,9 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget)
352 393
353 sq = container_of(cq, struct mlx5e_sq, cq); 394 sq = container_of(cq, struct mlx5e_sq, cq);
354 395
396 if (unlikely(test_bit(MLX5E_SQ_STATE_TX_TIMEOUT, &sq->state)))
397 return false;
398
355 npkts = 0; 399 npkts = 0;
356 nbytes = 0; 400 nbytes = 0;
357 401
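The new mlx5e_free_tx_descs() above reconciles the consumer counter with the producer counter, treating NULL skb slots as NOPs and otherwise advancing by the entry's WQEBB count. A standalone model under those assumptions; WQ_SZ and the wqe_info fields are simplified stand-ins.

    /* Walk the ring from cc to pc, as in mlx5e_free_tx_descs() above. */
    #include <stdint.h>
    #include <stdio.h>

    #define WQ_SZ 8                 /* must be a power of two */
    #define WQ_MASK (WQ_SZ - 1)

    struct wqe_info { int num_wqebbs; };

    static void *skbs[WQ_SZ];
    static struct wqe_info wqe_info[WQ_SZ];
    static uint16_t cc, pc;

    static void free_tx_descs(void)
    {
        while (cc != pc) {
            uint16_t ci = cc & WQ_MASK;     /* counters wrap; mask to index */

            if (!skbs[ci]) {                /* NOP occupies one WQEBB */
                cc++;
                continue;
            }
            printf("freeing skb at slot %u (%d wqebbs)\n",
                   (unsigned)ci, wqe_info[ci].num_wqebbs);
            skbs[ci] = NULL;
            cc += wqe_info[ci].num_wqebbs;  /* real WQEs span several slots */
        }
    }

    int main(void)
    {
        static int pkt;
        skbs[0] = &pkt; wqe_info[0].num_wqebbs = 2;
        /* slot 1 is slot 0's second WQEBB, slot 2 is a NOP */
        skbs[3] = &pkt; wqe_info[3].num_wqebbs = 1;
        cc = 0; pc = 4;
        free_tx_descs();
        return 0;
    }
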
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c
index 42d16b9458e4..96a59463ae65 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/health.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c
@@ -108,15 +108,21 @@ static int in_fatal(struct mlx5_core_dev *dev)
108 108
109void mlx5_enter_error_state(struct mlx5_core_dev *dev) 109void mlx5_enter_error_state(struct mlx5_core_dev *dev)
110{ 110{
111 mutex_lock(&dev->intf_state_mutex);
111 if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) 112 if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR)
112 return; 113 goto unlock;
113 114
114 mlx5_core_err(dev, "start\n"); 115 mlx5_core_err(dev, "start\n");
115 if (pci_channel_offline(dev->pdev) || in_fatal(dev)) 116 if (pci_channel_offline(dev->pdev) || in_fatal(dev)) {
116 dev->state = MLX5_DEVICE_STATE_INTERNAL_ERROR; 117 dev->state = MLX5_DEVICE_STATE_INTERNAL_ERROR;
118 trigger_cmd_completions(dev);
119 }
117 120
118 mlx5_core_event(dev, MLX5_DEV_EVENT_SYS_ERROR, 0); 121 mlx5_core_event(dev, MLX5_DEV_EVENT_SYS_ERROR, 0);
119 mlx5_core_err(dev, "end\n"); 122 mlx5_core_err(dev, "end\n");
123
124unlock:
125 mutex_unlock(&dev->intf_state_mutex);
120} 126}
121 127
122static void mlx5_handle_bad_state(struct mlx5_core_dev *dev) 128static void mlx5_handle_bad_state(struct mlx5_core_dev *dev)
@@ -245,7 +251,6 @@ static void poll_health(unsigned long data)
245 u32 count; 251 u32 count;
246 252
247 if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) { 253 if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
248 trigger_cmd_completions(dev);
249 mod_timer(&health->timer, get_next_poll_jiffies()); 254 mod_timer(&health->timer, get_next_poll_jiffies());
250 return; 255 return;
251 } 256 }
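The health.c hunk converts an unlocked early return into the lock/goto-unlock idiom so mlx5_enter_error_state() cannot race with itself. A sketch of the idiom, with a pthread mutex standing in for intf_state_mutex:

    #include <pthread.h>
    #include <stdbool.h>

    static pthread_mutex_t state_lock = PTHREAD_MUTEX_INITIALIZER;
    static bool in_error;

    static void enter_error_state(void)
    {
        pthread_mutex_lock(&state_lock);
        if (in_error)               /* someone else already got here */
            goto unlock;

        in_error = true;
        /* ... fail outstanding commands, notify listeners ... */

    unlock:
        pthread_mutex_unlock(&state_lock);
    }

    int main(void)
    {
        enter_error_state();
        enter_error_state();        /* second call is a no-op */
        return 0;
    }

The goto keeps a single unlock site, which is why the early return had to become a jump.
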
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index c65f4a13e17e..6695893ddd2d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -1422,46 +1422,31 @@ void mlx5_disable_device(struct mlx5_core_dev *dev)
1422 mlx5_pci_err_detected(dev->pdev, 0); 1422 mlx5_pci_err_detected(dev->pdev, 0);
1423} 1423}
1424 1424
1425/* wait for the device to show vital signs. For now we check 1425/* wait for the device to show vital signs by waiting
1426 * that we can read the device ID and that the health buffer 1426 * for the health counter to start counting.
1427 * shows a non zero value which is different than 0xffffffff
1428 */ 1427 */
1429static void wait_vital(struct pci_dev *pdev) 1428static int wait_vital(struct pci_dev *pdev)
1430{ 1429{
1431 struct mlx5_core_dev *dev = pci_get_drvdata(pdev); 1430 struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
1432 struct mlx5_core_health *health = &dev->priv.health; 1431 struct mlx5_core_health *health = &dev->priv.health;
1433 const int niter = 100; 1432 const int niter = 100;
1433 u32 last_count = 0;
1434 u32 count; 1434 u32 count;
1435 u16 did;
1436 int i; 1435 int i;
1437 1436
1438 /* Wait for firmware to be ready after reset */
1439 msleep(1000);
1440 for (i = 0; i < niter; i++) {
1441 if (pci_read_config_word(pdev, 2, &did)) {
1442 dev_warn(&pdev->dev, "failed reading config word\n");
1443 break;
1444 }
1445 if (did == pdev->device) {
1446 dev_info(&pdev->dev, "device ID correctly read after %d iterations\n", i);
1447 break;
1448 }
1449 msleep(50);
1450 }
1451 if (i == niter)
1452 dev_warn(&pdev->dev, "%s-%d: could not read device ID\n", __func__, __LINE__);
1453
1454 for (i = 0; i < niter; i++) { 1437 for (i = 0; i < niter; i++) {
1455 count = ioread32be(health->health_counter); 1438 count = ioread32be(health->health_counter);
1456 if (count && count != 0xffffffff) { 1439 if (count && count != 0xffffffff) {
1457 dev_info(&pdev->dev, "Counter value 0x%x after %d iterations\n", count, i); 1440 if (last_count && last_count != count) {
1458 break; 1441 dev_info(&pdev->dev, "Counter value 0x%x after %d iterations\n", count, i);
1442 return 0;
1443 }
1444 last_count = count;
1459 } 1445 }
1460 msleep(50); 1446 msleep(50);
1461 } 1447 }
1462 1448
1463 if (i == niter) 1449 return -ETIMEDOUT;
1464 dev_warn(&pdev->dev, "%s-%d: could not read device ID\n", __func__, __LINE__);
1465} 1450}
1466 1451
1467static void mlx5_pci_resume(struct pci_dev *pdev) 1452static void mlx5_pci_resume(struct pci_dev *pdev)
@@ -1473,7 +1458,11 @@ static void mlx5_pci_resume(struct pci_dev *pdev)
1473 dev_info(&pdev->dev, "%s was called\n", __func__); 1458 dev_info(&pdev->dev, "%s was called\n", __func__);
1474 1459
1475 pci_save_state(pdev); 1460 pci_save_state(pdev);
1476 wait_vital(pdev); 1461 err = wait_vital(pdev);
1462 if (err) {
1463 dev_err(&pdev->dev, "%s: wait_vital timed out\n", __func__);
1464 return;
1465 }
1477 1466
1478 err = mlx5_load_one(dev, priv); 1467 err = mlx5_load_one(dev, priv);
1479 if (err) 1468 if (err)
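The rewritten wait_vital() no longer trusts a single plausible read: it demands that the health counter advance between samples before declaring the device alive. Modeled standalone below; the register read is faked and the msleep() is omitted.

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t fake_counter;

    static uint32_t read_health_counter(void)
    {
        return ++fake_counter;      /* stands in for ioread32be() */
    }

    static int wait_vital(void)
    {
        const int niter = 100;
        uint32_t last_count = 0;
        int i;

        for (i = 0; i < niter; i++) {
            uint32_t count = read_health_counter();

            if (count && count != 0xffffffff) {     /* reads back sane */
                if (last_count && last_count != count)
                    return 0;   /* counter advanced: firmware is running */
                last_count = count;
            }
            /* msleep(50) in the kernel; omitted here */
        }
        return -1;              /* -ETIMEDOUT in the driver */
    }

    int main(void)
    {
        printf("wait_vital: %d\n", wait_vital());
        return 0;
    }
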
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
index 9eeee0545f1c..32dea3524cee 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
@@ -345,7 +345,6 @@ retry:
345 func_id, npages, err); 345 func_id, npages, err);
346 goto out_4k; 346 goto out_4k;
347 } 347 }
348 dev->priv.fw_pages += npages;
349 348
350 err = mlx5_cmd_status_to_err(&out.hdr); 349 err = mlx5_cmd_status_to_err(&out.hdr);
351 if (err) { 350 if (err) {
@@ -373,6 +372,33 @@ out_free:
373 return err; 372 return err;
374} 373}
375 374
375static int reclaim_pages_cmd(struct mlx5_core_dev *dev,
376 struct mlx5_manage_pages_inbox *in, int in_size,
377 struct mlx5_manage_pages_outbox *out, int out_size)
378{
379 struct fw_page *fwp;
380 struct rb_node *p;
381 u32 npages;
382 u32 i = 0;
383
384 if (dev->state != MLX5_DEVICE_STATE_INTERNAL_ERROR)
385 return mlx5_cmd_exec_check_status(dev, (u32 *)in, in_size,
386 (u32 *)out, out_size);
387
388 npages = be32_to_cpu(in->num_entries);
389
390 p = rb_first(&dev->priv.page_root);
391 while (p && i < npages) {
392 fwp = rb_entry(p, struct fw_page, rb_node);
393 out->pas[i] = cpu_to_be64(fwp->addr);
394 p = rb_next(p);
395 i++;
396 }
397
398 out->num_entries = cpu_to_be32(i);
399 return 0;
400}
401
376static int reclaim_pages(struct mlx5_core_dev *dev, u32 func_id, int npages, 402static int reclaim_pages(struct mlx5_core_dev *dev, u32 func_id, int npages,
377 int *nclaimed) 403 int *nclaimed)
378{ 404{
@@ -398,15 +424,9 @@ static int reclaim_pages(struct mlx5_core_dev *dev, u32 func_id, int npages,
398 in.func_id = cpu_to_be16(func_id); 424 in.func_id = cpu_to_be16(func_id);
399 in.num_entries = cpu_to_be32(npages); 425 in.num_entries = cpu_to_be32(npages);
400 mlx5_core_dbg(dev, "npages %d, outlen %d\n", npages, outlen); 426 mlx5_core_dbg(dev, "npages %d, outlen %d\n", npages, outlen);
401 err = mlx5_cmd_exec(dev, &in, sizeof(in), out, outlen); 427 err = reclaim_pages_cmd(dev, &in, sizeof(in), out, outlen);
402 if (err) { 428 if (err) {
403 mlx5_core_err(dev, "failed reclaiming pages\n"); 429 mlx5_core_err(dev, "failed reclaiming pages: err %d\n", err);
404 goto out_free;
405 }
406 dev->priv.fw_pages -= npages;
407
408 if (out->hdr.status) {
409 err = mlx5_cmd_status_to_err(&out->hdr);
410 goto out_free; 430 goto out_free;
411 } 431 }
412 432
@@ -417,13 +437,15 @@ static int reclaim_pages(struct mlx5_core_dev *dev, u32 func_id, int npages,
417 err = -EINVAL; 437 err = -EINVAL;
418 goto out_free; 438 goto out_free;
419 } 439 }
420 if (nclaimed)
421 *nclaimed = num_claimed;
422 440
423 for (i = 0; i < num_claimed; i++) { 441 for (i = 0; i < num_claimed; i++) {
424 addr = be64_to_cpu(out->pas[i]); 442 addr = be64_to_cpu(out->pas[i]);
425 free_4k(dev, addr); 443 free_4k(dev, addr);
426 } 444 }
445
446 if (nclaimed)
447 *nclaimed = num_claimed;
448
427 dev->priv.fw_pages -= num_claimed; 449 dev->priv.fw_pages -= num_claimed;
428 if (func_id) 450 if (func_id)
429 dev->priv.vfs_pages -= num_claimed; 451 dev->priv.vfs_pages -= num_claimed;
@@ -514,14 +536,10 @@ int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev)
514 p = rb_first(&dev->priv.page_root); 536 p = rb_first(&dev->priv.page_root);
515 if (p) { 537 if (p) {
516 fwp = rb_entry(p, struct fw_page, rb_node); 538 fwp = rb_entry(p, struct fw_page, rb_node);
517 if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) { 539 err = reclaim_pages(dev, fwp->func_id,
518 free_4k(dev, fwp->addr); 540 optimal_reclaimed_pages(),
519 nclaimed = 1; 541 &nclaimed);
520 } else { 542
521 err = reclaim_pages(dev, fwp->func_id,
522 optimal_reclaimed_pages(),
523 &nclaimed);
524 }
525 if (err) { 543 if (err) {
526 mlx5_core_warn(dev, "failed reclaiming pages (%d)\n", 544 mlx5_core_warn(dev, "failed reclaiming pages (%d)\n",
527 err); 545 err);
@@ -536,6 +554,13 @@ int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev)
536 } 554 }
537 } while (p); 555 } while (p);
538 556
557 WARN(dev->priv.fw_pages,
558 "FW pages counter is %d after reclaiming all pages\n",
559 dev->priv.fw_pages);
560 WARN(dev->priv.vfs_pages,
561 "VFs FW pages counter is %d after reclaiming all pages\n",
562 dev->priv.vfs_pages);
563
539 return 0; 564 return 0;
540} 565}
541 566
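reclaim_pages_cmd() above answers reclaim requests from the driver's own page bookkeeping when the device is in internal error, instead of issuing a doomed firmware command. A loose sketch of the idea; the rb-tree is reduced to a fixed array and the return codes are placeholders.

    #include <stdint.h>
    #include <stdio.h>

    #define TRACKED 4

    static uint64_t tracked_pages[TRACKED] = { 0x1000, 0x2000, 0x3000, 0x4000 };
    static int device_in_error = 1;

    static int reclaim_pages_cmd(uint32_t npages, uint64_t *out, uint32_t *nout)
    {
        uint32_t i = 0;

        if (!device_in_error)
            return -1;          /* real path: issue the FW command */

        while (i < npages && i < TRACKED) {
            out[i] = tracked_pages[i];  /* pretend FW returned them */
            i++;
        }
        *nout = i;
        return 0;
    }

    int main(void)
    {
        uint64_t pas[TRACKED];
        uint32_t n;

        if (!reclaim_pages_cmd(2, pas, &n))
            printf("reclaimed %u pages locally\n", n);
        return 0;
    }
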
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vport.c b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
index daf44cd4c566..91846dfcbe9c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/vport.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
@@ -513,7 +513,6 @@ int mlx5_modify_nic_vport_node_guid(struct mlx5_core_dev *mdev,
513{ 513{
514 int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in); 514 int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
515 void *nic_vport_context; 515 void *nic_vport_context;
516 u8 *guid;
517 void *in; 516 void *in;
518 int err; 517 int err;
519 518
@@ -535,8 +534,6 @@ int mlx5_modify_nic_vport_node_guid(struct mlx5_core_dev *mdev,
535 534
536 nic_vport_context = MLX5_ADDR_OF(modify_nic_vport_context_in, 535 nic_vport_context = MLX5_ADDR_OF(modify_nic_vport_context_in,
537 in, nic_vport_context); 536 in, nic_vport_context);
538 guid = MLX5_ADDR_OF(nic_vport_context, nic_vport_context,
539 node_guid);
540 MLX5_SET64(nic_vport_context, nic_vport_context, node_guid, node_guid); 537 MLX5_SET64(nic_vport_context, nic_vport_context, node_guid, node_guid);
541 538
542 err = mlx5_modify_nic_vport_context(mdev, in, inlen); 539 err = mlx5_modify_nic_vport_context(mdev, in, inlen);
diff --git a/drivers/net/ethernet/microchip/enc28j60.c b/drivers/net/ethernet/microchip/enc28j60.c
index 7066954c39d6..0a26b11ca8f6 100644
--- a/drivers/net/ethernet/microchip/enc28j60.c
+++ b/drivers/net/ethernet/microchip/enc28j60.c
@@ -1151,7 +1151,8 @@ static void enc28j60_irq_work_handler(struct work_struct *work)
1151 enc28j60_phy_read(priv, PHIR); 1151 enc28j60_phy_read(priv, PHIR);
1152 } 1152 }
1153 /* TX complete handler */ 1153 /* TX complete handler */
1154 if ((intflags & EIR_TXIF) != 0) { 1154 if (((intflags & EIR_TXIF) != 0) &&
1155 ((intflags & EIR_TXERIF) == 0)) {
1155 bool err = false; 1156 bool err = false;
1156 loop++; 1157 loop++;
1157 if (netif_msg_intr(priv)) 1158 if (netif_msg_intr(priv))
@@ -1203,7 +1204,7 @@ static void enc28j60_irq_work_handler(struct work_struct *work)
1203 enc28j60_tx_clear(ndev, true); 1204 enc28j60_tx_clear(ndev, true);
1204 } else 1205 } else
1205 enc28j60_tx_clear(ndev, true); 1206 enc28j60_tx_clear(ndev, true);
1206 locked_reg_bfclr(priv, EIR, EIR_TXERIF); 1207 locked_reg_bfclr(priv, EIR, EIR_TXERIF | EIR_TXIF);
1207 } 1208 }
1208 /* RX Error handler */ 1209 /* RX Error handler */
1209 if ((intflags & EIR_RXERIF) != 0) { 1210 if ((intflags & EIR_RXERIF) != 0) {
@@ -1238,6 +1239,8 @@ static void enc28j60_irq_work_handler(struct work_struct *work)
1238 */ 1239 */
1239static void enc28j60_hw_tx(struct enc28j60_net *priv) 1240static void enc28j60_hw_tx(struct enc28j60_net *priv)
1240{ 1241{
1242 BUG_ON(!priv->tx_skb);
1243
1241 if (netif_msg_tx_queued(priv)) 1244 if (netif_msg_tx_queued(priv))
1242 printk(KERN_DEBUG DRV_NAME 1245 printk(KERN_DEBUG DRV_NAME
1243 ": Tx Packet Len:%d\n", priv->tx_skb->len); 1246 ": Tx Packet Len:%d\n", priv->tx_skb->len);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
index 607bb7d4514d..87c642d3b075 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
@@ -772,6 +772,8 @@ netdev_tx_t qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
772 tx_ring->tx_stats.tx_bytes += skb->len; 772 tx_ring->tx_stats.tx_bytes += skb->len;
773 tx_ring->tx_stats.xmit_called++; 773 tx_ring->tx_stats.xmit_called++;
774 774
775 /* Ensure writes are complete before HW fetches Tx descriptors */
776 wmb();
775 qlcnic_update_cmd_producer(tx_ring); 777 qlcnic_update_cmd_producer(tx_ring);
776 778
777 return NETDEV_TX_OK; 779 return NETDEV_TX_OK;
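The added wmb() enforces the usual producer rule: descriptor writes must be globally visible before the producer index that the NIC polls. The same rule expressed with C11 atomics, where a release fence plays the role of wmb(); ring layout and sizes are invented.

    #include <stdatomic.h>
    #include <stdint.h>

    struct desc { uint64_t addr; uint32_t len; };

    static struct desc ring[256];
    static _Atomic uint32_t producer;

    static void post_tx(uint32_t idx, uint64_t addr, uint32_t len)
    {
        ring[idx].addr = addr;      /* fill the descriptor first */
        ring[idx].len  = len;

        /* release fence, i.e. wmb(): no descriptor store may be
         * reordered past the producer update the hardware sees
         */
        atomic_thread_fence(memory_order_release);
        atomic_store_explicit(&producer, idx + 1, memory_order_relaxed);
    }

    int main(void)
    {
        post_tx(0, 0xdeadbeef, 64);
        return (int)atomic_load(&producer) - 1;
    }
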
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index a473c182c91d..e4071265be76 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -2804,7 +2804,7 @@ static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
2804 priv->tx_path_in_lpi_mode = true; 2804 priv->tx_path_in_lpi_mode = true;
2805 if (status & CORE_IRQ_TX_PATH_EXIT_LPI_MODE) 2805 if (status & CORE_IRQ_TX_PATH_EXIT_LPI_MODE)
2806 priv->tx_path_in_lpi_mode = false; 2806 priv->tx_path_in_lpi_mode = false;
2807 if (status & CORE_IRQ_MTL_RX_OVERFLOW) 2807 if (status & CORE_IRQ_MTL_RX_OVERFLOW && priv->hw->dma->set_rx_tail_ptr)
2808 priv->hw->dma->set_rx_tail_ptr(priv->ioaddr, 2808 priv->hw->dma->set_rx_tail_ptr(priv->ioaddr,
2809 priv->rx_tail_addr, 2809 priv->rx_tail_addr,
2810 STMMAC_CHAN0); 2810 STMMAC_CHAN0);
diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
index cc39cefeae45..9b3dc3c61e00 100644
--- a/drivers/net/geneve.c
+++ b/drivers/net/geneve.c
@@ -1072,12 +1072,17 @@ static netdev_tx_t geneve_xmit(struct sk_buff *skb, struct net_device *dev)
1072 1072
1073static int __geneve_change_mtu(struct net_device *dev, int new_mtu, bool strict) 1073static int __geneve_change_mtu(struct net_device *dev, int new_mtu, bool strict)
1074{ 1074{
1075 struct geneve_dev *geneve = netdev_priv(dev);
1075 /* The max_mtu calculation does not take account of GENEVE 1076 /* The max_mtu calculation does not take account of GENEVE
1076 * options, to avoid excluding potentially valid 1077 * options, to avoid excluding potentially valid
1077 * configurations. 1078 * configurations.
1078 */ 1079 */
1079 int max_mtu = IP_MAX_MTU - GENEVE_BASE_HLEN - sizeof(struct iphdr) 1080 int max_mtu = IP_MAX_MTU - GENEVE_BASE_HLEN - dev->hard_header_len;
1080 - dev->hard_header_len; 1081
1082 if (geneve->remote.sa.sa_family == AF_INET6)
1083 max_mtu -= sizeof(struct ipv6hdr);
1084 else
1085 max_mtu -= sizeof(struct iphdr);
1081 1086
1082 if (new_mtu < 68) 1087 if (new_mtu < 68)
1083 return -EINVAL; 1088 return -EINVAL;
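With the numbers plugged in, the geneve clamp above works out as follows; GENEVE_BASE_HLEN is assumed to be an 8-byte UDP header plus the 8-byte base Geneve header, and an Ethernet hard_header_len is assumed.

    #include <stdio.h>

    #define IP_MAX_MTU        65535
    #define GENEVE_BASE_HLEN  (8 + 8)   /* udphdr + base genevehdr, assumed */
    #define IPV4_HLEN         20
    #define IPV6_HLEN         40

    int main(void)
    {
        int hard_header_len = 14;       /* Ethernet, assumed */
        int max_v4 = IP_MAX_MTU - GENEVE_BASE_HLEN - hard_header_len - IPV4_HLEN;
        int max_v6 = IP_MAX_MTU - GENEVE_BASE_HLEN - hard_header_len - IPV6_HLEN;

        /* an IPv6 remote costs 20 more bytes of headroom than IPv4 */
        printf("max_mtu: v4=%d v6=%d\n", max_v4, max_v6);
        return 0;
    }
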
diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
index 0e7eff7f1cd2..8bcd78f94966 100644
--- a/drivers/net/macsec.c
+++ b/drivers/net/macsec.c
@@ -2640,6 +2640,7 @@ static netdev_tx_t macsec_start_xmit(struct sk_buff *skb,
2640 u64_stats_update_begin(&secy_stats->syncp); 2640 u64_stats_update_begin(&secy_stats->syncp);
2641 secy_stats->stats.OutPktsUntagged++; 2641 secy_stats->stats.OutPktsUntagged++;
2642 u64_stats_update_end(&secy_stats->syncp); 2642 u64_stats_update_end(&secy_stats->syncp);
2643 skb->dev = macsec->real_dev;
2643 len = skb->len; 2644 len = skb->len;
2644 ret = dev_queue_xmit(skb); 2645 ret = dev_queue_xmit(skb);
2645 count_tx(dev, ret, len); 2646 count_tx(dev, ret, len);
diff --git a/drivers/net/phy/dp83867.c b/drivers/net/phy/dp83867.c
index 2afa61b51d41..91177a4a32ad 100644
--- a/drivers/net/phy/dp83867.c
+++ b/drivers/net/phy/dp83867.c
@@ -57,6 +57,7 @@
57 57
58/* PHY CTRL bits */ 58/* PHY CTRL bits */
59#define DP83867_PHYCR_FIFO_DEPTH_SHIFT 14 59#define DP83867_PHYCR_FIFO_DEPTH_SHIFT 14
60#define DP83867_PHYCR_FIFO_DEPTH_MASK (3 << 14)
60 61
61/* RGMIIDCTL bits */ 62/* RGMIIDCTL bits */
62#define DP83867_RGMII_TX_CLK_DELAY_SHIFT 4 63#define DP83867_RGMII_TX_CLK_DELAY_SHIFT 4
@@ -133,8 +134,8 @@ static int dp83867_of_init(struct phy_device *phydev)
133static int dp83867_config_init(struct phy_device *phydev) 134static int dp83867_config_init(struct phy_device *phydev)
134{ 135{
135 struct dp83867_private *dp83867; 136 struct dp83867_private *dp83867;
136 int ret; 137 int ret, val;
137 u16 val, delay; 138 u16 delay;
138 139
139 if (!phydev->priv) { 140 if (!phydev->priv) {
140 dp83867 = devm_kzalloc(&phydev->mdio.dev, sizeof(*dp83867), 141 dp83867 = devm_kzalloc(&phydev->mdio.dev, sizeof(*dp83867),
@@ -151,8 +152,12 @@ static int dp83867_config_init(struct phy_device *phydev)
151 } 152 }
152 153
153 if (phy_interface_is_rgmii(phydev)) { 154 if (phy_interface_is_rgmii(phydev)) {
154 ret = phy_write(phydev, MII_DP83867_PHYCTRL, 155 val = phy_read(phydev, MII_DP83867_PHYCTRL);
155 (dp83867->fifo_depth << DP83867_PHYCR_FIFO_DEPTH_SHIFT)); 156 if (val < 0)
157 return val;
158 val &= ~DP83867_PHYCR_FIFO_DEPTH_MASK;
159 val |= (dp83867->fifo_depth << DP83867_PHYCR_FIFO_DEPTH_SHIFT);
160 ret = phy_write(phydev, MII_DP83867_PHYCTRL, val);
156 if (ret) 161 if (ret)
157 return ret; 162 return ret;
158 } 163 }
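The dp83867 change is a textbook read-modify-write: read PHYCTRL, clear only the FIFO-depth field, or in the new value, write back, so neighboring bits survive. A generic version of the pattern with made-up register contents:

    #include <stdint.h>
    #include <stdio.h>

    #define FIFO_DEPTH_SHIFT 14
    #define FIFO_DEPTH_MASK  (3u << FIFO_DEPTH_SHIFT)

    static uint16_t phyctrl = 0x5048;   /* pretend other bits are configured */

    int main(void)
    {
        uint16_t val = phyctrl;             /* phy_read() */

        val &= ~FIFO_DEPTH_MASK;            /* clear only our field */
        val |= (uint16_t)(2u << FIFO_DEPTH_SHIFT);  /* new depth of 2 */
        phyctrl = val;                      /* phy_write() */

        printf("phyctrl=0x%04x\n", (unsigned)phyctrl);  /* 0x9048 */
        return 0;
    }

A blind write, as in the removed line, would have zeroed every bit outside the depth field.
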
diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
index 53759c315b97..877c9516e781 100644
--- a/drivers/net/usb/cdc_ncm.c
+++ b/drivers/net/usb/cdc_ncm.c
@@ -854,6 +854,13 @@ int cdc_ncm_bind_common(struct usbnet *dev, struct usb_interface *intf, u8 data_
854 if (cdc_ncm_init(dev)) 854 if (cdc_ncm_init(dev))
855 goto error2; 855 goto error2;
856 856
857 /* Some firmwares need a pause here or they will silently fail
858 * to set up the interface properly. This value was decided
859 * empirically on a Sierra Wireless MC7455 running 02.08.02.00
860 * firmware.
861 */
862 usleep_range(10000, 20000);
863
857 /* configure data interface */ 864 /* configure data interface */
858 temp = usb_set_interface(dev->udev, iface_no, data_altsetting); 865 temp = usb_set_interface(dev->udev, iface_no, data_altsetting);
859 if (temp) { 866 if (temp) {
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index 4e257b8d8f3e..0da72d39b4f9 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -31,7 +31,7 @@
31#define NETNEXT_VERSION "08" 31#define NETNEXT_VERSION "08"
32 32
33/* Information for net */ 33/* Information for net */
34#define NET_VERSION "4" 34#define NET_VERSION "5"
35 35
36#define DRIVER_VERSION "v1." NETNEXT_VERSION "." NET_VERSION 36#define DRIVER_VERSION "v1." NETNEXT_VERSION "." NET_VERSION
37#define DRIVER_AUTHOR "Realtek linux nic maintainers <nic_swsd@realtek.com>" 37#define DRIVER_AUTHOR "Realtek linux nic maintainers <nic_swsd@realtek.com>"
@@ -624,6 +624,7 @@ struct r8152 {
624 int (*eee_get)(struct r8152 *, struct ethtool_eee *); 624 int (*eee_get)(struct r8152 *, struct ethtool_eee *);
625 int (*eee_set)(struct r8152 *, struct ethtool_eee *); 625 int (*eee_set)(struct r8152 *, struct ethtool_eee *);
626 bool (*in_nway)(struct r8152 *); 626 bool (*in_nway)(struct r8152 *);
627 void (*autosuspend_en)(struct r8152 *tp, bool enable);
627 } rtl_ops; 628 } rtl_ops;
628 629
629 int intr_interval; 630 int intr_interval;
@@ -2408,9 +2409,6 @@ static void rtl_runtime_suspend_enable(struct r8152 *tp, bool enable)
2408 if (enable) { 2409 if (enable) {
2409 u32 ocp_data; 2410 u32 ocp_data;
2410 2411
2411 r8153_u1u2en(tp, false);
2412 r8153_u2p3en(tp, false);
2413
2414 __rtl_set_wol(tp, WAKE_ANY); 2412 __rtl_set_wol(tp, WAKE_ANY);
2415 2413
2416 ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CRWECR, CRWECR_CONFIG); 2414 ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CRWECR, CRWECR_CONFIG);
@@ -2421,7 +2419,28 @@ static void rtl_runtime_suspend_enable(struct r8152 *tp, bool enable)
2421 2419
2422 ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CRWECR, CRWECR_NORAML); 2420 ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CRWECR, CRWECR_NORAML);
2423 } else { 2421 } else {
2422 u32 ocp_data;
2423
2424 __rtl_set_wol(tp, tp->saved_wolopts); 2424 __rtl_set_wol(tp, tp->saved_wolopts);
2425
2426 ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CRWECR, CRWECR_CONFIG);
2427
2428 ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_CONFIG34);
2429 ocp_data &= ~LINK_OFF_WAKE_EN;
2430 ocp_write_word(tp, MCU_TYPE_PLA, PLA_CONFIG34, ocp_data);
2431
2432 ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CRWECR, CRWECR_NORAML);
2433 }
2434}
2435
2436static void rtl8153_runtime_enable(struct r8152 *tp, bool enable)
2437{
2438 rtl_runtime_suspend_enable(tp, enable);
2439
2440 if (enable) {
2441 r8153_u1u2en(tp, false);
2442 r8153_u2p3en(tp, false);
2443 } else {
2425 r8153_u2p3en(tp, true); 2444 r8153_u2p3en(tp, true);
2426 r8153_u1u2en(tp, true); 2445 r8153_u1u2en(tp, true);
2427 } 2446 }
@@ -3512,7 +3531,7 @@ static int rtl8152_suspend(struct usb_interface *intf, pm_message_t message)
3512 napi_disable(&tp->napi); 3531 napi_disable(&tp->napi);
3513 if (test_bit(SELECTIVE_SUSPEND, &tp->flags)) { 3532 if (test_bit(SELECTIVE_SUSPEND, &tp->flags)) {
3514 rtl_stop_rx(tp); 3533 rtl_stop_rx(tp);
3515 rtl_runtime_suspend_enable(tp, true); 3534 tp->rtl_ops.autosuspend_en(tp, true);
3516 } else { 3535 } else {
3517 cancel_delayed_work_sync(&tp->schedule); 3536 cancel_delayed_work_sync(&tp->schedule);
3518 tp->rtl_ops.down(tp); 3537 tp->rtl_ops.down(tp);
@@ -3538,7 +3557,7 @@ static int rtl8152_resume(struct usb_interface *intf)
3538 3557
3539 if (netif_running(tp->netdev) && tp->netdev->flags & IFF_UP) { 3558 if (netif_running(tp->netdev) && tp->netdev->flags & IFF_UP) {
3540 if (test_bit(SELECTIVE_SUSPEND, &tp->flags)) { 3559 if (test_bit(SELECTIVE_SUSPEND, &tp->flags)) {
3541 rtl_runtime_suspend_enable(tp, false); 3560 tp->rtl_ops.autosuspend_en(tp, false);
3542 clear_bit(SELECTIVE_SUSPEND, &tp->flags); 3561 clear_bit(SELECTIVE_SUSPEND, &tp->flags);
3543 napi_disable(&tp->napi); 3562 napi_disable(&tp->napi);
3544 set_bit(WORK_ENABLE, &tp->flags); 3563 set_bit(WORK_ENABLE, &tp->flags);
@@ -3557,7 +3576,7 @@ static int rtl8152_resume(struct usb_interface *intf)
3557 usb_submit_urb(tp->intr_urb, GFP_KERNEL); 3576 usb_submit_urb(tp->intr_urb, GFP_KERNEL);
3558 } else if (test_bit(SELECTIVE_SUSPEND, &tp->flags)) { 3577 } else if (test_bit(SELECTIVE_SUSPEND, &tp->flags)) {
3559 if (tp->netdev->flags & IFF_UP) 3578 if (tp->netdev->flags & IFF_UP)
3560 rtl_runtime_suspend_enable(tp, false); 3579 tp->rtl_ops.autosuspend_en(tp, false);
3561 clear_bit(SELECTIVE_SUSPEND, &tp->flags); 3580 clear_bit(SELECTIVE_SUSPEND, &tp->flags);
3562 } 3581 }
3563 3582
@@ -4137,6 +4156,7 @@ static int rtl_ops_init(struct r8152 *tp)
4137 ops->eee_get = r8152_get_eee; 4156 ops->eee_get = r8152_get_eee;
4138 ops->eee_set = r8152_set_eee; 4157 ops->eee_set = r8152_set_eee;
4139 ops->in_nway = rtl8152_in_nway; 4158 ops->in_nway = rtl8152_in_nway;
4159 ops->autosuspend_en = rtl_runtime_suspend_enable;
4140 break; 4160 break;
4141 4161
4142 case RTL_VER_03: 4162 case RTL_VER_03:
@@ -4152,6 +4172,7 @@ static int rtl_ops_init(struct r8152 *tp)
4152 ops->eee_get = r8153_get_eee; 4172 ops->eee_get = r8153_get_eee;
4153 ops->eee_set = r8153_set_eee; 4173 ops->eee_set = r8153_set_eee;
4154 ops->in_nway = rtl8153_in_nway; 4174 ops->in_nway = rtl8153_in_nway;
4175 ops->autosuspend_en = rtl8153_runtime_enable;
4155 break; 4176 break;
4156 4177
4157 default: 4178 default:
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index 61ba46404937..6086a0163249 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -395,8 +395,11 @@ int usbnet_change_mtu (struct net_device *net, int new_mtu)
395 dev->hard_mtu = net->mtu + net->hard_header_len; 395 dev->hard_mtu = net->mtu + net->hard_header_len;
396 if (dev->rx_urb_size == old_hard_mtu) { 396 if (dev->rx_urb_size == old_hard_mtu) {
397 dev->rx_urb_size = dev->hard_mtu; 397 dev->rx_urb_size = dev->hard_mtu;
398 if (dev->rx_urb_size > old_rx_urb_size) 398 if (dev->rx_urb_size > old_rx_urb_size) {
399 usbnet_pause_rx(dev);
399 usbnet_unlink_rx_urbs(dev); 400 usbnet_unlink_rx_urbs(dev);
401 usbnet_resume_rx(dev);
402 }
400 } 403 }
401 404
402 /* max qlen depend on hard_mtu and rx_urb_size */ 405 /* max qlen depend on hard_mtu and rx_urb_size */
@@ -1508,8 +1511,9 @@ static void usbnet_bh (unsigned long param)
1508 } else if (netif_running (dev->net) && 1511 } else if (netif_running (dev->net) &&
1509 netif_device_present (dev->net) && 1512 netif_device_present (dev->net) &&
1510 netif_carrier_ok(dev->net) && 1513 netif_carrier_ok(dev->net) &&
1511 !timer_pending (&dev->delay) && 1514 !timer_pending(&dev->delay) &&
1512 !test_bit (EVENT_RX_HALT, &dev->flags)) { 1515 !test_bit(EVENT_RX_PAUSED, &dev->flags) &&
1516 !test_bit(EVENT_RX_HALT, &dev->flags)) {
1513 int temp = dev->rxq.qlen; 1517 int temp = dev->rxq.qlen;
1514 1518
1515 if (temp < RX_QLEN(dev)) { 1519 if (temp < RX_QLEN(dev)) {
diff --git a/drivers/platform/chrome/cros_ec_dev.c b/drivers/platform/chrome/cros_ec_dev.c
index 6d8ee3b15872..8abd80dbcbed 100644
--- a/drivers/platform/chrome/cros_ec_dev.c
+++ b/drivers/platform/chrome/cros_ec_dev.c
@@ -151,13 +151,19 @@ static long ec_device_ioctl_xcmd(struct cros_ec_dev *ec, void __user *arg)
151 goto exit; 151 goto exit;
152 } 152 }
153 153
154 if (u_cmd.outsize != s_cmd->outsize ||
155 u_cmd.insize != s_cmd->insize) {
156 ret = -EINVAL;
157 goto exit;
158 }
159
154 s_cmd->command += ec->cmd_offset; 160 s_cmd->command += ec->cmd_offset;
155 ret = cros_ec_cmd_xfer(ec->ec_dev, s_cmd); 161 ret = cros_ec_cmd_xfer(ec->ec_dev, s_cmd);
156 /* Only copy data to userland if data was received. */ 162 /* Only copy data to userland if data was received. */
157 if (ret < 0) 163 if (ret < 0)
158 goto exit; 164 goto exit;
159 165
160 if (copy_to_user(arg, s_cmd, sizeof(*s_cmd) + u_cmd.insize)) 166 if (copy_to_user(arg, s_cmd, sizeof(*s_cmd) + s_cmd->insize))
161 ret = -EFAULT; 167 ret = -EFAULT;
162exit: 168exit:
163 kfree(s_cmd); 169 kfree(s_cmd);
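The cros_ec check defends against userspace changing the size fields between the two copies, and bounds the copy-out by the kernel-side value rather than the user-supplied one. A much-reduced model of that guard; struct cmd and the error constant are illustrative only.

    #include <stdint.h>
    #include <stdio.h>

    struct cmd { uint32_t outsize; uint32_t insize; /* payload follows */ };

    static int xcmd(const struct cmd *u_cmd, const struct cmd *s_cmd)
    {
        /* reject if the re-read struct disagrees with the first copy;
         * userspace may have raced and grown the sizes in between
         */
        if (u_cmd->outsize != s_cmd->outsize ||
            u_cmd->insize != s_cmd->insize)
            return -22;                     /* -EINVAL */

        /* ... issue command, then copy out s_cmd->insize bytes,
         * never the user-supplied size ...
         */
        return 0;
    }

    int main(void)
    {
        struct cmd a = { 8, 16 }, b = { 8, 4096 };  /* b was tampered with */
        printf("%d\n", xcmd(&a, &b));
        return 0;
    }
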
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index 80b1979e8d95..df036b872b05 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -1051,6 +1051,7 @@ static void qeth_l2_remove_device(struct ccwgroup_device *cgdev)
1051 qeth_l2_set_offline(cgdev); 1051 qeth_l2_set_offline(cgdev);
1052 1052
1053 if (card->dev) { 1053 if (card->dev) {
1054 netif_napi_del(&card->napi);
1054 unregister_netdev(card->dev); 1055 unregister_netdev(card->dev);
1055 card->dev = NULL; 1056 card->dev = NULL;
1056 } 1057 }
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index ac544330daeb..709b52339ff9 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -3226,6 +3226,7 @@ static void qeth_l3_remove_device(struct ccwgroup_device *cgdev)
3226 qeth_l3_set_offline(cgdev); 3226 qeth_l3_set_offline(cgdev);
3227 3227
3228 if (card->dev) { 3228 if (card->dev) {
3229 netif_napi_del(&card->napi);
3229 unregister_netdev(card->dev); 3230 unregister_netdev(card->dev);
3230 card->dev = NULL; 3231 card->dev = NULL;
3231 } 3232 }
diff --git a/drivers/xen/xen-acpi-processor.c b/drivers/xen/xen-acpi-processor.c
index 076970a54f89..4ce10bcca18b 100644
--- a/drivers/xen/xen-acpi-processor.c
+++ b/drivers/xen/xen-acpi-processor.c
@@ -423,36 +423,7 @@ upload:
423 423
424 return 0; 424 return 0;
425} 425}
426static int __init check_prereq(void)
427{
428 struct cpuinfo_x86 *c = &cpu_data(0);
429
430 if (!xen_initial_domain())
431 return -ENODEV;
432
433 if (!acpi_gbl_FADT.smi_command)
434 return -ENODEV;
435
436 if (c->x86_vendor == X86_VENDOR_INTEL) {
437 if (!cpu_has(c, X86_FEATURE_EST))
438 return -ENODEV;
439 426
440 return 0;
441 }
442 if (c->x86_vendor == X86_VENDOR_AMD) {
443 /* Copied from powernow-k8.h, can't include ../cpufreq/powernow
444 * as we get compile warnings for the static functions.
445 */
446#define CPUID_FREQ_VOLT_CAPABILITIES 0x80000007
447#define USE_HW_PSTATE 0x00000080
448 u32 eax, ebx, ecx, edx;
449 cpuid(CPUID_FREQ_VOLT_CAPABILITIES, &eax, &ebx, &ecx, &edx);
450 if ((edx & USE_HW_PSTATE) != USE_HW_PSTATE)
451 return -ENODEV;
452 return 0;
453 }
454 return -ENODEV;
455}
456/* acpi_perf_data is a pointer to percpu data. */ 427/* acpi_perf_data is a pointer to percpu data. */
457static struct acpi_processor_performance __percpu *acpi_perf_data; 428static struct acpi_processor_performance __percpu *acpi_perf_data;
458 429
@@ -509,10 +480,10 @@ struct notifier_block xen_acpi_processor_resume_nb = {
509static int __init xen_acpi_processor_init(void) 480static int __init xen_acpi_processor_init(void)
510{ 481{
511 unsigned int i; 482 unsigned int i;
512 int rc = check_prereq(); 483 int rc;
513 484
514 if (rc) 485 if (!xen_initial_domain())
515 return rc; 486 return -ENODEV;
516 487
517 nr_acpi_bits = get_max_acpi_id() + 1; 488 nr_acpi_bits = get_max_acpi_id() + 1;
518 acpi_ids_done = kcalloc(BITS_TO_LONGS(nr_acpi_bits), sizeof(unsigned long), GFP_KERNEL); 489 acpi_ids_done = kcalloc(BITS_TO_LONGS(nr_acpi_bits), sizeof(unsigned long), GFP_KERNEL);
diff --git a/drivers/xen/xenbus/xenbus_dev_frontend.c b/drivers/xen/xenbus/xenbus_dev_frontend.c
index cacf30d14747..7487971f9f78 100644
--- a/drivers/xen/xenbus/xenbus_dev_frontend.c
+++ b/drivers/xen/xenbus/xenbus_dev_frontend.c
@@ -316,11 +316,18 @@ static int xenbus_write_transaction(unsigned msg_type,
316 rc = -ENOMEM; 316 rc = -ENOMEM;
317 goto out; 317 goto out;
318 } 318 }
319 } else {
320 list_for_each_entry(trans, &u->transactions, list)
321 if (trans->handle.id == u->u.msg.tx_id)
322 break;
323 if (&trans->list == &u->transactions)
324 return -ESRCH;
319 } 325 }
320 326
321 reply = xenbus_dev_request_and_reply(&u->u.msg); 327 reply = xenbus_dev_request_and_reply(&u->u.msg);
322 if (IS_ERR(reply)) { 328 if (IS_ERR(reply)) {
323 kfree(trans); 329 if (msg_type == XS_TRANSACTION_START)
330 kfree(trans);
324 rc = PTR_ERR(reply); 331 rc = PTR_ERR(reply);
325 goto out; 332 goto out;
326 } 333 }
@@ -333,12 +340,7 @@ static int xenbus_write_transaction(unsigned msg_type,
333 list_add(&trans->list, &u->transactions); 340 list_add(&trans->list, &u->transactions);
334 } 341 }
335 } else if (u->u.msg.type == XS_TRANSACTION_END) { 342 } else if (u->u.msg.type == XS_TRANSACTION_END) {
336 list_for_each_entry(trans, &u->transactions, list)
337 if (trans->handle.id == u->u.msg.tx_id)
338 break;
339 BUG_ON(&trans->list == &u->transactions);
340 list_del(&trans->list); 343 list_del(&trans->list);
341
342 kfree(trans); 344 kfree(trans);
343 } 345 }
344 346
diff --git a/drivers/xen/xenbus/xenbus_xs.c b/drivers/xen/xenbus/xenbus_xs.c
index 374b12af8812..22f7cd711c57 100644
--- a/drivers/xen/xenbus/xenbus_xs.c
+++ b/drivers/xen/xenbus/xenbus_xs.c
@@ -232,10 +232,10 @@ static void transaction_resume(void)
232void *xenbus_dev_request_and_reply(struct xsd_sockmsg *msg) 232void *xenbus_dev_request_and_reply(struct xsd_sockmsg *msg)
233{ 233{
234 void *ret; 234 void *ret;
235 struct xsd_sockmsg req_msg = *msg; 235 enum xsd_sockmsg_type type = msg->type;
236 int err; 236 int err;
237 237
238 if (req_msg.type == XS_TRANSACTION_START) 238 if (type == XS_TRANSACTION_START)
239 transaction_start(); 239 transaction_start();
240 240
241 mutex_lock(&xs_state.request_mutex); 241 mutex_lock(&xs_state.request_mutex);
@@ -249,12 +249,8 @@ void *xenbus_dev_request_and_reply(struct xsd_sockmsg *msg)
249 249
250 mutex_unlock(&xs_state.request_mutex); 250 mutex_unlock(&xs_state.request_mutex);
251 251
252 if (IS_ERR(ret))
253 return ret;
254
255 if ((msg->type == XS_TRANSACTION_END) || 252 if ((msg->type == XS_TRANSACTION_END) ||
256 ((req_msg.type == XS_TRANSACTION_START) && 253 ((type == XS_TRANSACTION_START) && (msg->type == XS_ERROR)))
257 (msg->type == XS_ERROR)))
258 transaction_end(); 254 transaction_end();
259 255
260 return ret; 256 return ret;
diff --git a/fs/configfs/file.c b/fs/configfs/file.c
index 33b7ee34eda5..bbc1252a59f5 100644
--- a/fs/configfs/file.c
+++ b/fs/configfs/file.c
@@ -357,8 +357,6 @@ configfs_write_bin_file(struct file *file, const char __user *buf,
357 357
358 len = simple_write_to_buffer(buffer->bin_buffer, 358 len = simple_write_to_buffer(buffer->bin_buffer,
359 buffer->bin_buffer_size, ppos, buf, count); 359 buffer->bin_buffer_size, ppos, buf, count);
360 if (len > 0)
361 *ppos += len;
362out: 360out:
363 mutex_unlock(&buffer->mutex); 361 mutex_unlock(&buffer->mutex);
364 return len; 362 return len;
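The configfs fix works because simple_write_to_buffer() already advances the file position itself; adding len again made the next write land past the data just written. A toy helper with the same position contract makes the bug visible.

    #include <stdio.h>
    #include <string.h>

    /* behaves like simple_write_to_buffer() w.r.t. the position pointer */
    static long write_to_buffer(char *to, size_t avail, long *ppos,
                                const char *from, size_t count)
    {
        long pos = *ppos;

        if ((size_t)pos >= avail)
            return 0;
        if (count > avail - (size_t)pos)
            count = avail - (size_t)pos;
        memcpy(to + pos, from, count);
        *ppos = pos + count;    /* the helper advances the position */
        return (long)count;
    }

    int main(void)
    {
        char buf[16];
        long pos = 0;
        long len = write_to_buffer(buf, sizeof(buf), &pos, "hello", 5);

        /* a caller doing `pos += len` here would jump ahead to 10 */
        printf("len=%ld pos=%ld\n", len, pos);
        return 0;
    }
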
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index 989a2cef6b76..fe7e83a45eff 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -483,9 +483,9 @@ static void inode_switch_wbs(struct inode *inode, int new_wb_id)
483 goto out_free; 483 goto out_free;
484 } 484 }
485 inode->i_state |= I_WB_SWITCH; 485 inode->i_state |= I_WB_SWITCH;
486 __iget(inode);
486 spin_unlock(&inode->i_lock); 487 spin_unlock(&inode->i_lock);
487 488
488 ihold(inode);
489 isw->inode = inode; 489 isw->inode = inode;
490 490
491 atomic_inc(&isw_nr_in_flight); 491 atomic_inc(&isw_nr_in_flight);
diff --git a/include/acpi/acpi_drivers.h b/include/acpi/acpi_drivers.h
index 797ae2ec8eee..29c691265b49 100644
--- a/include/acpi/acpi_drivers.h
+++ b/include/acpi/acpi_drivers.h
@@ -78,6 +78,7 @@
78 78
79/* ACPI PCI Interrupt Link (pci_link.c) */ 79/* ACPI PCI Interrupt Link (pci_link.c) */
80 80
81int acpi_irq_penalty_init(void);
81int acpi_pci_link_allocate_irq(acpi_handle handle, int index, int *triggering, 82int acpi_pci_link_allocate_irq(acpi_handle handle, int index, int *triggering,
82 int *polarity, char **name); 83 int *polarity, char **name);
83int acpi_pci_link_free_irq(acpi_handle handle); 84int acpi_pci_link_free_irq(acpi_handle handle);
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index 80776d0c52dc..fd72ecf0ce9f 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -629,6 +629,7 @@ struct mlx5_cmd_work_ent {
629 void *uout; 629 void *uout;
630 int uout_size; 630 int uout_size;
631 mlx5_cmd_cbk_t callback; 631 mlx5_cmd_cbk_t callback;
632 struct delayed_work cb_timeout_work;
632 void *context; 633 void *context;
633 int idx; 634 int idx;
634 struct completion done; 635 struct completion done;
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index ee38a4127475..f39b37180c41 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -1062,6 +1062,7 @@ __skb_set_sw_hash(struct sk_buff *skb, __u32 hash, bool is_l4)
1062} 1062}
1063 1063
1064void __skb_get_hash(struct sk_buff *skb); 1064void __skb_get_hash(struct sk_buff *skb);
1065u32 __skb_get_hash_symmetric(struct sk_buff *skb);
1065u32 skb_get_poff(const struct sk_buff *skb); 1066u32 skb_get_poff(const struct sk_buff *skb);
1066u32 __skb_get_poff(const struct sk_buff *skb, void *data, 1067u32 __skb_get_poff(const struct sk_buff *skb, void *data,
1067 const struct flow_keys *keys, int hlen); 1068 const struct flow_keys *keys, int hlen);
@@ -2870,6 +2871,25 @@ static inline void skb_postpush_rcsum(struct sk_buff *skb,
2870} 2871}
2871 2872
2872/** 2873/**
2874 * skb_push_rcsum - push skb and update receive checksum
2875 * @skb: buffer to update
2876 * @len: length of data pulled
2877 *
2878 * This function performs an skb_push on the packet and updates
2879 * the CHECKSUM_COMPLETE checksum. It should be used on
2880 * receive path processing instead of skb_push unless you know
2881 * that the checksum difference is zero (e.g., a valid IP header)
2882 * or you are setting ip_summed to CHECKSUM_NONE.
2883 */
2884static inline unsigned char *skb_push_rcsum(struct sk_buff *skb,
2885 unsigned int len)
2886{
2887 skb_push(skb, len);
2888 skb_postpush_rcsum(skb, skb->data, len);
2889 return skb->data;
2890}
2891
2892/**
2873 * pskb_trim_rcsum - trim received skb and update checksum 2893 * pskb_trim_rcsum - trim received skb and update checksum
2874 * @skb: buffer to trim 2894 * @skb: buffer to trim
2875 * @len: new length 2895 * @len: new length
diff --git a/include/net/bonding.h b/include/net/bonding.h
index 791800ddd6d9..6360c259da6d 100644
--- a/include/net/bonding.h
+++ b/include/net/bonding.h
@@ -34,6 +34,9 @@
34 34
35#define BOND_DEFAULT_MIIMON 100 35#define BOND_DEFAULT_MIIMON 100
36 36
37#ifndef __long_aligned
38#define __long_aligned __attribute__((aligned((sizeof(long)))))
39#endif
37/* 40/*
38 * Less bad way to call ioctl from within the kernel; this needs to be 41 * Less bad way to call ioctl from within the kernel; this needs to be
39 * done some other way to get the call out of interrupt context. 42 * done some other way to get the call out of interrupt context.
@@ -138,7 +141,9 @@ struct bond_params {
138 struct reciprocal_value reciprocal_packets_per_slave; 141 struct reciprocal_value reciprocal_packets_per_slave;
139 u16 ad_actor_sys_prio; 142 u16 ad_actor_sys_prio;
140 u16 ad_user_port_key; 143 u16 ad_user_port_key;
141 u8 ad_actor_system[ETH_ALEN]; 144
145 /* 2 bytes of padding : see ether_addr_equal_64bits() */
146 u8 ad_actor_system[ETH_ALEN + 2];
142}; 147};
143 148
144struct bond_parm_tbl { 149struct bond_parm_tbl {
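The two padding bytes above exist because ether_addr_equal_64bits() compares 6-byte addresses with one 64-bit load each, so the bytes just past the address must be readable even though their value is masked out. A little-endian illustration of the masked compare; the kernel helper also handles big-endian.

    #include <stdint.h>
    #include <string.h>
    #include <stdio.h>

    static int addr_equal_64bits(const uint8_t a[8], const uint8_t b[8])
    {
        uint64_t x, y;

        memcpy(&x, a, 8);       /* one wide load instead of 6 byte loads */
        memcpy(&y, b, 8);
        /* little-endian: the address sits in the low 48 bits, so the
         * 2 padding bytes are masked away here
         */
        return ((x ^ y) & 0x0000ffffffffffffULL) == 0;
    }

    int main(void)
    {
        uint8_t a[8] = { 1, 2, 3, 4, 5, 6, 0xaa, 0xbb };
        uint8_t b[8] = { 1, 2, 3, 4, 5, 6, 0xcc, 0xdd };

        printf("%d\n", addr_equal_64bits(a, b));    /* 1: padding ignored */
        return 0;
    }
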
diff --git a/include/net/ip.h b/include/net/ip.h
index 37165fba3741..08f36cd2b874 100644
--- a/include/net/ip.h
+++ b/include/net/ip.h
@@ -313,10 +313,9 @@ static inline unsigned int ip_dst_mtu_maybe_forward(const struct dst_entry *dst,
313 return min(dst->dev->mtu, IP_MAX_MTU); 313 return min(dst->dev->mtu, IP_MAX_MTU);
314} 314}
315 315
316static inline unsigned int ip_skb_dst_mtu(const struct sk_buff *skb) 316static inline unsigned int ip_skb_dst_mtu(struct sock *sk,
317 const struct sk_buff *skb)
317{ 318{
318 struct sock *sk = skb->sk;
319
320 if (!sk || !sk_fullsock(sk) || ip_sk_use_pmtu(sk)) { 319 if (!sk || !sk_fullsock(sk) || ip_sk_use_pmtu(sk)) {
321 bool forwarding = IPCB(skb)->flags & IPSKB_FORWARDED; 320 bool forwarding = IPCB(skb)->flags & IPSKB_FORWARDED;
322 321
diff --git a/init/Kconfig b/init/Kconfig
index f755a602d4a1..c02d89777713 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -1458,6 +1458,7 @@ config KALLSYMS_ALL
1458 1458
1459config KALLSYMS_ABSOLUTE_PERCPU 1459config KALLSYMS_ABSOLUTE_PERCPU
1460 bool 1460 bool
1461 depends on KALLSYMS
1461 default X86_64 && SMP 1462 default X86_64 && SMP
1462 1463
1463config KALLSYMS_BASE_RELATIVE 1464config KALLSYMS_BASE_RELATIVE
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 85cd41878a74..43d43a2d5811 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -1678,12 +1678,33 @@ static bool is_orphaned_event(struct perf_event *event)
1678 return event->state == PERF_EVENT_STATE_DEAD; 1678 return event->state == PERF_EVENT_STATE_DEAD;
1679} 1679}
1680 1680
1681static inline int pmu_filter_match(struct perf_event *event) 1681static inline int __pmu_filter_match(struct perf_event *event)
1682{ 1682{
1683 struct pmu *pmu = event->pmu; 1683 struct pmu *pmu = event->pmu;
1684 return pmu->filter_match ? pmu->filter_match(event) : 1; 1684 return pmu->filter_match ? pmu->filter_match(event) : 1;
1685} 1685}
1686 1686
1687/*
1688 * Check whether we should attempt to schedule an event group based on
1689 * PMU-specific filtering. An event group can consist of HW and SW events,
1690 * potentially with a SW leader, so we must check all the filters, to
1691 * determine whether a group is schedulable:
1692 */
1693static inline int pmu_filter_match(struct perf_event *event)
1694{
1695 struct perf_event *child;
1696
1697 if (!__pmu_filter_match(event))
1698 return 0;
1699
1700 list_for_each_entry(child, &event->sibling_list, group_entry) {
1701 if (!__pmu_filter_match(child))
1702 return 0;
1703 }
1704
1705 return 1;
1706}
1707
1687static inline int 1708static inline int
1688event_filter_match(struct perf_event *event) 1709event_filter_match(struct perf_event *event)
1689{ 1710{
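The expanded pmu_filter_match() insists that the leader and every sibling pass the PMU filter before a mixed HW/SW group is scheduled. The same all-or-nothing walk, reduced to plain arrays:

    #include <stdio.h>

    struct event { int filter_ok; };

    static int __filter_match(const struct event *e)
    {
        return e->filter_ok;
    }

    static int filter_match(const struct event *leader,
                            const struct event *siblings, int n)
    {
        int i;

        if (!__filter_match(leader))
            return 0;
        for (i = 0; i < n; i++)     /* one bad sibling fails the group */
            if (!__filter_match(&siblings[i]))
                return 0;
        return 1;
    }

    int main(void)
    {
        struct event leader = { 1 };
        struct event sibs[2] = { { 1 }, { 0 } };

        printf("%d\n", filter_match(&leader, sibs, 2));     /* 0 */
        return 0;
    }
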
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index bdcbeea90c95..c8c5d2d48424 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -735,8 +735,6 @@ void post_init_entity_util_avg(struct sched_entity *se)
735 } 735 }
736} 736}
737 737
738static inline unsigned long cfs_rq_runnable_load_avg(struct cfs_rq *cfs_rq);
739static inline unsigned long cfs_rq_load_avg(struct cfs_rq *cfs_rq);
740#else 738#else
741void init_entity_runnable_average(struct sched_entity *se) 739void init_entity_runnable_average(struct sched_entity *se)
742{ 740{
@@ -2499,28 +2497,22 @@ account_entity_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
2499 2497
2500#ifdef CONFIG_FAIR_GROUP_SCHED 2498#ifdef CONFIG_FAIR_GROUP_SCHED
2501# ifdef CONFIG_SMP 2499# ifdef CONFIG_SMP
2502static inline long calc_tg_weight(struct task_group *tg, struct cfs_rq *cfs_rq) 2500static long calc_cfs_shares(struct cfs_rq *cfs_rq, struct task_group *tg)
2503{ 2501{
2504 long tg_weight; 2502 long tg_weight, load, shares;
2505 2503
2506 /* 2504 /*
2507 * Use this CPU's real-time load instead of the last load contribution 2505 * This really should be: cfs_rq->avg.load_avg, but instead we use
2508 * as the updating of the contribution is delayed, and we will use the 2506 * cfs_rq->load.weight, which is its upper bound. This helps ramp up
2509 * the real-time load to calc the share. See update_tg_load_avg(). 2507 * the shares for small weight interactive tasks.
2510 */ 2508 */
2511 tg_weight = atomic_long_read(&tg->load_avg); 2509 load = scale_load_down(cfs_rq->load.weight);
2512 tg_weight -= cfs_rq->tg_load_avg_contrib;
2513 tg_weight += cfs_rq->load.weight;
2514 2510
2515 return tg_weight; 2511 tg_weight = atomic_long_read(&tg->load_avg);
2516}
2517
2518static long calc_cfs_shares(struct cfs_rq *cfs_rq, struct task_group *tg)
2519{
2520 long tg_weight, load, shares;
2521 2512
2522 tg_weight = calc_tg_weight(tg, cfs_rq); 2513 /* Ensure tg_weight >= load */
2523 load = cfs_rq->load.weight; 2514 tg_weight -= cfs_rq->tg_load_avg_contrib;
2515 tg_weight += load;
2524 2516
2525 shares = (tg->shares * load); 2517 shares = (tg->shares * load);
2526 if (tg_weight) 2518 if (tg_weight)
@@ -2539,6 +2531,7 @@ static inline long calc_cfs_shares(struct cfs_rq *cfs_rq, struct task_group *tg)
2539 return tg->shares; 2531 return tg->shares;
2540} 2532}
2541# endif /* CONFIG_SMP */ 2533# endif /* CONFIG_SMP */
2534
2542static void reweight_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, 2535static void reweight_entity(struct cfs_rq *cfs_rq, struct sched_entity *se,
2543 unsigned long weight) 2536 unsigned long weight)
2544{ 2537{
@@ -4946,19 +4939,24 @@ static long effective_load(struct task_group *tg, int cpu, long wl, long wg)
4946 return wl; 4939 return wl;
4947 4940
4948 for_each_sched_entity(se) { 4941 for_each_sched_entity(se) {
4949 long w, W; 4942 struct cfs_rq *cfs_rq = se->my_q;
4943 long W, w = cfs_rq_load_avg(cfs_rq);
4950 4944
4951 tg = se->my_q->tg; 4945 tg = cfs_rq->tg;
4952 4946
4953 /* 4947 /*
4954 * W = @wg + \Sum rw_j 4948 * W = @wg + \Sum rw_j
4955 */ 4949 */
4956 W = wg + calc_tg_weight(tg, se->my_q); 4950 W = wg + atomic_long_read(&tg->load_avg);
4951
4952 /* Ensure \Sum rw_j >= rw_i */
4953 W -= cfs_rq->tg_load_avg_contrib;
4954 W += w;
4957 4955
4958 /* 4956 /*
4959 * w = rw_i + @wl 4957 * w = rw_i + @wl
4960 */ 4958 */
4961 w = cfs_rq_load_avg(se->my_q) + wl; 4959 w += wl;
4962 4960
4963 /* 4961 /*
4964 * wl = S * s'_i; see (2) 4962 * wl = S * s'_i; see (2)
diff --git a/net/bridge/br_netfilter_hooks.c b/net/bridge/br_netfilter_hooks.c
index 2d25979273a6..77e7f69bf80d 100644
--- a/net/bridge/br_netfilter_hooks.c
+++ b/net/bridge/br_netfilter_hooks.c
@@ -700,7 +700,7 @@ static int
700br_nf_ip_fragment(struct net *net, struct sock *sk, struct sk_buff *skb, 700br_nf_ip_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
701 int (*output)(struct net *, struct sock *, struct sk_buff *)) 701 int (*output)(struct net *, struct sock *, struct sk_buff *))
702{ 702{
703 unsigned int mtu = ip_skb_dst_mtu(skb); 703 unsigned int mtu = ip_skb_dst_mtu(sk, skb);
704 struct iphdr *iph = ip_hdr(skb); 704 struct iphdr *iph = ip_hdr(skb);
705 705
706 if (unlikely(((iph->frag_off & htons(IP_DF)) && !skb->ignore_df) || 706 if (unlikely(((iph->frag_off & htons(IP_DF)) && !skb->ignore_df) ||
diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c
index a669dea146c6..61ad43f61c5e 100644
--- a/net/core/flow_dissector.c
+++ b/net/core/flow_dissector.c
@@ -651,6 +651,23 @@ void make_flow_keys_digest(struct flow_keys_digest *digest,
651} 651}
652EXPORT_SYMBOL(make_flow_keys_digest); 652EXPORT_SYMBOL(make_flow_keys_digest);
653 653
654static struct flow_dissector flow_keys_dissector_symmetric __read_mostly;
655
656u32 __skb_get_hash_symmetric(struct sk_buff *skb)
657{
658 struct flow_keys keys;
659
660 __flow_hash_secret_init();
661
662 memset(&keys, 0, sizeof(keys));
663 __skb_flow_dissect(skb, &flow_keys_dissector_symmetric, &keys,
664 NULL, 0, 0, 0,
665 FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL);
666
667 return __flow_hash_from_keys(&keys, hashrnd);
668}
669EXPORT_SYMBOL_GPL(__skb_get_hash_symmetric);
670
654/** 671/**
655 * __skb_get_hash: calculate a flow hash 672 * __skb_get_hash: calculate a flow hash
656 * @skb: sk_buff to calculate flow hash from 673 * @skb: sk_buff to calculate flow hash from
@@ -868,6 +885,29 @@ static const struct flow_dissector_key flow_keys_dissector_keys[] = {
868 }, 885 },
869}; 886};
870 887
888static const struct flow_dissector_key flow_keys_dissector_symmetric_keys[] = {
889 {
890 .key_id = FLOW_DISSECTOR_KEY_CONTROL,
891 .offset = offsetof(struct flow_keys, control),
892 },
893 {
894 .key_id = FLOW_DISSECTOR_KEY_BASIC,
895 .offset = offsetof(struct flow_keys, basic),
896 },
897 {
898 .key_id = FLOW_DISSECTOR_KEY_IPV4_ADDRS,
899 .offset = offsetof(struct flow_keys, addrs.v4addrs),
900 },
901 {
902 .key_id = FLOW_DISSECTOR_KEY_IPV6_ADDRS,
903 .offset = offsetof(struct flow_keys, addrs.v6addrs),
904 },
905 {
906 .key_id = FLOW_DISSECTOR_KEY_PORTS,
907 .offset = offsetof(struct flow_keys, ports),
908 },
909};
910
871static const struct flow_dissector_key flow_keys_buf_dissector_keys[] = { 911static const struct flow_dissector_key flow_keys_buf_dissector_keys[] = {
872 { 912 {
873 .key_id = FLOW_DISSECTOR_KEY_CONTROL, 913 .key_id = FLOW_DISSECTOR_KEY_CONTROL,
@@ -889,6 +929,9 @@ static int __init init_default_flow_dissectors(void)
889 skb_flow_dissector_init(&flow_keys_dissector, 929 skb_flow_dissector_init(&flow_keys_dissector,
890 flow_keys_dissector_keys, 930 flow_keys_dissector_keys,
891 ARRAY_SIZE(flow_keys_dissector_keys)); 931 ARRAY_SIZE(flow_keys_dissector_keys));
932 skb_flow_dissector_init(&flow_keys_dissector_symmetric,
933 flow_keys_dissector_symmetric_keys,
934 ARRAY_SIZE(flow_keys_dissector_symmetric_keys));
892 skb_flow_dissector_init(&flow_keys_buf_dissector, 935 skb_flow_dissector_init(&flow_keys_buf_dissector,
893 flow_keys_buf_dissector_keys, 936 flow_keys_buf_dissector_keys,
894 ARRAY_SIZE(flow_keys_buf_dissector_keys)); 937 ARRAY_SIZE(flow_keys_buf_dissector_keys));
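The symmetric dissector above restricts hashing to the addresses, ports and basic keys so PACKET fanout can keep both directions of a flow on one socket. The property it is after, hash(A to B) equal to hash(B to A), can be shown with a canonical-ordering toy hash; this is a concept sketch only, not the kernel's actual key handling or mixing.

    #include <stdint.h>
    #include <stdio.h>

    struct endpoint { uint32_t addr; uint16_t port; };

    static uint64_t key_of(struct endpoint e)
    {
        return ((uint64_t)e.addr << 16) | e.port;
    }

    static uint64_t flow_hash(struct endpoint a, struct endpoint b)
    {
        uint64_t lo = key_of(a), hi = key_of(b), t;

        if (lo > hi) { t = lo; lo = hi; hi = t; }   /* order-independent */
        return (lo * 0x9e3779b97f4a7c15ULL) ^ hi;   /* toy mix */
    }

    int main(void)
    {
        struct endpoint a = { 0x0a000001, 80 }, b = { 0x0a000002, 4242 };

        printf("%d\n", flow_hash(a, b) == flow_hash(b, a));     /* 1 */
        return 0;
    }
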
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index f2b77e549c03..eb12d2161fb2 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -3016,24 +3016,6 @@ int skb_append_pagefrags(struct sk_buff *skb, struct page *page,
3016EXPORT_SYMBOL_GPL(skb_append_pagefrags); 3016EXPORT_SYMBOL_GPL(skb_append_pagefrags);
3017 3017
3018/** 3018/**
3019 * skb_push_rcsum - push skb and update receive checksum
3020 * @skb: buffer to update
3021 * @len: length of data pulled
3022 *
3023 * This function performs an skb_push on the packet and updates
3024 * the CHECKSUM_COMPLETE checksum. It should be used on
3025 * receive path processing instead of skb_push unless you know
3026 * that the checksum difference is zero (e.g., a valid IP header)
3027 * or you are setting ip_summed to CHECKSUM_NONE.
3028 */
3029static unsigned char *skb_push_rcsum(struct sk_buff *skb, unsigned len)
3030{
3031 skb_push(skb, len);
3032 skb_postpush_rcsum(skb, skb->data, len);
3033 return skb->data;
3034}
3035
3036/**
3037 * skb_pull_rcsum - pull skb and update receive checksum 3019 * skb_pull_rcsum - pull skb and update receive checksum
3038 * @skb: buffer to update 3020 * @skb: buffer to update
3039 * @len: length of data pulled 3021 * @len: length of data pulled
diff --git a/net/decnet/dn_fib.c b/net/decnet/dn_fib.c
index df4803437888..a796fc7cbc35 100644
--- a/net/decnet/dn_fib.c
+++ b/net/decnet/dn_fib.c
@@ -41,6 +41,7 @@
41#include <net/dn_fib.h> 41#include <net/dn_fib.h>
42#include <net/dn_neigh.h> 42#include <net/dn_neigh.h>
43#include <net/dn_dev.h> 43#include <net/dn_dev.h>
44#include <net/nexthop.h>
44 45
45#define RT_MIN_TABLE 1 46#define RT_MIN_TABLE 1
46 47
@@ -150,14 +151,13 @@ static int dn_fib_count_nhs(const struct nlattr *attr)
150 struct rtnexthop *nhp = nla_data(attr); 151 struct rtnexthop *nhp = nla_data(attr);
151 int nhs = 0, nhlen = nla_len(attr); 152 int nhs = 0, nhlen = nla_len(attr);
152 153
153 while(nhlen >= (int)sizeof(struct rtnexthop)) { 154 while (rtnh_ok(nhp, nhlen)) {
154 if ((nhlen -= nhp->rtnh_len) < 0)
155 return 0;
156 nhs++; 155 nhs++;
157 nhp = RTNH_NEXT(nhp); 156 nhp = rtnh_next(nhp, &nhlen);
158 } 157 }
159 158
160 return nhs; 159 /* leftover implies invalid nexthop configuration, discard it */
160 return nhlen > 0 ? 0 : nhs;
161} 161}
162 162
163static int dn_fib_get_nhs(struct dn_fib_info *fi, const struct nlattr *attr, 163static int dn_fib_get_nhs(struct dn_fib_info *fi, const struct nlattr *attr,
@@ -167,21 +167,24 @@ static int dn_fib_get_nhs(struct dn_fib_info *fi, const struct nlattr *attr,
167 int nhlen = nla_len(attr); 167 int nhlen = nla_len(attr);
168 168
169 change_nexthops(fi) { 169 change_nexthops(fi) {
170 int attrlen = nhlen - sizeof(struct rtnexthop); 170 int attrlen;
171 if (attrlen < 0 || (nhlen -= nhp->rtnh_len) < 0) 171
172 if (!rtnh_ok(nhp, nhlen))
172 return -EINVAL; 173 return -EINVAL;
173 174
174 nh->nh_flags = (r->rtm_flags&~0xFF) | nhp->rtnh_flags; 175 nh->nh_flags = (r->rtm_flags&~0xFF) | nhp->rtnh_flags;
175 nh->nh_oif = nhp->rtnh_ifindex; 176 nh->nh_oif = nhp->rtnh_ifindex;
176 nh->nh_weight = nhp->rtnh_hops + 1; 177 nh->nh_weight = nhp->rtnh_hops + 1;
177 178
178 if (attrlen) { 179 attrlen = rtnh_attrlen(nhp);
180 if (attrlen > 0) {
179 struct nlattr *gw_attr; 181 struct nlattr *gw_attr;
180 182
181 gw_attr = nla_find((struct nlattr *) (nhp + 1), attrlen, RTA_GATEWAY); 183 gw_attr = nla_find((struct nlattr *) (nhp + 1), attrlen, RTA_GATEWAY);
182 nh->nh_gw = gw_attr ? nla_get_le16(gw_attr) : 0; 184 nh->nh_gw = gw_attr ? nla_get_le16(gw_attr) : 0;
183 } 185 }
184 nhp = RTNH_NEXT(nhp); 186
187 nhp = rtnh_next(nhp, &nhlen);
185 } endfor_nexthops(fi); 188 } endfor_nexthops(fi);
186 189
187 return 0; 190 return 0;
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index 124bf0a66328..4bd4921639c3 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -271,7 +271,7 @@ static int ip_finish_output(struct net *net, struct sock *sk, struct sk_buff *skb)
 		return dst_output(net, sk, skb);
 	}
 #endif
-	mtu = ip_skb_dst_mtu(skb);
+	mtu = ip_skb_dst_mtu(sk, skb);
 	if (skb_is_gso(skb))
 		return ip_finish_output_gso(net, sk, skb, mtu);
 
@@ -541,7 +541,7 @@ int ip_do_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
 
 	iph = ip_hdr(skb);
 
-	mtu = ip_skb_dst_mtu(skb);
+	mtu = ip_skb_dst_mtu(sk, skb);
 	if (IPCB(skb)->frag_max_size && IPCB(skb)->frag_max_size < mtu)
 		mtu = IPCB(skb)->frag_max_size;
 
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
index 1bcef2369d64..771be1fa4176 100644
--- a/net/ipv6/ip6_fib.c
+++ b/net/ipv6/ip6_fib.c
@@ -177,6 +177,7 @@ static void rt6_free_pcpu(struct rt6_info *non_pcpu_rt)
 		}
 	}
 
+	free_percpu(non_pcpu_rt->rt6i_pcpu);
 	non_pcpu_rt->rt6i_pcpu = NULL;
 }
 
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 9bff6ef16fa7..9f0983fa4d52 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -1341,7 +1341,7 @@ static unsigned int fanout_demux_hash(struct packet_fanout *f,
 				      struct sk_buff *skb,
 				      unsigned int num)
 {
-	return reciprocal_scale(skb_get_hash(skb), num);
+	return reciprocal_scale(__skb_get_hash_symmetric(skb), num);
 }
 
 static unsigned int fanout_demux_lb(struct packet_fanout *f,
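The fanout change swaps the regular flow hash for a symmetric one, so both directions of a connection land on the same fanout socket. The scaling step, reciprocal_scale(), maps a 32-bit hash onto [0, num) with a multiply and a shift instead of a modulo; the sketch below mirrors the kernel's one-line definition, with the hash value invented for illustration:

    #include <stdint.h>
    #include <stdio.h>

    /* map val uniformly onto [0, ep_ro) without a division */
    static uint32_t reciprocal_scale(uint32_t val, uint32_t ep_ro)
    {
        return (uint32_t)(((uint64_t)val * ep_ro) >> 32);
    }

    int main(void)
    {
        uint32_t hash = 0xdeadbeef;   /* stand-in for the skb hash */

        printf("socket %u of 4\n", reciprocal_scale(hash, 4));  /* socket 3 */
        return 0;
    }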
diff --git a/net/rds/tcp.c b/net/rds/tcp.c
index 74ee126a6fe6..c8a7b4c90190 100644
--- a/net/rds/tcp.c
+++ b/net/rds/tcp.c
@@ -616,7 +616,7 @@ static int rds_tcp_init(void)
 
 	ret = rds_tcp_recv_init();
 	if (ret)
-		goto out_slab;
+		goto out_pernet;
 
 	ret = rds_trans_register(&rds_tcp_transport);
 	if (ret)
@@ -628,8 +628,9 @@ static int rds_tcp_init(void)
 
 out_recv:
 	rds_tcp_recv_exit();
-out_slab:
+out_pernet:
 	unregister_pernet_subsys(&rds_tcp_net_ops);
+out_slab:
 	kmem_cache_destroy(rds_tcp_conn_slab);
 out:
 	return ret;
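The relabeling above is the usual mirrored-unwind discipline: every setup step gets its own cleanup label, and the labels undo work in exactly the reverse order, so a failure after pernet registration unwinds the pernet state before the slab. A generic sketch of the shape; setup_a()/setup_b() are placeholders, not RDS functions:

    #include <stdio.h>

    static int setup_a(void) { puts("setup a"); return 0; }
    static int setup_b(void) { puts("setup b fails"); return -1; }
    static void undo_a(void) { puts("undo a"); }

    static int init(void)
    {
        int ret;

        ret = setup_a();
        if (ret)
            goto out;
        ret = setup_b();
        if (ret)
            goto out_a;             /* unwind a, and only a */
        return 0;

    out_a:
        undo_a();
    out:
        return ret;
    }

    int main(void)
    {
        return init() ? 1 : 0;
    }

The original bug jumped to a label that skipped one teardown step; adding a label per step keeps the error paths one-to-one with the setup steps.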
diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c
index 128942bc9e42..1f5bd6ccbd2c 100644
--- a/net/sched/act_mirred.c
+++ b/net/sched/act_mirred.c
@@ -181,7 +181,7 @@ static int tcf_mirred(struct sk_buff *skb, const struct tc_action *a,
 
 	if (!(at & AT_EGRESS)) {
 		if (m->tcfm_ok_push)
-			skb_push(skb2, skb->mac_len);
+			skb_push_rcsum(skb2, skb->mac_len);
 	}
 
 	/* mirror is always swallowed */
diff --git a/net/tipc/netlink_compat.c b/net/tipc/netlink_compat.c
index 3ad9fab1985f..1fd464764765 100644
--- a/net/tipc/netlink_compat.c
+++ b/net/tipc/netlink_compat.c
@@ -604,7 +604,7 @@ static int tipc_nl_compat_link_dump(struct tipc_nl_compat_msg *msg,
 
 	link_info.dest = nla_get_flag(link[TIPC_NLA_LINK_DEST]);
 	link_info.up = htonl(nla_get_flag(link[TIPC_NLA_LINK_UP]));
-	nla_strlcpy(link_info.str, nla_data(link[TIPC_NLA_LINK_NAME]),
+	nla_strlcpy(link_info.str, link[TIPC_NLA_LINK_NAME],
 		    TIPC_MAX_LINK_NAME);
 
 	return tipc_add_tlv(msg->rep, TIPC_TLV_LINK_INFO,
diff --git a/security/apparmor/lsm.c b/security/apparmor/lsm.c
index 2660fbcf94d1..7798e1608f4f 100644
--- a/security/apparmor/lsm.c
+++ b/security/apparmor/lsm.c
@@ -500,34 +500,34 @@ static int apparmor_setprocattr(struct task_struct *task, char *name,
 {
 	struct common_audit_data sa;
 	struct apparmor_audit_data aad = {0,};
-	char *command, *args = value;
+	char *command, *largs = NULL, *args = value;
 	size_t arg_size;
 	int error;
 
 	if (size == 0)
 		return -EINVAL;
-	/* args points to a PAGE_SIZE buffer, AppArmor requires that
-	 * the buffer must be null terminated or have size <= PAGE_SIZE -1
-	 * so that AppArmor can null terminate them
-	 */
-	if (args[size - 1] != '\0') {
-		if (size == PAGE_SIZE)
-			return -EINVAL;
-		args[size] = '\0';
-	}
-
 	/* task can only write its own attributes */
 	if (current != task)
 		return -EACCES;
 
-	args = value;
+	/* AppArmor requires that the buffer must be null terminated atm */
+	if (args[size - 1] != '\0') {
+		/* null terminate */
+		largs = args = kmalloc(size + 1, GFP_KERNEL);
+		if (!args)
+			return -ENOMEM;
+		memcpy(args, value, size);
+		args[size] = '\0';
+	}
+
+	error = -EINVAL;
 	args = strim(args);
 	command = strsep(&args, " ");
 	if (!args)
-		return -EINVAL;
+		goto out;
 	args = skip_spaces(args);
 	if (!*args)
-		return -EINVAL;
+		goto out;
 
 	arg_size = size - (args - (char *) value);
 	if (strcmp(name, "current") == 0) {
@@ -553,10 +553,12 @@ static int apparmor_setprocattr(struct task_struct *task, char *name,
 		goto fail;
 	} else
 		/* only support the "current" and "exec" process attributes */
-		return -EINVAL;
+		goto fail;
 
 	if (!error)
 		error = size;
+out:
+	kfree(largs);
 	return error;
 
 fail:
@@ -565,9 +567,9 @@ fail:
 	aad.profile = aa_current_profile();
 	aad.op = OP_SETPROCATTR;
 	aad.info = name;
-	aad.error = -EINVAL;
+	aad.error = error = -EINVAL;
 	aa_audit_msg(AUDIT_APPARMOR_DENIED, &sa, NULL);
-	return -EINVAL;
+	goto out;
 }
 
 static int apparmor_task_setrlimit(struct task_struct *task,
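The rewrite above stops AppArmor from writing a NUL terminator into the caller's buffer and instead takes a private, terminated copy that every exit path frees. The same shape in a self-contained userspace sketch, with malloc() standing in for kmalloc() and the parsing reduced to a print:

    #include <stdlib.h>
    #include <string.h>
    #include <stdio.h>

    static int handle(const char *value, size_t size)
    {
        char *largs = NULL;
        const char *args = value;
        int err = -1;

        if (!size)
            return -1;
        if (value[size - 1] != '\0') {  /* not terminated: copy it */
            largs = malloc(size + 1);
            if (!largs)
                return -1;
            memcpy(largs, value, size);
            largs[size] = '\0';
            args = largs;
        }

        printf("parsed: %s\n", args);
        err = 0;

        free(largs);                    /* safe when NULL */
        return err;
    }

    int main(void)
    {
        char raw[4] = { 'a', 'b', 'c', 'd' };   /* no terminator */

        return handle(raw, sizeof(raw));
    }

The single out label in the real patch plays the role of the unconditional free() here: one exit, one place where the copy is released.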
diff --git a/sound/core/timer.c b/sound/core/timer.c
index e722022d325d..9a6157ea6881 100644
--- a/sound/core/timer.c
+++ b/sound/core/timer.c
@@ -1955,6 +1955,7 @@ static ssize_t snd_timer_user_read(struct file *file, char __user *buffer,
 
 		qhead = tu->qhead++;
 		tu->qhead %= tu->queue_size;
+		tu->qused--;
 		spin_unlock_irq(&tu->qlock);
 
 		if (tu->tread) {
@@ -1968,7 +1969,6 @@ static ssize_t snd_timer_user_read(struct file *file, char __user *buffer,
 		}
 
 		spin_lock_irq(&tu->qlock);
-		tu->qused--;
 		if (err < 0)
 			goto _error;
 		result += unit;
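Moving tu->qused-- next to the qhead update means the head index and the element count change within a single lock hold, so anything that takes the lock in the window between unlock and relock can no longer see them disagree. Compressed to a pthread sketch, not the ALSA structures:

    #include <pthread.h>
    #include <stdio.h>

    #define QSIZE 8

    static pthread_mutex_t qlock = PTHREAD_MUTEX_INITIALIZER;
    static int qhead, qused = 3;

    static int dequeue(void)
    {
        int slot;

        pthread_mutex_lock(&qlock);
        slot = qhead;
        qhead = (qhead + 1) % QSIZE;
        qused--;                /* keep head and count consistent */
        pthread_mutex_unlock(&qlock);

        /* ... copy the slot contents out without the lock held ... */
        return slot;
    }

    int main(void)
    {
        printf("slot %d, %d queued\n", dequeue(), qused);
        return 0;
    }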
diff --git a/sound/pci/au88x0/au88x0_core.c b/sound/pci/au88x0/au88x0_core.c
index 4a054d720112..d3125c169684 100644
--- a/sound/pci/au88x0/au88x0_core.c
+++ b/sound/pci/au88x0/au88x0_core.c
@@ -1444,9 +1444,8 @@ static int vortex_wtdma_bufshift(vortex_t * vortex, int wtdma)
 	int page, p, pp, delta, i;
 
 	page =
-	    (hwread(vortex->mmio, VORTEX_WTDMA_STAT + (wtdma << 2)) &
-	     WT_SUBBUF_MASK)
-	    >> WT_SUBBUF_SHIFT;
+	    (hwread(vortex->mmio, VORTEX_WTDMA_STAT + (wtdma << 2))
+	    >> WT_SUBBUF_SHIFT) & WT_SUBBUF_MASK;
 	if (dma->nr_periods >= 4)
 		delta = (page - dma->period_real) & 3;
 	else {
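The au88x0 change reorders a mask-then-shift into shift-then-mask. The two are only equivalent when the mask is pre-shifted to the field's position; with a field-relative mask it has to be applied after the shift. Illustrated with invented register values, the real WT_SUBBUF_* constants are not reproduced here:

    #include <stdio.h>

    #define WT_SUBBUF_SHIFT 24
    #define WT_SUBBUF_MASK  0x3

    int main(void)
    {
        unsigned int stat = 0x02ffffff; /* subbuf field holds 2 */

        unsigned int wrong = (stat & WT_SUBBUF_MASK) >> WT_SUBBUF_SHIFT;
        unsigned int right = (stat >> WT_SUBBUF_SHIFT) & WT_SUBBUF_MASK;

        printf("wrong=%u right=%u\n", wrong, right); /* 0 vs 2 */
        return 0;
    }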
diff --git a/sound/pci/echoaudio/echoaudio.c b/sound/pci/echoaudio/echoaudio.c
index 1cb85aeb0cea..286f5e3686a3 100644
--- a/sound/pci/echoaudio/echoaudio.c
+++ b/sound/pci/echoaudio/echoaudio.c
@@ -2200,11 +2200,11 @@ static int snd_echo_resume(struct device *dev)
 	u32 pipe_alloc_mask;
 	int err;
 
-	commpage_bak = kmalloc(sizeof(struct echoaudio), GFP_KERNEL);
+	commpage_bak = kmalloc(sizeof(*commpage), GFP_KERNEL);
 	if (commpage_bak == NULL)
 		return -ENOMEM;
 	commpage = chip->comm_page;
-	memcpy(commpage_bak, commpage, sizeof(struct comm_page));
+	memcpy(commpage_bak, commpage, sizeof(*commpage));
 
 	err = init_hw(chip, chip->pci->device, chip->pci->subsystem_device);
 	if (err < 0) {
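The echoaudio fix sizes both the allocation and the copy from the pointer being filled, sizeof(*commpage), so the two can never drift apart when a type changes; the old code allocated one struct type and copied another. The idiom in miniature, struct name invented:

    #include <stdlib.h>
    #include <string.h>

    struct comm_page { char regs[4096]; };

    int main(void)
    {
        struct comm_page live = { .regs = "live" };
        struct comm_page *bak;

        bak = malloc(sizeof(*bak));   /* not sizeof(struct something_else) */
        if (!bak)
            return 1;
        memcpy(bak, &live, sizeof(*bak));
        free(bak);
        return 0;
    }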
diff --git a/sound/pci/hda/hda_generic.c b/sound/pci/hda/hda_generic.c
index 320445f3bf73..79c7b340acc2 100644
--- a/sound/pci/hda/hda_generic.c
+++ b/sound/pci/hda/hda_generic.c
@@ -3977,6 +3977,8 @@ static hda_nid_t set_path_power(struct hda_codec *codec, hda_nid_t nid,
 
 	for (n = 0; n < spec->paths.used; n++) {
 		path = snd_array_elem(&spec->paths, n);
+		if (!path->depth)
+			continue;
 		if (path->path[0] == nid ||
 		    path->path[path->depth - 1] == nid) {
 			bool pin_old = path->pin_enabled;
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index 94089fc71884..e320c44714b1 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -367,9 +367,10 @@ enum {
 #define IS_SKL_LP(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0x9d70)
 #define IS_KBL(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0xa171)
 #define IS_KBL_LP(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0x9d71)
+#define IS_KBL_H(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0xa2f0)
 #define IS_BXT(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0x5a98)
 #define IS_SKL_PLUS(pci) (IS_SKL(pci) || IS_SKL_LP(pci) || IS_BXT(pci)) || \
-			IS_KBL(pci) || IS_KBL_LP(pci)
+			IS_KBL(pci) || IS_KBL_LP(pci) || IS_KBL_H(pci)
 
 static char *driver_short_names[] = {
 	[AZX_DRIVER_ICH] = "HDA Intel",
@@ -2190,6 +2191,9 @@ static const struct pci_device_id azx_ids[] = {
 	/* Kabylake-LP */
 	{ PCI_DEVICE(0x8086, 0x9d71),
 	  .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_SKYLAKE },
+	/* Kabylake-H */
+	{ PCI_DEVICE(0x8086, 0xa2f0),
+	  .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_SKYLAKE },
 	/* Broxton-P(Apollolake) */
 	{ PCI_DEVICE(0x8086, 0x5a98),
 	  .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_BROXTON },
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 900bfbc3368c..5fac786e4982 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -5651,6 +5651,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
 	SND_PCI_QUIRK(0x17aa, 0x504a, "ThinkPad X260", ALC292_FIXUP_TPT440_DOCK),
 	SND_PCI_QUIRK(0x17aa, 0x504b, "Thinkpad", ALC293_FIXUP_LENOVO_SPK_NOISE),
 	SND_PCI_QUIRK(0x17aa, 0x5050, "Thinkpad T560p", ALC292_FIXUP_TPT460),
+	SND_PCI_QUIRK(0x17aa, 0x5051, "Thinkpad L460", ALC292_FIXUP_TPT460),
 	SND_PCI_QUIRK(0x17aa, 0x5053, "Thinkpad T460", ALC292_FIXUP_TPT460),
 	SND_PCI_QUIRK(0x17aa, 0x5109, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
 	SND_PCI_QUIRK(0x17aa, 0x3bf8, "Quanta FL1", ALC269_FIXUP_PCM_44K),
diff --git a/sound/soc/codecs/Kconfig b/sound/soc/codecs/Kconfig
index 4d82a58ff6b0..f3fb98f0a995 100644
--- a/sound/soc/codecs/Kconfig
+++ b/sound/soc/codecs/Kconfig
@@ -483,9 +483,10 @@ config SND_SOC_DMIC
 	tristate
 
 config SND_SOC_HDMI_CODEC
 	tristate
 	select SND_PCM_ELD
 	select SND_PCM_IEC958
+	select HDMI
 
 config SND_SOC_ES8328
 	tristate "Everest Semi ES8328 CODEC"
diff --git a/sound/soc/codecs/ak4613.c b/sound/soc/codecs/ak4613.c
index 647f69de6baa..5013d2ba0c10 100644
--- a/sound/soc/codecs/ak4613.c
+++ b/sound/soc/codecs/ak4613.c
@@ -146,6 +146,7 @@ static const struct regmap_config ak4613_regmap_cfg = {
 	.max_register = 0x16,
 	.reg_defaults = ak4613_reg,
 	.num_reg_defaults = ARRAY_SIZE(ak4613_reg),
+	.cache_type = REGCACHE_RBTREE,
 };
 
 static const struct of_device_id ak4613_of_match[] = {
@@ -530,7 +531,6 @@ static int ak4613_i2c_remove(struct i2c_client *client)
 static struct i2c_driver ak4613_i2c_driver = {
 	.driver = {
 		.name = "ak4613-codec",
-		.owner = THIS_MODULE,
 		.of_match_table = ak4613_of_match,
 	},
 	.probe = ak4613_i2c_probe,
diff --git a/sound/soc/codecs/cx20442.c b/sound/soc/codecs/cx20442.c
index d6f4abbbf8a7..fb3885fe0afb 100644
--- a/sound/soc/codecs/cx20442.c
+++ b/sound/soc/codecs/cx20442.c
@@ -226,6 +226,7 @@ static int v253_open(struct tty_struct *tty)
 	if (!tty->disc_data)
 		return -ENODEV;
 
+	tty->receive_room = 16;
 	if (tty->ops->write(tty, v253_init, len) != len) {
 		ret = -EIO;
 		goto err;
diff --git a/sound/soc/codecs/hdac_hdmi.c b/sound/soc/codecs/hdac_hdmi.c
index 181cd3bf0b92..2abb742fc47b 100644
--- a/sound/soc/codecs/hdac_hdmi.c
+++ b/sound/soc/codecs/hdac_hdmi.c
@@ -1474,6 +1474,11 @@ static int hdmi_codec_probe(struct snd_soc_codec *codec)
 	 * exit, we call pm_runtime_suspend() so that will do for us
 	 */
 	hlink = snd_hdac_ext_bus_get_link(edev->ebus, dev_name(&edev->hdac.dev));
+	if (!hlink) {
+		dev_err(&edev->hdac.dev, "hdac link not found\n");
+		return -EIO;
+	}
+
 	snd_hdac_ext_bus_link_get(edev->ebus, hlink);
 
 	ret = create_fill_widget_route_map(dapm);
@@ -1634,6 +1639,11 @@ static int hdac_hdmi_dev_probe(struct hdac_ext_device *edev)
 
 	/* hold the ref while we probe */
 	hlink = snd_hdac_ext_bus_get_link(edev->ebus, dev_name(&edev->hdac.dev));
+	if (!hlink) {
+		dev_err(&edev->hdac.dev, "hdac link not found\n");
+		return -EIO;
+	}
+
 	snd_hdac_ext_bus_link_get(edev->ebus, hlink);
 
 	hdmi_priv = devm_kzalloc(&codec->dev, sizeof(*hdmi_priv), GFP_KERNEL);
@@ -1744,6 +1754,11 @@ static int hdac_hdmi_runtime_suspend(struct device *dev)
 	}
 
 	hlink = snd_hdac_ext_bus_get_link(ebus, dev_name(dev));
+	if (!hlink) {
+		dev_err(dev, "hdac link not found\n");
+		return -EIO;
+	}
+
 	snd_hdac_ext_bus_link_put(ebus, hlink);
 
 	return 0;
@@ -1765,6 +1780,11 @@ static int hdac_hdmi_runtime_resume(struct device *dev)
 		return 0;
 
 	hlink = snd_hdac_ext_bus_get_link(ebus, dev_name(dev));
+	if (!hlink) {
+		dev_err(dev, "hdac link not found\n");
+		return -EIO;
+	}
+
 	snd_hdac_ext_bus_link_get(ebus, hlink);
 
 	err = snd_hdac_display_power(bus, true);
diff --git a/sound/soc/codecs/rt5645.c b/sound/soc/codecs/rt5645.c
index 3c6594da6c9c..d70847c9eeb0 100644
--- a/sound/soc/codecs/rt5645.c
+++ b/sound/soc/codecs/rt5645.c
@@ -253,7 +253,7 @@ static const struct reg_default rt5650_reg[] = {
 	{ 0x2b, 0x5454 },
 	{ 0x2c, 0xaaa0 },
 	{ 0x2d, 0x0000 },
-	{ 0x2f, 0x1002 },
+	{ 0x2f, 0x5002 },
 	{ 0x31, 0x5000 },
 	{ 0x32, 0x0000 },
 	{ 0x33, 0x0000 },
diff --git a/sound/soc/codecs/rt5670.c b/sound/soc/codecs/rt5670.c
index 49a9e7049e2b..0af5ddbef1da 100644
--- a/sound/soc/codecs/rt5670.c
+++ b/sound/soc/codecs/rt5670.c
@@ -619,7 +619,7 @@ static const struct snd_kcontrol_new rt5670_snd_controls[] = {
 		   RT5670_L_MUTE_SFT, RT5670_R_MUTE_SFT, 1, 1),
 	SOC_DOUBLE_TLV("HP Playback Volume", RT5670_HP_VOL,
 		       RT5670_L_VOL_SFT, RT5670_R_VOL_SFT,
-		       39, 0, out_vol_tlv),
+		       39, 1, out_vol_tlv),
 	/* OUTPUT Control */
 	SOC_DOUBLE("OUT Channel Switch", RT5670_LOUT1,
 		   RT5670_VOL_L_SFT, RT5670_VOL_R_SFT, 1, 1),
diff --git a/sound/soc/codecs/wm5102.c b/sound/soc/codecs/wm5102.c
index da60e3fe5ee7..e7fe6b7b95b7 100644
--- a/sound/soc/codecs/wm5102.c
+++ b/sound/soc/codecs/wm5102.c
@@ -1872,7 +1872,7 @@ static struct snd_soc_dai_driver wm5102_dai[] = {
 	.capture = {
 		.stream_name = "Audio Trace CPU",
 		.channels_min = 1,
-		.channels_max = 6,
+		.channels_max = 4,
 		.rates = WM5102_RATES,
 		.formats = WM5102_FORMATS,
 	},
diff --git a/sound/soc/codecs/wm5110.c b/sound/soc/codecs/wm5110.c
index b5820e4d5471..d54f1b46c9ec 100644
--- a/sound/soc/codecs/wm5110.c
+++ b/sound/soc/codecs/wm5110.c
@@ -1723,6 +1723,7 @@ static const struct snd_soc_dapm_route wm5110_dapm_routes[] = {
 	{ "OUT2L", NULL, "SYSCLK" },
 	{ "OUT2R", NULL, "SYSCLK" },
 	{ "OUT3L", NULL, "SYSCLK" },
+	{ "OUT3R", NULL, "SYSCLK" },
 	{ "OUT4L", NULL, "SYSCLK" },
 	{ "OUT4R", NULL, "SYSCLK" },
 	{ "OUT5L", NULL, "SYSCLK" },
diff --git a/sound/soc/codecs/wm8940.c b/sound/soc/codecs/wm8940.c
index f6f9395ea38e..1c600819f768 100644
--- a/sound/soc/codecs/wm8940.c
+++ b/sound/soc/codecs/wm8940.c
@@ -743,6 +743,7 @@ static const struct regmap_config wm8940_regmap = {
 	.max_register = WM8940_MONOMIX,
 	.reg_defaults = wm8940_reg_defaults,
 	.num_reg_defaults = ARRAY_SIZE(wm8940_reg_defaults),
+	.cache_type = REGCACHE_RBTREE,
 
 	.readable_reg = wm8940_readable_register,
 	.volatile_reg = wm8940_volatile_register,
diff --git a/sound/soc/davinci/davinci-mcasp.c b/sound/soc/davinci/davinci-mcasp.c
index 0f66fda2c772..237dc67002ef 100644
--- a/sound/soc/davinci/davinci-mcasp.c
+++ b/sound/soc/davinci/davinci-mcasp.c
@@ -1513,8 +1513,9 @@ static struct davinci_mcasp_pdata am33xx_mcasp_pdata = {
 };
 
 static struct davinci_mcasp_pdata dra7_mcasp_pdata = {
-	.tx_dma_offset = 0x200,
-	.rx_dma_offset = 0x284,
+	/* The CFG port offset will be calculated if it is needed */
+	.tx_dma_offset = 0,
+	.rx_dma_offset = 0,
 	.version = MCASP_VERSION_4,
 };
 
@@ -1734,6 +1735,52 @@ static int davinci_mcasp_get_dma_type(struct davinci_mcasp *mcasp)
 	return PCM_EDMA;
 }
 
+static u32 davinci_mcasp_txdma_offset(struct davinci_mcasp_pdata *pdata)
+{
+	int i;
+	u32 offset = 0;
+
+	if (pdata->version != MCASP_VERSION_4)
+		return pdata->tx_dma_offset;
+
+	for (i = 0; i < pdata->num_serializer; i++) {
+		if (pdata->serial_dir[i] == TX_MODE) {
+			if (!offset) {
+				offset = DAVINCI_MCASP_TXBUF_REG(i);
+			} else {
+				pr_err("%s: Only one serializer allowed!\n",
+				       __func__);
+				break;
+			}
+		}
+	}
+
+	return offset;
+}
+
+static u32 davinci_mcasp_rxdma_offset(struct davinci_mcasp_pdata *pdata)
+{
+	int i;
+	u32 offset = 0;
+
+	if (pdata->version != MCASP_VERSION_4)
+		return pdata->rx_dma_offset;
+
+	for (i = 0; i < pdata->num_serializer; i++) {
+		if (pdata->serial_dir[i] == RX_MODE) {
+			if (!offset) {
+				offset = DAVINCI_MCASP_RXBUF_REG(i);
+			} else {
+				pr_err("%s: Only one serializer allowed!\n",
+				       __func__);
+				break;
+			}
+		}
+	}
+
+	return offset;
+}
+
 static int davinci_mcasp_probe(struct platform_device *pdev)
 {
 	struct snd_dmaengine_dai_dma_data *dma_data;
@@ -1862,7 +1909,7 @@ static int davinci_mcasp_probe(struct platform_device *pdev)
 	if (dat)
 		dma_data->addr = dat->start;
 	else
-		dma_data->addr = mem->start + pdata->tx_dma_offset;
+		dma_data->addr = mem->start + davinci_mcasp_txdma_offset(pdata);
 
 	dma = &mcasp->dma_request[SNDRV_PCM_STREAM_PLAYBACK];
 	res = platform_get_resource(pdev, IORESOURCE_DMA, 0);
@@ -1883,7 +1930,8 @@ static int davinci_mcasp_probe(struct platform_device *pdev)
 	if (dat)
 		dma_data->addr = dat->start;
 	else
-		dma_data->addr = mem->start + pdata->rx_dma_offset;
+		dma_data->addr =
+			mem->start + davinci_mcasp_rxdma_offset(pdata);
 
 	dma = &mcasp->dma_request[SNDRV_PCM_STREAM_CAPTURE];
 	res = platform_get_resource(pdev, IORESOURCE_DMA, 1);
diff --git a/sound/soc/davinci/davinci-mcasp.h b/sound/soc/davinci/davinci-mcasp.h
index 1e8787fb3fb7..afddc8010c54 100644
--- a/sound/soc/davinci/davinci-mcasp.h
+++ b/sound/soc/davinci/davinci-mcasp.h
@@ -85,9 +85,9 @@
 						(n << 2))
 
 /* Transmit Buffer for Serializer n */
-#define DAVINCI_MCASP_TXBUF_REG		0x200
+#define DAVINCI_MCASP_TXBUF_REG(n)	(0x200 + (n << 2))
 /* Receive Buffer for Serializer n */
-#define DAVINCI_MCASP_RXBUF_REG		0x280
+#define DAVINCI_MCASP_RXBUF_REG(n)	(0x280 + (n << 2))
 
 /* McASP FIFO Registers */
 #define DAVINCI_MCASP_V2_AFIFO_BASE	(0x1010)
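With the macros parameterized, serializer n's data port sits at a 4-byte stride from the base address, which is all the (n << 2) term encodes; the probe code above then picks the offset for whichever serializer is wired for TX or RX. A quick check of the arithmetic (extra parentheses around n added here for macro hygiene):

    #include <stdio.h>

    #define DAVINCI_MCASP_TXBUF_REG(n)  (0x200 + ((n) << 2))
    #define DAVINCI_MCASP_RXBUF_REG(n)  (0x280 + ((n) << 2))

    int main(void)
    {
        int n;

        for (n = 0; n < 4; n++)
            printf("ser %d: tx 0x%03x rx 0x%03x\n", n,
                   DAVINCI_MCASP_TXBUF_REG(n),
                   DAVINCI_MCASP_RXBUF_REG(n));
        return 0;
    }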
diff --git a/sound/soc/fsl/fsl_ssi.c b/sound/soc/fsl/fsl_ssi.c
index 632ecc0e3956..bedec4a32581 100644
--- a/sound/soc/fsl/fsl_ssi.c
+++ b/sound/soc/fsl/fsl_ssi.c
@@ -952,16 +952,16 @@ static int _fsl_ssi_set_dai_fmt(struct device *dev,
 	ssi_private->i2s_mode = CCSR_SSI_SCR_NET;
 	switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
 	case SND_SOC_DAIFMT_I2S:
+		regmap_update_bits(regs, CCSR_SSI_STCCR,
+				   CCSR_SSI_SxCCR_DC_MASK,
+				   CCSR_SSI_SxCCR_DC(2));
+		regmap_update_bits(regs, CCSR_SSI_SRCCR,
+				   CCSR_SSI_SxCCR_DC_MASK,
+				   CCSR_SSI_SxCCR_DC(2));
 		switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
 		case SND_SOC_DAIFMT_CBM_CFS:
 		case SND_SOC_DAIFMT_CBS_CFS:
 			ssi_private->i2s_mode |= CCSR_SSI_SCR_I2S_MODE_MASTER;
-			regmap_update_bits(regs, CCSR_SSI_STCCR,
-					   CCSR_SSI_SxCCR_DC_MASK,
-					   CCSR_SSI_SxCCR_DC(2));
-			regmap_update_bits(regs, CCSR_SSI_SRCCR,
-					   CCSR_SSI_SxCCR_DC_MASK,
-					   CCSR_SSI_SxCCR_DC(2));
 			break;
 		case SND_SOC_DAIFMT_CBM_CFM:
 			ssi_private->i2s_mode |= CCSR_SSI_SCR_I2S_MODE_SLAVE;
diff --git a/sound/soc/intel/atom/sst-mfld-platform-compress.c b/sound/soc/intel/atom/sst-mfld-platform-compress.c
index 395168986462..1bead81bb510 100644
--- a/sound/soc/intel/atom/sst-mfld-platform-compress.c
+++ b/sound/soc/intel/atom/sst-mfld-platform-compress.c
@@ -182,24 +182,29 @@ static int sst_platform_compr_trigger(struct snd_compr_stream *cstream, int cmd)
 	case SNDRV_PCM_TRIGGER_START:
 		if (stream->compr_ops->stream_start)
 			return stream->compr_ops->stream_start(sst->dev, stream->id);
+		break;
 	case SNDRV_PCM_TRIGGER_STOP:
 		if (stream->compr_ops->stream_drop)
 			return stream->compr_ops->stream_drop(sst->dev, stream->id);
+		break;
 	case SND_COMPR_TRIGGER_DRAIN:
 		if (stream->compr_ops->stream_drain)
 			return stream->compr_ops->stream_drain(sst->dev, stream->id);
+		break;
 	case SND_COMPR_TRIGGER_PARTIAL_DRAIN:
 		if (stream->compr_ops->stream_partial_drain)
 			return stream->compr_ops->stream_partial_drain(sst->dev, stream->id);
+		break;
 	case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
 		if (stream->compr_ops->stream_pause)
 			return stream->compr_ops->stream_pause(sst->dev, stream->id);
+		break;
 	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
 		if (stream->compr_ops->stream_pause_release)
 			return stream->compr_ops->stream_pause_release(sst->dev, stream->id);
-	default:
-		return -EINVAL;
+		break;
 	}
+	return -EINVAL;
 }
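The trigger fix adds the break each case was missing: when an ops hook was absent, control fell through into the next command's handler instead of failing. Distilled to a self-contained example, with invented return codes and -22 playing -EINVAL:

    #include <stdio.h>

    enum cmd { CMD_START, CMD_STOP };

    /* before: a missing break lets CMD_START fall into CMD_STOP */
    static int trigger_buggy(enum cmd cmd, int have_start_hook)
    {
        switch (cmd) {
        case CMD_START:
            if (have_start_hook)
                return 0;           /* would call ->stream_start() */
            /* falls through! */
        case CMD_STOP:
            return 1;               /* would call ->stream_drop() */
        default:
            return -22;
        }
    }

    /* after: break per case, one shared failure exit */
    static int trigger_fixed(enum cmd cmd, int have_start_hook)
    {
        switch (cmd) {
        case CMD_START:
            if (have_start_hook)
                return 0;
            break;
        case CMD_STOP:
            return 1;
        }
        return -22;
    }

    int main(void)
    {
        printf("buggy: %d\n", trigger_buggy(CMD_START, 0));  /* 1: wrong op */
        printf("fixed: %d\n", trigger_fixed(CMD_START, 0));  /* -22 */
        return 0;
    }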
diff --git a/sound/soc/intel/skylake/bxt-sst.c b/sound/soc/intel/skylake/bxt-sst.c
index 965ce40ce752..8b95e09e23e8 100644
--- a/sound/soc/intel/skylake/bxt-sst.c
+++ b/sound/soc/intel/skylake/bxt-sst.c
@@ -291,6 +291,7 @@ int bxt_sst_dsp_init(struct device *dev, void __iomem *mmio_base, int irq,
 	sst_dsp_mailbox_init(sst, (BXT_ADSP_SRAM0_BASE + SKL_ADSP_W0_STAT_SZ),
 			SKL_ADSP_W0_UP_SZ, BXT_ADSP_SRAM1_BASE, SKL_ADSP_W1_SZ);
 
+	INIT_LIST_HEAD(&sst->module_list);
 	ret = skl_ipc_init(dev, skl);
 	if (ret)
 		return ret;
diff --git a/sound/soc/sh/rcar/adg.c b/sound/soc/sh/rcar/adg.c
index 49354d17ea55..c4c51a4d3c8f 100644
--- a/sound/soc/sh/rcar/adg.c
+++ b/sound/soc/sh/rcar/adg.c
@@ -518,7 +518,7 @@ static void rsnd_adg_get_clkout(struct rsnd_priv *priv,
 		}
 	}
 
-	rsnd_mod_bset(adg_mod, SSICKR, 0x00FF0000, ckr);
+	rsnd_mod_bset(adg_mod, SSICKR, 0x80FF0000, ckr);
 	rsnd_mod_write(adg_mod, BRRA, rbga);
 	rsnd_mod_write(adg_mod, BRRB, rbgb);
 
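rsnd_mod_bset() is a masked read-modify-write, so only bits selected by the mask ever reach the register; widening the SSICKR mask from 0x00FF0000 to 0x80FF0000 is what lets bit 31 of ckr be written at all. A generic sketch of that masking behaviour; reg and bset() are stand-ins, not the rsnd implementation:

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t reg;    /* stand-in for the memory-mapped register */

    /* update only the bits selected by mask */
    static void bset(uint32_t mask, uint32_t val)
    {
        reg = (reg & ~mask) | (val & mask);
    }

    int main(void)
    {
        uint32_t ckr = 0x80010000;              /* wants bit 31 set */

        bset(0x00FF0000, ckr);
        printf("narrow mask: 0x%08x\n", reg);   /* bit 31 lost */
        bset(0x80FF0000, ckr);
        printf("wide mask:   0x%08x\n", reg);   /* bit 31 written */
        return 0;
    }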