author	Ingo Molnar <mingo@kernel.org>	2018-10-02 03:50:34 -0400
committer	Ingo Molnar <mingo@kernel.org>	2018-10-02 03:50:34 -0400
commit	97e831e13015045c098e1187f9b8b8e9bace9413 (patch)
tree	26c4a34a290841a140007bf10a45798b8ea49c44
parent	c90d3bd1b9e83237c9d7e04542ffacc10a277560 (diff)
parent	d7cbbe49a9304520181fb8c9272d1327deec8453 (diff)
Merge branch 'perf/urgent' into perf/core, to pick up fixes
Signed-off-by: Ingo Molnar <mingo@kernel.org>
236 files changed, 1733 insertions, 1268 deletions
diff --git a/Documentation/devicetree/bindings/input/gpio-keys.txt b/Documentation/devicetree/bindings/input/gpio-keys.txt
index 996ce84352cb..7cccc49b6bea 100644
--- a/Documentation/devicetree/bindings/input/gpio-keys.txt
+++ b/Documentation/devicetree/bindings/input/gpio-keys.txt
@@ -1,4 +1,4 @@
-Device-Tree bindings for input/gpio_keys.c keyboard driver
+Device-Tree bindings for input/keyboard/gpio_keys.c keyboard driver

 Required properties:
 - compatible = "gpio-keys";
diff --git a/Documentation/media/uapi/dvb/video_function_calls.rst b/Documentation/media/uapi/dvb/video_function_calls.rst
index 3f4f6c9ffad7..a4222b6cd2d3 100644
--- a/Documentation/media/uapi/dvb/video_function_calls.rst
+++ b/Documentation/media/uapi/dvb/video_function_calls.rst
@@ -33,4 +33,3 @@ Video Function Calls
     video-clear-buffer
     video-set-streamtype
     video-set-format
-    video-set-attributes
diff --git a/MAINTAINERS b/MAINTAINERS
index 02a39617ec82..a255240d1452 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -9716,13 +9716,6 @@ Q: http://patchwork.linuxtv.org/project/linux-media/list/
 S: Maintained
 F: drivers/media/dvb-frontends/mn88473*

-PCI DRIVER FOR MOBIVEIL PCIE IP
-M: Subrahmanya Lingappa <l.subrahmanya@mobiveil.co.in>
-L: linux-pci@vger.kernel.org
-S: Supported
-F: Documentation/devicetree/bindings/pci/mobiveil-pcie.txt
-F: drivers/pci/controller/pcie-mobiveil.c
-
 MODULE SUPPORT
 M: Jessica Yu <jeyu@kernel.org>
 T: git git://git.kernel.org/pub/scm/linux/kernel/git/jeyu/linux.git modules-next
@@ -11137,6 +11130,13 @@ F: include/uapi/linux/switchtec_ioctl.h
 F: include/linux/switchtec.h
 F: drivers/ntb/hw/mscc/

+PCI DRIVER FOR MOBIVEIL PCIE IP
+M: Subrahmanya Lingappa <l.subrahmanya@mobiveil.co.in>
+L: linux-pci@vger.kernel.org
+S: Supported
+F: Documentation/devicetree/bindings/pci/mobiveil-pcie.txt
+F: drivers/pci/controller/pcie-mobiveil.c
+
 PCI DRIVER FOR MVEBU (Marvell Armada 370 and Armada XP SOC support)
 M: Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
 M: Jason Cooper <jason@lakedaemon.net>
@@ -11203,8 +11203,14 @@ F: tools/pci/

 PCI ENHANCED ERROR HANDLING (EEH) FOR POWERPC
 M: Russell Currey <ruscur@russell.cc>
+M: Sam Bobroff <sbobroff@linux.ibm.com>
+M: Oliver O'Halloran <oohall@gmail.com>
 L: linuxppc-dev@lists.ozlabs.org
 S: Supported
+F: Documentation/PCI/pci-error-recovery.txt
+F: drivers/pci/pcie/aer.c
+F: drivers/pci/pcie/dpc.c
+F: drivers/pci/pcie/err.c
 F: Documentation/powerpc/eeh-pci-error-recovery.txt
 F: arch/powerpc/kernel/eeh*.c
 F: arch/powerpc/platforms/*/eeh*.c
diff --git a/arch/powerpc/include/asm/setup.h b/arch/powerpc/include/asm/setup.h
index 1a951b00465d..1fffbba8d6a5 100644
--- a/arch/powerpc/include/asm/setup.h
+++ b/arch/powerpc/include/asm/setup.h
@@ -9,6 +9,7 @@ extern void ppc_printk_progress(char *s, unsigned short hex);

 extern unsigned int rtas_data;
 extern unsigned long long memory_limit;
+extern bool init_mem_is_free;
 extern unsigned long klimit;
 extern void *zalloc_maybe_bootmem(size_t size, gfp_t mask);

diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index ea04dfb8c092..2d8fc8c9da7a 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -1314,9 +1314,7 @@ EXC_REAL_BEGIN(denorm_exception_hv, 0x1500, 0x100)

 #ifdef CONFIG_PPC_DENORMALISATION
 	mfspr r10,SPRN_HSRR1
-	mfspr r11,SPRN_HSRR0 /* save HSRR0 */
 	andis. r10,r10,(HSRR1_DENORM)@h /* denorm? */
-	addi r11,r11,-4 /* HSRR0 is next instruction */
 	bne+ denorm_assist
 #endif

@@ -1382,6 +1380,8 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
  */
 	XVCPSGNDP32(32)
 denorm_done:
+	mfspr r11,SPRN_HSRR0
+	subi r11,r11,4
 	mtspr SPRN_HSRR0,r11
 	mtcrf 0x80,r9
 	ld r9,PACA_EXGEN+EX_R9(r13)
diff --git a/arch/powerpc/kernel/tm.S b/arch/powerpc/kernel/tm.S
index 6bffbc5affe7..7716374786bd 100644
--- a/arch/powerpc/kernel/tm.S
+++ b/arch/powerpc/kernel/tm.S
@@ -176,13 +176,27 @@ _GLOBAL(tm_reclaim)
 	std r1, PACATMSCRATCH(r13)
 	ld r1, PACAR1(r13)

-	/* Store the PPR in r11 and reset to decent value */
 	std r11, GPR11(r1) /* Temporary stash */

+	/*
+	 * Move the saved user r1 to the kernel stack in case PACATMSCRATCH is
+	 * clobbered by an exception once we turn on MSR_RI below.
+	 */
+	ld r11, PACATMSCRATCH(r13)
+	std r11, GPR1(r1)
+
+	/*
+	 * Store r13 away so we can free up the scratch SPR for the SLB fault
+	 * handler (needed once we start accessing the thread_struct).
+	 */
+	GET_SCRATCH0(r11)
+	std r11, GPR13(r1)
+
 	/* Reset MSR RI so we can take SLB faults again */
 	li r11, MSR_RI
 	mtmsrd r11, 1

+	/* Store the PPR in r11 and reset to decent value */
 	mfspr r11, SPRN_PPR
 	HMT_MEDIUM

@@ -207,11 +221,11 @@ _GLOBAL(tm_reclaim)
 	SAVE_GPR(8, r7) /* user r8 */
 	SAVE_GPR(9, r7) /* user r9 */
 	SAVE_GPR(10, r7) /* user r10 */
-	ld r3, PACATMSCRATCH(r13) /* user r1 */
+	ld r3, GPR1(r1) /* user r1 */
 	ld r4, GPR7(r1) /* user r7 */
 	ld r5, GPR11(r1) /* user r11 */
 	ld r6, GPR12(r1) /* user r12 */
-	GET_SCRATCH0(8) /* user r13 */
+	ld r8, GPR13(r1) /* user r13 */
 	std r3, GPR1(r7)
 	std r4, GPR7(r7)
 	std r5, GPR11(r7)
diff --git a/arch/powerpc/lib/checksum_64.S b/arch/powerpc/lib/checksum_64.S
index 886ed94b9c13..d05c8af4ac51 100644
--- a/arch/powerpc/lib/checksum_64.S
+++ b/arch/powerpc/lib/checksum_64.S
@@ -443,6 +443,9 @@ _GLOBAL(csum_ipv6_magic)
 	addc r0, r8, r9
 	ld r10, 0(r4)
 	ld r11, 8(r4)
+#ifdef CONFIG_CPU_LITTLE_ENDIAN
+	rotldi r5, r5, 8
+#endif
 	adde r0, r0, r10
 	add r5, r5, r7
 	adde r0, r0, r11
diff --git a/arch/powerpc/lib/code-patching.c b/arch/powerpc/lib/code-patching.c
index 850f3b8f4da5..6ae2777c220d 100644
--- a/arch/powerpc/lib/code-patching.c
+++ b/arch/powerpc/lib/code-patching.c
@@ -28,6 +28,12 @@ static int __patch_instruction(unsigned int *exec_addr, unsigned int instr,
 {
 	int err;

+	/* Make sure we aren't patching a freed init section */
+	if (init_mem_is_free && init_section_contains(exec_addr, 4)) {
+		pr_debug("Skipping init section patching addr: 0x%px\n", exec_addr);
+		return 0;
+	}
+
 	__put_user_size(instr, patch_addr, 4, err);
 	if (err)
 		return err;
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index 5c8530d0c611..04ccb274a620 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -63,6 +63,7 @@
 #endif

 unsigned long long memory_limit;
+bool init_mem_is_free;

 #ifdef CONFIG_HIGHMEM
 pte_t *kmap_pte;
@@ -396,6 +397,7 @@ void free_initmem(void)
 {
 	ppc_md.progress = ppc_printk_progress;
 	mark_initmem_nx();
+	init_mem_is_free = true;
 	free_initmem_default(POISON_FREE_INITMEM);
 }

diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
index 35ac5422903a..59d07bd5374a 100644
--- a/arch/powerpc/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
@@ -1204,7 +1204,9 @@ int find_and_online_cpu_nid(int cpu)
 	int new_nid;

 	/* Use associativity from first thread for all siblings */
-	vphn_get_associativity(cpu, associativity);
+	if (vphn_get_associativity(cpu, associativity))
+		return cpu_to_node(cpu);
+
 	new_nid = associativity_to_nid(associativity);
 	if (new_nid < 0 || !node_possible(new_nid))
 		new_nid = first_online_node;
@@ -1452,7 +1454,8 @@ static struct timer_list topology_timer;

 static void reset_topology_timer(void)
 {
-	mod_timer(&topology_timer, jiffies + topology_timer_secs * HZ);
+	if (vphn_enabled)
+		mod_timer(&topology_timer, jiffies + topology_timer_secs * HZ);
 }

 #ifdef CONFIG_SMP
diff --git a/arch/powerpc/mm/pkeys.c b/arch/powerpc/mm/pkeys.c
index 333b1f80c435..b271b283c785 100644
--- a/arch/powerpc/mm/pkeys.c
+++ b/arch/powerpc/mm/pkeys.c
@@ -45,7 +45,7 @@ static void scan_pkey_feature(void)
 	 * Since any pkey can be used for data or execute, we will just treat
 	 * all keys as equal and track them as one entity.
 	 */
-	pkeys_total = be32_to_cpu(vals[0]);
+	pkeys_total = vals[0];
 	pkeys_devtree_defined = true;
 }

diff --git a/arch/powerpc/platforms/powernv/pci-ioda-tce.c b/arch/powerpc/platforms/powernv/pci-ioda-tce.c
index 6c5db1acbe8d..fe9691040f54 100644
--- a/arch/powerpc/platforms/powernv/pci-ioda-tce.c
+++ b/arch/powerpc/platforms/powernv/pci-ioda-tce.c
@@ -276,7 +276,7 @@ long pnv_pci_ioda2_table_alloc_pages(int nid, __u64 bus_offset,
 	level_shift = entries_shift + 3;
 	level_shift = max_t(unsigned int, level_shift, PAGE_SHIFT);

-	if ((level_shift - 3) * levels + page_shift >= 60)
+	if ((level_shift - 3) * levels + page_shift >= 55)
 		return -EINVAL;

 	/* Allocate TCE table */
diff --git a/arch/riscv/include/asm/asm-prototypes.h b/arch/riscv/include/asm/asm-prototypes.h
new file mode 100644
index 000000000000..c9fecd120d18
--- /dev/null
+++ b/arch/riscv/include/asm/asm-prototypes.h
@@ -0,0 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_RISCV_PROTOTYPES_H
+
+#include <linux/ftrace.h>
+#include <asm-generic/asm-prototypes.h>
+
+#endif /* _ASM_RISCV_PROTOTYPES_H */
diff --git a/arch/x86/boot/compressed/mem_encrypt.S b/arch/x86/boot/compressed/mem_encrypt.S
index eaa843a52907..a480356e0ed8 100644
--- a/arch/x86/boot/compressed/mem_encrypt.S
+++ b/arch/x86/boot/compressed/mem_encrypt.S
@@ -25,20 +25,6 @@ ENTRY(get_sev_encryption_bit)
 	push %ebx
 	push %ecx
 	push %edx
-	push %edi
-
-	/*
-	 * RIP-relative addressing is needed to access the encryption bit
-	 * variable. Since we are running in 32-bit mode we need this call/pop
-	 * sequence to get the proper relative addressing.
-	 */
-	call 1f
-1:	popl %edi
-	subl $1b, %edi
-
-	movl enc_bit(%edi), %eax
-	cmpl $0, %eax
-	jge .Lsev_exit

 	/* Check if running under a hypervisor */
 	movl $1, %eax
@@ -69,15 +55,12 @@ ENTRY(get_sev_encryption_bit)

 	movl %ebx, %eax
 	andl $0x3f, %eax /* Return the encryption bit location */
-	movl %eax, enc_bit(%edi)
 	jmp .Lsev_exit

 .Lno_sev:
 	xor %eax, %eax
-	movl %eax, enc_bit(%edi)

 .Lsev_exit:
-	pop %edi
 	pop %edx
 	pop %ecx
 	pop %ebx
@@ -113,8 +96,6 @@ ENTRY(set_sev_encryption_mask)
 ENDPROC(set_sev_encryption_mask)

 	.data
-enc_bit:
-	.int 0xffffffff

 #ifdef CONFIG_AMD_MEM_ENCRYPT
 	.balign 8
diff --git a/arch/x86/events/amd/uncore.c b/arch/x86/events/amd/uncore.c
index 981ba5e8241b..8671de126eac 100644
--- a/arch/x86/events/amd/uncore.c
+++ b/arch/x86/events/amd/uncore.c
@@ -36,6 +36,7 @@

 static int num_counters_llc;
 static int num_counters_nb;
+static bool l3_mask;

 static HLIST_HEAD(uncore_unused_list);

@@ -209,6 +210,13 @@ static int amd_uncore_event_init(struct perf_event *event)
 	hwc->config = event->attr.config & AMD64_RAW_EVENT_MASK_NB;
 	hwc->idx = -1;

+	/*
+	 * SliceMask and ThreadMask need to be set for certain L3 events in
+	 * Family 17h. For other events, the two fields do not affect the count.
+	 */
+	if (l3_mask)
+		hwc->config |= (AMD64_L3_SLICE_MASK | AMD64_L3_THREAD_MASK);
+
 	if (event->cpu < 0)
 		return -EINVAL;

@@ -525,6 +533,7 @@ static int __init amd_uncore_init(void)
 		amd_llc_pmu.name = "amd_l3";
 		format_attr_event_df.show = &event_show_df;
 		format_attr_event_l3.show = &event_show_l3;
+		l3_mask = true;
 	} else {
 		num_counters_nb = NUM_COUNTERS_NB;
 		num_counters_llc = NUM_COUNTERS_L2;
@@ -532,6 +541,7 @@ static int __init amd_uncore_init(void)
 		amd_llc_pmu.name = "amd_l2";
 		format_attr_event_df = format_attr_event;
 		format_attr_event_l3 = format_attr_event;
+		l3_mask = false;
 	}

 	amd_nb_pmu.attr_groups = amd_uncore_attr_groups_df;
diff --git a/arch/x86/events/intel/uncore_snbep.c b/arch/x86/events/intel/uncore_snbep.c
index 51d7c117e3c7..c07bee31abe8 100644
--- a/arch/x86/events/intel/uncore_snbep.c
+++ b/arch/x86/events/intel/uncore_snbep.c
@@ -3061,7 +3061,7 @@ static struct event_constraint bdx_uncore_pcu_constraints[] = {

 void bdx_uncore_cpu_init(void)
 {
-	int pkg = topology_phys_to_logical_pkg(0);
+	int pkg = topology_phys_to_logical_pkg(boot_cpu_data.phys_proc_id);

 	if (bdx_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
 		bdx_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
@@ -3931,16 +3931,16 @@ static const struct pci_device_id skx_uncore_pci_ids[] = {
 		.driver_data = UNCORE_PCI_DEV_FULL_DATA(21, 5, SKX_PCI_UNCORE_M2PCIE, 3),
 	},
 	{ /* M3UPI0 Link 0 */
-		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204C),
-		.driver_data = UNCORE_PCI_DEV_FULL_DATA(18, 0, SKX_PCI_UNCORE_M3UPI, 0),
+		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204D),
+		.driver_data = UNCORE_PCI_DEV_FULL_DATA(18, 1, SKX_PCI_UNCORE_M3UPI, 0),
 	},
 	{ /* M3UPI0 Link 1 */
-		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204D),
-		.driver_data = UNCORE_PCI_DEV_FULL_DATA(18, 1, SKX_PCI_UNCORE_M3UPI, 1),
+		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204E),
+		.driver_data = UNCORE_PCI_DEV_FULL_DATA(18, 2, SKX_PCI_UNCORE_M3UPI, 1),
 	},
 	{ /* M3UPI1 Link 2 */
-		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204C),
-		.driver_data = UNCORE_PCI_DEV_FULL_DATA(18, 4, SKX_PCI_UNCORE_M3UPI, 2),
+		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204D),
+		.driver_data = UNCORE_PCI_DEV_FULL_DATA(18, 5, SKX_PCI_UNCORE_M3UPI, 2),
 	},
 	{ /* end: all zeroes */ }
 };
diff --git a/arch/x86/include/asm/perf_event.h b/arch/x86/include/asm/perf_event.h
index 12f54082f4c8..78241b736f2a 100644
--- a/arch/x86/include/asm/perf_event.h
+++ b/arch/x86/include/asm/perf_event.h
@@ -46,6 +46,14 @@
 #define INTEL_ARCH_EVENT_MASK \
 	(ARCH_PERFMON_EVENTSEL_UMASK | ARCH_PERFMON_EVENTSEL_EVENT)

+#define AMD64_L3_SLICE_SHIFT 48
+#define AMD64_L3_SLICE_MASK \
+	((0xFULL) << AMD64_L3_SLICE_SHIFT)
+
+#define AMD64_L3_THREAD_SHIFT 56
+#define AMD64_L3_THREAD_MASK \
+	((0xFFULL) << AMD64_L3_THREAD_SHIFT)
+
 #define X86_RAW_EVENT_MASK \
 	(ARCH_PERFMON_EVENTSEL_EVENT | \
 	 ARCH_PERFMON_EVENTSEL_UMASK | \
diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c
index 94e1ed667b6e..41317c50a446 100644
--- a/block/blk-mq-tag.c
+++ b/block/blk-mq-tag.c
@@ -322,16 +322,11 @@ void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_iter_fn *fn,

 	/*
 	 * __blk_mq_update_nr_hw_queues will update the nr_hw_queues and
-	 * queue_hw_ctx after freeze the queue. So we could use q_usage_counter
-	 * to avoid race with it. __blk_mq_update_nr_hw_queues will users
-	 * synchronize_rcu to ensure all of the users go out of the critical
-	 * section below and see zeroed q_usage_counter.
+	 * queue_hw_ctx after freeze the queue, so we use q_usage_counter
+	 * to avoid race with it.
 	 */
-	rcu_read_lock();
-	if (percpu_ref_is_zero(&q->q_usage_counter)) {
-		rcu_read_unlock();
+	if (!percpu_ref_tryget(&q->q_usage_counter))
 		return;
-	}

 	queue_for_each_hw_ctx(q, hctx, i) {
 		struct blk_mq_tags *tags = hctx->tags;
@@ -347,7 +342,7 @@ void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_iter_fn *fn,
 		bt_for_each(hctx, &tags->breserved_tags, fn, priv, true);
 		bt_for_each(hctx, &tags->bitmap_tags, fn, priv, false);
 	}
-	rcu_read_unlock();
+	blk_queue_exit(q);
 }

 static int bt_alloc(struct sbitmap_queue *bt, unsigned int depth,
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 85a1c1a59c72..e3c39ea8e17b 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1628,7 +1628,7 @@ void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule)
 		BUG_ON(!rq->q);
 		if (rq->mq_ctx != this_ctx) {
 			if (this_ctx) {
-				trace_block_unplug(this_q, depth, from_schedule);
+				trace_block_unplug(this_q, depth, !from_schedule);
 				blk_mq_sched_insert_requests(this_q, this_ctx,
 							&ctx_list,
 							from_schedule);
@@ -1648,7 +1648,7 @@ void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule)
 	 * on 'ctx_list'. Do those.
 	 */
 	if (this_ctx) {
-		trace_block_unplug(this_q, depth, from_schedule);
+		trace_block_unplug(this_q, depth, !from_schedule);
 		blk_mq_sched_insert_requests(this_q, this_ctx, &ctx_list,
 					from_schedule);
 	}
diff --git a/block/elevator.c b/block/elevator.c
index 6a06b5d040e5..fae58b2f906f 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -609,7 +609,7 @@ void elv_drain_elevator(struct request_queue *q)

 	while (e->type->ops.sq.elevator_dispatch_fn(q, 1))
 		;
-	if (q->nr_sorted && printed++ < 10) {
+	if (q->nr_sorted && !blk_queue_is_zoned(q) && printed++ < 10 ) {
 		printk(KERN_ERR "%s: forced dispatching is broken "
 		       "(nr_sorted=%u), please report this\n",
 		       q->elevator->type->elevator_name, q->nr_sorted);
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index a71d817e900d..429d20131c7e 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -2670,8 +2670,8 @@ static void purge_persistent_grants(struct blkfront_info *info)
 			list_del(&gnt_list_entry->node);
 			gnttab_end_foreign_access(gnt_list_entry->gref, 0, 0UL);
 			rinfo->persistent_gnts_c--;
-			__free_page(gnt_list_entry->page);
-			kfree(gnt_list_entry);
+			gnt_list_entry->gref = GRANT_INVALID_REF;
+			list_add_tail(&gnt_list_entry->node, &rinfo->grants);
 		}

 		spin_unlock_irqrestore(&rinfo->ring_lock, flags);
diff --git a/drivers/clocksource/timer-atmel-pit.c b/drivers/clocksource/timer-atmel-pit.c
index ec8a4376f74f..2fab18fae4fc 100644
--- a/drivers/clocksource/timer-atmel-pit.c
+++ b/drivers/clocksource/timer-atmel-pit.c
@@ -180,26 +180,29 @@ static int __init at91sam926x_pit_dt_init(struct device_node *node)
 	data->base = of_iomap(node, 0);
 	if (!data->base) {
 		pr_err("Could not map PIT address\n");
-		return -ENXIO;
+		ret = -ENXIO;
+		goto exit;
 	}

 	data->mck = of_clk_get(node, 0);
 	if (IS_ERR(data->mck)) {
 		pr_err("Unable to get mck clk\n");
-		return PTR_ERR(data->mck);
+		ret = PTR_ERR(data->mck);
+		goto exit;
 	}

 	ret = clk_prepare_enable(data->mck);
 	if (ret) {
 		pr_err("Unable to enable mck\n");
-		return ret;
+		goto exit;
 	}

 	/* Get the interrupts property */
 	data->irq = irq_of_parse_and_map(node, 0);
 	if (!data->irq) {
 		pr_err("Unable to get IRQ from DT\n");
-		return -EINVAL;
+		ret = -EINVAL;
+		goto exit;
 	}

 	/*
@@ -227,7 +230,7 @@ static int __init at91sam926x_pit_dt_init(struct device_node *node)
 	ret = clocksource_register_hz(&data->clksrc, pit_rate);
 	if (ret) {
 		pr_err("Failed to register clocksource\n");
-		return ret;
+		goto exit;
 	}

 	/* Set up irq handler */
@@ -236,7 +239,8 @@ static int __init at91sam926x_pit_dt_init(struct device_node *node)
 			  "at91_tick", data);
 	if (ret) {
 		pr_err("Unable to setup IRQ\n");
-		return ret;
+		clocksource_unregister(&data->clksrc);
+		goto exit;
 	}

 	/* Set up and register clockevents */
@@ -254,6 +258,10 @@ static int __init at91sam926x_pit_dt_init(struct device_node *node)
 	clockevents_register_device(&data->clkevt);

 	return 0;
+
+exit:
+	kfree(data);
+	return ret;
 }
 TIMER_OF_DECLARE(at91sam926x_pit, "atmel,at91sam9260-pit",
 		 at91sam926x_pit_dt_init);
diff --git a/drivers/clocksource/timer-fttmr010.c b/drivers/clocksource/timer-fttmr010.c
index c020038ebfab..cf93f6419b51 100644
--- a/drivers/clocksource/timer-fttmr010.c
+++ b/drivers/clocksource/timer-fttmr010.c
@@ -130,13 +130,17 @@ static int fttmr010_timer_set_next_event(unsigned long cycles,
 	cr &= ~fttmr010->t1_enable_val;
 	writel(cr, fttmr010->base + TIMER_CR);

-	/* Setup the match register forward/backward in time */
-	cr = readl(fttmr010->base + TIMER1_COUNT);
-	if (fttmr010->count_down)
-		cr -= cycles;
-	else
-		cr += cycles;
-	writel(cr, fttmr010->base + TIMER1_MATCH1);
+	if (fttmr010->count_down) {
+		/*
+		 * ASPEED Timer Controller will load TIMER1_LOAD register
+		 * into TIMER1_COUNT register when the timer is re-enabled.
+		 */
+		writel(cycles, fttmr010->base + TIMER1_LOAD);
+	} else {
+		/* Setup the match register forward in time */
+		cr = readl(fttmr010->base + TIMER1_COUNT);
+		writel(cr + cycles, fttmr010->base + TIMER1_MATCH1);
+	}

 	/* Start */
 	cr = readl(fttmr010->base + TIMER_CR);
diff --git a/drivers/clocksource/timer-ti-32k.c b/drivers/clocksource/timer-ti-32k.c
index 29e2e1a78a43..6949a9113dbb 100644
--- a/drivers/clocksource/timer-ti-32k.c
+++ b/drivers/clocksource/timer-ti-32k.c
@@ -97,6 +97,9 @@ static int __init ti_32k_timer_init(struct device_node *np)
 		return -ENXIO;
 	}

+	if (!of_machine_is_compatible("ti,am43"))
+		ti_32k_timer.cs.flags |= CLOCK_SOURCE_SUSPEND_NONSTOP;
+
 	ti_32k_timer.counter = ti_32k_timer.base;

 	/*
diff --git a/drivers/cpufreq/qcom-cpufreq-kryo.c b/drivers/cpufreq/qcom-cpufreq-kryo.c
index a1830fa25fc5..2a3675c24032 100644
--- a/drivers/cpufreq/qcom-cpufreq-kryo.c
+++ b/drivers/cpufreq/qcom-cpufreq-kryo.c
@@ -44,7 +44,7 @@ enum _msm8996_version {

 struct platform_device *cpufreq_dt_pdev, *kryo_cpufreq_pdev;

-static enum _msm8996_version __init qcom_cpufreq_kryo_get_msm_id(void)
+static enum _msm8996_version qcom_cpufreq_kryo_get_msm_id(void)
 {
 	size_t len;
 	u32 *msm_id;
@@ -222,7 +222,7 @@ static int __init qcom_cpufreq_kryo_init(void)
 }
 module_init(qcom_cpufreq_kryo_init);

-static void __init qcom_cpufreq_kryo_exit(void)
+static void __exit qcom_cpufreq_kryo_exit(void)
 {
 	platform_device_unregister(kryo_cpufreq_pdev);
 	platform_driver_unregister(&qcom_cpufreq_kryo_driver);
diff --git a/drivers/dax/device.c b/drivers/dax/device.c
index bbe4d72ca105..948806e57cee 100644
--- a/drivers/dax/device.c
+++ b/drivers/dax/device.c
@@ -535,6 +535,11 @@ static unsigned long dax_get_unmapped_area(struct file *filp,
 	return current->mm->get_unmapped_area(filp, addr, len, pgoff, flags);
 }

+static const struct address_space_operations dev_dax_aops = {
+	.set_page_dirty = noop_set_page_dirty,
+	.invalidatepage = noop_invalidatepage,
+};
+
 static int dax_open(struct inode *inode, struct file *filp)
 {
 	struct dax_device *dax_dev = inode_dax(inode);
@@ -544,6 +549,7 @@ static int dax_open(struct inode *inode, struct file *filp)
 	dev_dbg(&dev_dax->dev, "trace\n");
 	inode->i_mapping = __dax_inode->i_mapping;
 	inode->i_mapping->host = __dax_inode;
+	inode->i_mapping->a_ops = &dev_dax_aops;
 	filp->f_mapping = inode->i_mapping;
 	filp->f_wb_err = filemap_sample_wb_err(filp->f_mapping);
 	filp->private_data = dev_dax;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
index 0cc5190f4f36..5f3f54073818 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
@@ -258,6 +258,8 @@ int amdgpu_vce_suspend(struct amdgpu_device *adev)
 {
 	int i;

+	cancel_delayed_work_sync(&adev->vce.idle_work);
+
 	if (adev->vce.vcpu_bo == NULL)
 		return 0;

@@ -268,7 +270,6 @@ int amdgpu_vce_suspend(struct amdgpu_device *adev)
 	if (i == AMDGPU_MAX_VCE_HANDLES)
 		return 0;

-	cancel_delayed_work_sync(&adev->vce.idle_work);
 	/* TODO: suspending running encoding sessions isn't supported */
 	return -EINVAL;
 }
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
index fd654a4406db..400fc74bbae2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
@@ -153,11 +153,11 @@ int amdgpu_vcn_suspend(struct amdgpu_device *adev)
 	unsigned size;
 	void *ptr;

+	cancel_delayed_work_sync(&adev->vcn.idle_work);
+
 	if (adev->vcn.vcpu_bo == NULL)
 		return 0;

-	cancel_delayed_work_sync(&adev->vcn.idle_work);
-
 	size = amdgpu_bo_size(adev->vcn.vcpu_bo);
 	ptr = adev->vcn.cpu_addr;

diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 800f481a6995..96875950845a 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -641,6 +641,87 @@ amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
 	return NULL;
 }

+static void emulated_link_detect(struct dc_link *link)
+{
+	struct dc_sink_init_data sink_init_data = { 0 };
+	struct display_sink_capability sink_caps = { 0 };
+	enum dc_edid_status edid_status;
+	struct dc_context *dc_ctx = link->ctx;
+	struct dc_sink *sink = NULL;
+	struct dc_sink *prev_sink = NULL;
+
+	link->type = dc_connection_none;
+	prev_sink = link->local_sink;
+
+	if (prev_sink != NULL)
+		dc_sink_retain(prev_sink);
+
+	switch (link->connector_signal) {
+	case SIGNAL_TYPE_HDMI_TYPE_A: {
+		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
+		sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
+		break;
+	}
+
+	case SIGNAL_TYPE_DVI_SINGLE_LINK: {
+		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
+		sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
+		break;
+	}
+
+	case SIGNAL_TYPE_DVI_DUAL_LINK: {
+		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
+		sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
+		break;
+	}
+
+	case SIGNAL_TYPE_LVDS: {
+		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
+		sink_caps.signal = SIGNAL_TYPE_LVDS;
+		break;
+	}
+
+	case SIGNAL_TYPE_EDP: {
+		sink_caps.transaction_type =
+			DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
+		sink_caps.signal = SIGNAL_TYPE_EDP;
+		break;
+	}
+
+	case SIGNAL_TYPE_DISPLAY_PORT: {
+		sink_caps.transaction_type =
+			DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
+		sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
+		break;
+	}
+
+	default:
+		DC_ERROR("Invalid connector type! signal:%d\n",
+			link->connector_signal);
+		return;
+	}
+
+	sink_init_data.link = link;
+	sink_init_data.sink_signal = sink_caps.signal;
+
+	sink = dc_sink_create(&sink_init_data);
+	if (!sink) {
+		DC_ERROR("Failed to create sink!\n");
+		return;
+	}
+
+	link->local_sink = sink;
+
+	edid_status = dm_helpers_read_local_edid(
+			link->ctx,
+			link,
+			sink);
+
+	if (edid_status != EDID_OK)
+		DC_ERROR("Failed to read EDID");
+
+}
+
 static int dm_resume(void *handle)
 {
 	struct amdgpu_device *adev = handle;
@@ -654,6 +735,7 @@ static int dm_resume(void *handle)
 	struct drm_plane *plane;
 	struct drm_plane_state *new_plane_state;
 	struct dm_plane_state *dm_new_plane_state;
+	enum dc_connection_type new_connection_type = dc_connection_none;
 	int ret;
 	int i;

@@ -684,7 +766,13 @@ static int dm_resume(void *handle)
 			continue;

 		mutex_lock(&aconnector->hpd_lock);
-		dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
+		if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
+			DRM_ERROR("KMS: Failed to detect connector\n");
+
+		if (aconnector->base.force && new_connection_type == dc_connection_none)
+			emulated_link_detect(aconnector->dc_link);
+		else
+			dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);

 		if (aconnector->fake_enable && aconnector->dc_link->local_sink)
 			aconnector->fake_enable = false;
@@ -922,6 +1010,7 @@ static void handle_hpd_irq(void *param)
 	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
 	struct drm_connector *connector = &aconnector->base;
 	struct drm_device *dev = connector->dev;
+	enum dc_connection_type new_connection_type = dc_connection_none;

 	/* In case of failure or MST no need to update connector status or notify the OS
 	 * since (for MST case) MST does this in it's own context.
@@ -931,7 +1020,21 @@ static void handle_hpd_irq(void *param)
 	if (aconnector->fake_enable)
 		aconnector->fake_enable = false;

-	if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
+	if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
+		DRM_ERROR("KMS: Failed to detect connector\n");
+
+	if (aconnector->base.force && new_connection_type == dc_connection_none) {
+		emulated_link_detect(aconnector->dc_link);
+
+
+		drm_modeset_lock_all(dev);
+		dm_restore_drm_connector_state(dev, connector);
+		drm_modeset_unlock_all(dev);
+
+		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
+			drm_kms_helper_hotplug_event(dev);
+
+	} else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
 		amdgpu_dm_update_connector_after_detect(aconnector);


@@ -1031,6 +1134,7 @@ static void handle_hpd_rx_irq(void *param)
 	struct drm_device *dev = connector->dev;
 	struct dc_link *dc_link = aconnector->dc_link;
 	bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
+	enum dc_connection_type new_connection_type = dc_connection_none;

 	/* TODO:Temporary add mutex to protect hpd interrupt not have a gpio
 	 * conflict, after implement i2c helper, this mutex should be
@@ -1042,7 +1146,24 @@ static void handle_hpd_rx_irq(void *param)
 	if (dc_link_handle_hpd_rx_irq(dc_link, NULL, NULL) &&
 			!is_mst_root_connector) {
 		/* Downstream Port status changed. */
-		if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {
+		if (!dc_link_detect_sink(dc_link, &new_connection_type))
+			DRM_ERROR("KMS: Failed to detect connector\n");
+
+		if (aconnector->base.force && new_connection_type == dc_connection_none) {
+			emulated_link_detect(dc_link);
+
+			if (aconnector->fake_enable)
+				aconnector->fake_enable = false;
+
+			amdgpu_dm_update_connector_after_detect(aconnector);
+
+
+			drm_modeset_lock_all(dev);
+			dm_restore_drm_connector_state(dev, connector);
+			drm_modeset_unlock_all(dev);
+
+			drm_kms_helper_hotplug_event(dev);
+		} else if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {

 			if (aconnector->fake_enable)
 				aconnector->fake_enable = false;
@@ -1433,6 +1554,7 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
 	struct amdgpu_mode_info *mode_info = &adev->mode_info;
 	uint32_t link_cnt;
 	int32_t total_overlay_planes, total_primary_planes;
+	enum dc_connection_type new_connection_type = dc_connection_none;

 	link_cnt = dm->dc->caps.max_links;
 	if (amdgpu_dm_mode_config_init(dm->adev)) {
@@ -1499,7 +1621,14 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)

 		link = dc_get_link_at_index(dm->dc, i);

-		if (dc_link_detect(link, DETECT_REASON_BOOT)) {
+		if (!dc_link_detect_sink(link, &new_connection_type))
+			DRM_ERROR("KMS: Failed to detect connector\n");
+
+		if (aconnector->base.force && new_connection_type == dc_connection_none) {
+			emulated_link_detect(link);
+			amdgpu_dm_update_connector_after_detect(aconnector);
+
+		} else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
 			amdgpu_dm_update_connector_after_detect(aconnector);
 			register_backlight_device(dm, link);
 		}
@@ -2494,7 +2623,7 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
 	if (dm_state && dm_state->freesync_capable)
 		stream->ignore_msa_timing_param = true;
 finish:
-	if (sink && sink->sink_signal == SIGNAL_TYPE_VIRTUAL)
+	if (sink && sink->sink_signal == SIGNAL_TYPE_VIRTUAL && aconnector->base.force != DRM_FORCE_ON)
 		dc_sink_release(sink);

 	return stream;
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
index 37eaf72ace54..fced3c1c2ef5 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
@@ -195,7 +195,7 @@ static bool program_hpd_filter(
 	return result;
 }

-static bool detect_sink(struct dc_link *link, enum dc_connection_type *type)
+bool dc_link_detect_sink(struct dc_link *link, enum dc_connection_type *type)
 {
 	uint32_t is_hpd_high = 0;
 	struct gpio *hpd_pin;
@@ -604,7 +604,7 @@ bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason)
 	if (link->connector_signal == SIGNAL_TYPE_VIRTUAL)
 		return false;

-	if (false == detect_sink(link, &new_connection_type)) {
+	if (false == dc_link_detect_sink(link, &new_connection_type)) {
 		BREAK_TO_DEBUGGER();
 		return false;
 	}
diff --git a/drivers/gpu/drm/amd/display/dc/dc_link.h b/drivers/gpu/drm/amd/display/dc/dc_link.h
index d43cefbc43d3..1b48ab9aea89 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_link.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_link.h
@@ -215,6 +215,7 @@ void dc_link_enable_hpd_filter(struct dc_link *link, bool enable);

 bool dc_link_is_dp_sink_present(struct dc_link *link);

+bool dc_link_detect_sink(struct dc_link *link, enum dc_connection_type *type);
 /*
  * DPCD access interfaces
  */
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
index 14384d9675a8..b2f308766a9e 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
@@ -2560,7 +2560,7 @@ static void pplib_apply_display_requirements(
 	dc->prev_display_config = *pp_display_cfg;
 }

-void dce110_set_bandwidth(
+static void dce110_set_bandwidth(
 		struct dc *dc,
 		struct dc_state *context,
 		bool decrease_allowed)
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h
index e4c5db75c4c6..d6db3dbd9015 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h
@@ -68,11 +68,6 @@ void dce110_fill_display_configs(
 	const struct dc_state *context,
 	struct dm_pp_display_configuration *pp_display_cfg);

-void dce110_set_bandwidth(
-		struct dc *dc,
-		struct dc_state *context,
-		bool decrease_allowed);
-
 uint32_t dce110_get_min_vblank_time_us(const struct dc_state *context);

 void dp_receiver_power_ctrl(struct dc_link *link, bool on);
diff --git a/drivers/gpu/drm/amd/display/dc/dce120/dce120_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce120/dce120_hw_sequencer.c
index 5853522a6182..eb0f5f9a973b 100644
--- a/drivers/gpu/drm/amd/display/dc/dce120/dce120_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dce120/dce120_hw_sequencer.c
@@ -244,17 +244,6 @@ static void dce120_update_dchub(
 	dh_data->dchub_info_valid = false;
 }

-static void dce120_set_bandwidth(
-		struct dc *dc,
-		struct dc_state *context,
-		bool decrease_allowed)
-{
-	if (context->stream_count <= 0)
-		return;
-
-	dce110_set_bandwidth(dc, context, decrease_allowed);
-}
-
 void dce120_hw_sequencer_construct(struct dc *dc)
 {
 	/* All registers used by dce11.2 match those in dce11 in offset and
@@ -263,6 +252,5 @@ void dce120_hw_sequencer_construct(struct dc *dc)
 	dce110_hw_sequencer_construct(dc);
 	dc->hwss.enable_display_power_gating = dce120_enable_display_power_gating;
 	dc->hwss.update_dchub = dce120_update_dchub;
-	dc->hwss.set_bandwidth = dce120_set_bandwidth;
 }

diff --git a/drivers/gpu/drm/arm/malidp_drv.c b/drivers/gpu/drm/arm/malidp_drv.c
index 08b5bb219816..94d6dabec2dc 100644
--- a/drivers/gpu/drm/arm/malidp_drv.c
+++ b/drivers/gpu/drm/arm/malidp_drv.c
@@ -754,6 +754,7 @@ static int malidp_bind(struct device *dev)
 	drm->irq_enabled = true;

 	ret = drm_vblank_init(drm, drm->mode_config.num_crtc);
+	drm_crtc_vblank_reset(&malidp->crtc);
 	if (ret < 0) {
 		DRM_ERROR("failed to initialise vblank\n");
 		goto vblank_fail;
diff --git a/drivers/gpu/drm/arm/malidp_hw.c b/drivers/gpu/drm/arm/malidp_hw.c
index c94a4422e0e9..2781e462c1ed 100644
--- a/drivers/gpu/drm/arm/malidp_hw.c
+++ b/drivers/gpu/drm/arm/malidp_hw.c
@@ -384,7 +384,8 @@ static long malidp500_se_calc_mclk(struct malidp_hw_device *hwdev,

 static int malidp500_enable_memwrite(struct malidp_hw_device *hwdev,
 				dma_addr_t *addrs, s32 *pitches,
-				int num_planes, u16 w, u16 h, u32 fmt_id)
+				int num_planes, u16 w, u16 h, u32 fmt_id,
+				const s16 *rgb2yuv_coeffs)
 {
 	u32 base = MALIDP500_SE_MEMWRITE_BASE;
 	u32 de_base = malidp_get_block_base(hwdev, MALIDP_DE_BLOCK);
@@ -416,6 +417,16 @@ static int malidp500_enable_memwrite(struct malidp_hw_device *hwdev,

 	malidp_hw_write(hwdev, MALIDP_DE_H_ACTIVE(w) | MALIDP_DE_V_ACTIVE(h),
 			MALIDP500_SE_MEMWRITE_OUT_SIZE);
+
+	if (rgb2yuv_coeffs) {
+		int i;
+
+		for (i = 0; i < MALIDP_COLORADJ_NUM_COEFFS; i++) {
+			malidp_hw_write(hwdev, rgb2yuv_coeffs[i],
+					MALIDP500_SE_RGB_YUV_COEFFS + i * 4);
+		}
+	}
+
 	malidp_hw_setbits(hwdev, MALIDP_SE_MEMWRITE_EN, MALIDP500_SE_CONTROL);

 	return 0;
@@ -658,7 +669,8 @@ static long malidp550_se_calc_mclk(struct malidp_hw_device *hwdev,

 static int malidp550_enable_memwrite(struct malidp_hw_device *hwdev,
 				dma_addr_t *addrs, s32 *pitches,
-				int num_planes, u16 w, u16 h, u32 fmt_id)
+				int num_planes, u16 w, u16 h, u32 fmt_id,
+				const s16 *rgb2yuv_coeffs)
 {
 	u32 base = MALIDP550_SE_MEMWRITE_BASE;
 	u32 de_base = malidp_get_block_base(hwdev, MALIDP_DE_BLOCK);
@@ -689,6 +701,15 @@ static int malidp550_enable_memwrite(struct malidp_hw_device *hwdev,
 	malidp_hw_setbits(hwdev, MALIDP550_SE_MEMWRITE_ONESHOT | MALIDP_SE_MEMWRITE_EN,
 			MALIDP550_SE_CONTROL);

+	if (rgb2yuv_coeffs) {
+		int i;
+
+		for (i = 0; i < MALIDP_COLORADJ_NUM_COEFFS; i++) {
+			malidp_hw_write(hwdev, rgb2yuv_coeffs[i],
+					MALIDP550_SE_RGB_YUV_COEFFS + i * 4);
+		}
+	}
+
 	return 0;
 }

diff --git a/drivers/gpu/drm/arm/malidp_hw.h b/drivers/gpu/drm/arm/malidp_hw.h index ad2e96915d44..9fc94c08190f 100644 --- a/drivers/gpu/drm/arm/malidp_hw.h +++ b/drivers/gpu/drm/arm/malidp_hw.h | |||
| @@ -191,7 +191,8 @@ struct malidp_hw { | |||
| 191 | * @param fmt_id - internal format ID of output buffer | 191 | * @param fmt_id - internal format ID of output buffer |
| 192 | */ | 192 | */ |
| 193 | int (*enable_memwrite)(struct malidp_hw_device *hwdev, dma_addr_t *addrs, | 193 | int (*enable_memwrite)(struct malidp_hw_device *hwdev, dma_addr_t *addrs, |
| 194 | s32 *pitches, int num_planes, u16 w, u16 h, u32 fmt_id); | 194 | s32 *pitches, int num_planes, u16 w, u16 h, u32 fmt_id, |
| 195 | const s16 *rgb2yuv_coeffs); | ||
| 195 | 196 | ||
| 196 | /* | 197 | /* |
| 197 | * Disable the writing to memory of the next frame's content. | 198 | * Disable the writing to memory of the next frame's content. |
diff --git a/drivers/gpu/drm/arm/malidp_mw.c b/drivers/gpu/drm/arm/malidp_mw.c index ba6ae66387c9..91472e5e0c8b 100644 --- a/drivers/gpu/drm/arm/malidp_mw.c +++ b/drivers/gpu/drm/arm/malidp_mw.c | |||
| @@ -26,6 +26,8 @@ struct malidp_mw_connector_state { | |||
| 26 | s32 pitches[2]; | 26 | s32 pitches[2]; |
| 27 | u8 format; | 27 | u8 format; |
| 28 | u8 n_planes; | 28 | u8 n_planes; |
| 29 | bool rgb2yuv_initialized; | ||
| 30 | const s16 *rgb2yuv_coeffs; | ||
| 29 | }; | 31 | }; |
| 30 | 32 | ||
| 31 | static int malidp_mw_connector_get_modes(struct drm_connector *connector) | 33 | static int malidp_mw_connector_get_modes(struct drm_connector *connector) |
| @@ -84,7 +86,7 @@ static void malidp_mw_connector_destroy(struct drm_connector *connector) | |||
| 84 | static struct drm_connector_state * | 86 | static struct drm_connector_state * |
| 85 | malidp_mw_connector_duplicate_state(struct drm_connector *connector) | 87 | malidp_mw_connector_duplicate_state(struct drm_connector *connector) |
| 86 | { | 88 | { |
| 87 | struct malidp_mw_connector_state *mw_state; | 89 | struct malidp_mw_connector_state *mw_state, *mw_current_state; |
| 88 | 90 | ||
| 89 | if (WARN_ON(!connector->state)) | 91 | if (WARN_ON(!connector->state)) |
| 90 | return NULL; | 92 | return NULL; |
| @@ -93,7 +95,10 @@ malidp_mw_connector_duplicate_state(struct drm_connector *connector) | |||
| 93 | if (!mw_state) | 95 | if (!mw_state) |
| 94 | return NULL; | 96 | return NULL; |
| 95 | 97 | ||
| 96 | /* No need to preserve any of our driver-local data */ | 98 | mw_current_state = to_mw_state(connector->state); |
| 99 | mw_state->rgb2yuv_coeffs = mw_current_state->rgb2yuv_coeffs; | ||
| 100 | mw_state->rgb2yuv_initialized = mw_current_state->rgb2yuv_initialized; | ||
| 101 | |||
| 97 | __drm_atomic_helper_connector_duplicate_state(connector, &mw_state->base); | 102 | __drm_atomic_helper_connector_duplicate_state(connector, &mw_state->base); |
| 98 | 103 | ||
| 99 | return &mw_state->base; | 104 | return &mw_state->base; |
| @@ -108,6 +113,13 @@ static const struct drm_connector_funcs malidp_mw_connector_funcs = { | |||
| 108 | .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, | 113 | .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, |
| 109 | }; | 114 | }; |
| 110 | 115 | ||
| 116 | static const s16 rgb2yuv_coeffs_bt709_limited[MALIDP_COLORADJ_NUM_COEFFS] = { | ||
| 117 | 47, 157, 16, | ||
| 118 | -26, -87, 112, | ||
| 119 | 112, -102, -10, | ||
| 120 | 16, 128, 128 | ||
| 121 | }; | ||
| 122 | |||
| 111 | static int | 123 | static int |
| 112 | malidp_mw_encoder_atomic_check(struct drm_encoder *encoder, | 124 | malidp_mw_encoder_atomic_check(struct drm_encoder *encoder, |
| 113 | struct drm_crtc_state *crtc_state, | 125 | struct drm_crtc_state *crtc_state, |
| @@ -157,6 +169,9 @@ malidp_mw_encoder_atomic_check(struct drm_encoder *encoder, | |||
| 157 | } | 169 | } |
| 158 | mw_state->n_planes = n_planes; | 170 | mw_state->n_planes = n_planes; |
| 159 | 171 | ||
| 172 | if (fb->format->is_yuv) | ||
| 173 | mw_state->rgb2yuv_coeffs = rgb2yuv_coeffs_bt709_limited; | ||
| 174 | |||
| 160 | return 0; | 175 | return 0; |
| 161 | } | 176 | } |
| 162 | 177 | ||
| @@ -239,10 +254,12 @@ void malidp_mw_atomic_commit(struct drm_device *drm, | |||
| 239 | 254 | ||
| 240 | drm_writeback_queue_job(mw_conn, conn_state->writeback_job); | 255 | drm_writeback_queue_job(mw_conn, conn_state->writeback_job); |
| 241 | conn_state->writeback_job = NULL; | 256 | conn_state->writeback_job = NULL; |
| 242 | |||
| 243 | hwdev->hw->enable_memwrite(hwdev, mw_state->addrs, | 257 | hwdev->hw->enable_memwrite(hwdev, mw_state->addrs, |
| 244 | mw_state->pitches, mw_state->n_planes, | 258 | mw_state->pitches, mw_state->n_planes, |
| 245 | fb->width, fb->height, mw_state->format); | 259 | fb->width, fb->height, mw_state->format, |
| 260 | !mw_state->rgb2yuv_initialized ? | ||
| 261 | mw_state->rgb2yuv_coeffs : NULL); | ||
| 262 | mw_state->rgb2yuv_initialized = !!mw_state->rgb2yuv_coeffs; | ||
| 246 | } else { | 263 | } else { |
| 247 | DRM_DEV_DEBUG_DRIVER(drm->dev, "Disable memwrite\n"); | 264 | DRM_DEV_DEBUG_DRIVER(drm->dev, "Disable memwrite\n"); |
| 248 | hwdev->hw->disable_memwrite(hwdev); | 265 | hwdev->hw->disable_memwrite(hwdev); |
diff --git a/drivers/gpu/drm/arm/malidp_regs.h b/drivers/gpu/drm/arm/malidp_regs.h index 3579d36b2a71..6ffe849774f2 100644 --- a/drivers/gpu/drm/arm/malidp_regs.h +++ b/drivers/gpu/drm/arm/malidp_regs.h | |||
| @@ -205,6 +205,7 @@ | |||
| 205 | #define MALIDP500_SE_BASE 0x00c00 | 205 | #define MALIDP500_SE_BASE 0x00c00 |
| 206 | #define MALIDP500_SE_CONTROL 0x00c0c | 206 | #define MALIDP500_SE_CONTROL 0x00c0c |
| 207 | #define MALIDP500_SE_MEMWRITE_OUT_SIZE 0x00c2c | 207 | #define MALIDP500_SE_MEMWRITE_OUT_SIZE 0x00c2c |
| 208 | #define MALIDP500_SE_RGB_YUV_COEFFS 0x00C74 | ||
| 208 | #define MALIDP500_SE_MEMWRITE_BASE 0x00e00 | 209 | #define MALIDP500_SE_MEMWRITE_BASE 0x00e00 |
| 209 | #define MALIDP500_DC_IRQ_BASE 0x00f00 | 210 | #define MALIDP500_DC_IRQ_BASE 0x00f00 |
| 210 | #define MALIDP500_CONFIG_VALID 0x00f00 | 211 | #define MALIDP500_CONFIG_VALID 0x00f00 |
| @@ -238,6 +239,7 @@ | |||
| 238 | #define MALIDP550_SE_CONTROL 0x08010 | 239 | #define MALIDP550_SE_CONTROL 0x08010 |
| 239 | #define MALIDP550_SE_MEMWRITE_ONESHOT (1 << 7) | 240 | #define MALIDP550_SE_MEMWRITE_ONESHOT (1 << 7) |
| 240 | #define MALIDP550_SE_MEMWRITE_OUT_SIZE 0x08030 | 241 | #define MALIDP550_SE_MEMWRITE_OUT_SIZE 0x08030 |
| 242 | #define MALIDP550_SE_RGB_YUV_COEFFS 0x08078 | ||
| 241 | #define MALIDP550_SE_MEMWRITE_BASE 0x08100 | 243 | #define MALIDP550_SE_MEMWRITE_BASE 0x08100 |
| 242 | #define MALIDP550_DC_BASE 0x0c000 | 244 | #define MALIDP550_DC_BASE 0x0c000 |
| 243 | #define MALIDP550_DC_CONTROL 0x0c010 | 245 | #define MALIDP550_DC_CONTROL 0x0c010 |
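The malidp writeback changes above add a per-connector-state RGB-to-YUV coefficient table that is pushed to the scaling-engine registers only on the first commit that needs it, with the duplicated state carrying the "already programmed" flag forward. Below is a minimal standalone sketch of that write-once pattern; the names (wb_state, fake_hw_write, the register offset) are invented for illustration and are not the driver's API.

```c
/* Illustrative sketch only, not the kernel code above: program the
 * RGB->YUV matrix once, then remember that fact in the duplicated state. */
#include <stdbool.h>
#include <stdio.h>

#define NUM_COEFFS 12

static const short bt709_limited[NUM_COEFFS] = {
	47, 157, 16, -26, -87, 112, 112, -102, -10, 16, 128, 128
};

struct wb_state {
	const short *coeffs;     /* desired matrix, or NULL for RGB output */
	bool coeffs_programmed;  /* already written to the hardware? */
};

static void fake_hw_write(unsigned reg, short val)
{
	printf("write reg 0x%04x = %d\n", reg, val);
}

/* Duplicating the state keeps the driver-local fields, as in the patch. */
static struct wb_state duplicate_state(const struct wb_state *old)
{
	return *old;
}

static void commit(struct wb_state *state)
{
	/* Only push the matrix on the first commit that needs it. */
	if (state->coeffs && !state->coeffs_programmed) {
		for (int i = 0; i < NUM_COEFFS; i++)
			fake_hw_write(0x0c74 + i * 4, state->coeffs[i]);
	}
	state->coeffs_programmed = state->coeffs != NULL;
}

int main(void)
{
	struct wb_state s = { .coeffs = bt709_limited };

	commit(&s);                          /* programs 12 registers */
	struct wb_state s2 = duplicate_state(&s);
	commit(&s2);                         /* skips the writes */
	return 0;
}
```

Running the sketch prints the twelve register writes once; the second commit, made from the duplicated state, skips them.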
diff --git a/drivers/gpu/drm/drm_panel.c b/drivers/gpu/drm/drm_panel.c index b902361dee6e..1d9a9d2fe0e0 100644 --- a/drivers/gpu/drm/drm_panel.c +++ b/drivers/gpu/drm/drm_panel.c | |||
| @@ -24,7 +24,6 @@ | |||
| 24 | #include <linux/err.h> | 24 | #include <linux/err.h> |
| 25 | #include <linux/module.h> | 25 | #include <linux/module.h> |
| 26 | 26 | ||
| 27 | #include <drm/drm_device.h> | ||
| 28 | #include <drm/drm_crtc.h> | 27 | #include <drm/drm_crtc.h> |
| 29 | #include <drm/drm_panel.h> | 28 | #include <drm/drm_panel.h> |
| 30 | 29 | ||
| @@ -105,13 +104,6 @@ int drm_panel_attach(struct drm_panel *panel, struct drm_connector *connector) | |||
| 105 | if (panel->connector) | 104 | if (panel->connector) |
| 106 | return -EBUSY; | 105 | return -EBUSY; |
| 107 | 106 | ||
| 108 | panel->link = device_link_add(connector->dev->dev, panel->dev, 0); | ||
| 109 | if (!panel->link) { | ||
| 110 | dev_err(panel->dev, "failed to link panel to %s\n", | ||
| 111 | dev_name(connector->dev->dev)); | ||
| 112 | return -EINVAL; | ||
| 113 | } | ||
| 114 | |||
| 115 | panel->connector = connector; | 107 | panel->connector = connector; |
| 116 | panel->drm = connector->dev; | 108 | panel->drm = connector->dev; |
| 117 | 109 | ||
| @@ -133,8 +125,6 @@ EXPORT_SYMBOL(drm_panel_attach); | |||
| 133 | */ | 125 | */ |
| 134 | int drm_panel_detach(struct drm_panel *panel) | 126 | int drm_panel_detach(struct drm_panel *panel) |
| 135 | { | 127 | { |
| 136 | device_link_del(panel->link); | ||
| 137 | |||
| 138 | panel->connector = NULL; | 128 | panel->connector = NULL; |
| 139 | panel->drm = NULL; | 129 | panel->drm = NULL; |
| 140 | 130 | ||
diff --git a/drivers/gpu/drm/drm_syncobj.c b/drivers/gpu/drm/drm_syncobj.c index adb3cb27d31e..759278fef35a 100644 --- a/drivers/gpu/drm/drm_syncobj.c +++ b/drivers/gpu/drm/drm_syncobj.c | |||
| @@ -97,6 +97,8 @@ static int drm_syncobj_fence_get_or_add_callback(struct drm_syncobj *syncobj, | |||
| 97 | { | 97 | { |
| 98 | int ret; | 98 | int ret; |
| 99 | 99 | ||
| 100 | WARN_ON(*fence); | ||
| 101 | |||
| 100 | *fence = drm_syncobj_fence_get(syncobj); | 102 | *fence = drm_syncobj_fence_get(syncobj); |
| 101 | if (*fence) | 103 | if (*fence) |
| 102 | return 1; | 104 | return 1; |
| @@ -743,6 +745,9 @@ static signed long drm_syncobj_array_wait_timeout(struct drm_syncobj **syncobjs, | |||
| 743 | 745 | ||
| 744 | if (flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT) { | 746 | if (flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT) { |
| 745 | for (i = 0; i < count; ++i) { | 747 | for (i = 0; i < count; ++i) { |
| 748 | if (entries[i].fence) | ||
| 749 | continue; | ||
| 750 | |||
| 746 | drm_syncobj_fence_get_or_add_callback(syncobjs[i], | 751 | drm_syncobj_fence_get_or_add_callback(syncobjs[i], |
| 747 | &entries[i].fence, | 752 | &entries[i].fence, |
| 748 | &entries[i].syncobj_cb, | 753 | &entries[i].syncobj_cb, |
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_drv.c b/drivers/gpu/drm/etnaviv/etnaviv_drv.c index 9b2720b41571..83c1f46670bf 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_drv.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_drv.c | |||
| @@ -592,8 +592,6 @@ static int etnaviv_pdev_probe(struct platform_device *pdev) | |||
| 592 | struct device *dev = &pdev->dev; | 592 | struct device *dev = &pdev->dev; |
| 593 | struct component_match *match = NULL; | 593 | struct component_match *match = NULL; |
| 594 | 594 | ||
| 595 | dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32)); | ||
| 596 | |||
| 597 | if (!dev->platform_data) { | 595 | if (!dev->platform_data) { |
| 598 | struct device_node *core_node; | 596 | struct device_node *core_node; |
| 599 | 597 | ||
| @@ -655,13 +653,30 @@ static int __init etnaviv_init(void) | |||
| 655 | for_each_compatible_node(np, NULL, "vivante,gc") { | 653 | for_each_compatible_node(np, NULL, "vivante,gc") { |
| 656 | if (!of_device_is_available(np)) | 654 | if (!of_device_is_available(np)) |
| 657 | continue; | 655 | continue; |
| 658 | pdev = platform_device_register_simple("etnaviv", -1, | 656 | |
| 659 | NULL, 0); | 657 | pdev = platform_device_alloc("etnaviv", -1); |
| 660 | if (IS_ERR(pdev)) { | 658 | if (!pdev) { |
| 661 | ret = PTR_ERR(pdev); | 659 | ret = -ENOMEM; |
| 660 | of_node_put(np); | ||
| 661 | goto unregister_platform_driver; | ||
| 662 | } | ||
| 663 | pdev->dev.coherent_dma_mask = DMA_BIT_MASK(40); | ||
| 664 | pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask; | ||
| 665 | |||
| 666 | /* | ||
| 667 | * Apply the same DMA configuration to the virtual etnaviv | ||
| 668 | * device as the GPU we found. This assumes that all Vivante | ||
| 669 | * GPUs in the system share the same DMA constraints. | ||
| 670 | */ | ||
| 671 | of_dma_configure(&pdev->dev, np, true); | ||
| 672 | |||
| 673 | ret = platform_device_add(pdev); | ||
| 674 | if (ret) { | ||
| 675 | platform_device_put(pdev); | ||
| 662 | of_node_put(np); | 676 | of_node_put(np); |
| 663 | goto unregister_platform_driver; | 677 | goto unregister_platform_driver; |
| 664 | } | 678 | } |
| 679 | |||
| 665 | etnaviv_drm = pdev; | 680 | etnaviv_drm = pdev; |
| 666 | of_node_put(np); | 681 | of_node_put(np); |
| 667 | break; | 682 | break; |
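The etnaviv change above switches the virtual DRM device from platform_device_register_simple() to an explicit alloc/configure/add sequence so the device can inherit DMA constraints from the first GPU node before it is added. A hedged sketch of that sequence follows; the device name and helper are made up, and the error path shows the usual rule that a device which was only allocated is released with platform_device_put(), not platform_device_unregister().

```c
/* Sketch of the alloc/configure/add pattern, with an invented device name. */
#include <linux/platform_device.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>

static struct platform_device *create_virtual_dev(struct device_node *np)
{
	struct platform_device *pdev;
	int ret;

	pdev = platform_device_alloc("example-virt", -1);
	if (!pdev)
		return ERR_PTR(-ENOMEM);

	pdev->dev.coherent_dma_mask = DMA_BIT_MASK(40);
	pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;

	/* Inherit DMA configuration (mask, offset, IOMMU) from the OF node. */
	of_dma_configure(&pdev->dev, np, true);

	ret = platform_device_add(pdev);
	if (ret) {
		platform_device_put(pdev);   /* add failed: put, not unregister */
		return ERR_PTR(ret);
	}
	return pdev;
}
```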
diff --git a/drivers/hwtracing/intel_th/core.c b/drivers/hwtracing/intel_th/core.c index da962aa2cef5..fc6b7f8b62fb 100644 --- a/drivers/hwtracing/intel_th/core.c +++ b/drivers/hwtracing/intel_th/core.c | |||
| @@ -139,7 +139,8 @@ static int intel_th_remove(struct device *dev) | |||
| 139 | th->thdev[i] = NULL; | 139 | th->thdev[i] = NULL; |
| 140 | } | 140 | } |
| 141 | 141 | ||
| 142 | th->num_thdevs = lowest; | 142 | if (lowest >= 0) |
| 143 | th->num_thdevs = lowest; | ||
| 143 | } | 144 | } |
| 144 | 145 | ||
| 145 | if (thdrv->attr_group) | 146 | if (thdrv->attr_group) |
| @@ -487,7 +488,7 @@ static const struct intel_th_subdevice { | |||
| 487 | .flags = IORESOURCE_MEM, | 488 | .flags = IORESOURCE_MEM, |
| 488 | }, | 489 | }, |
| 489 | { | 490 | { |
| 490 | .start = TH_MMIO_SW, | 491 | .start = 1, /* use resource[1] */ |
| 491 | .end = 0, | 492 | .end = 0, |
| 492 | .flags = IORESOURCE_MEM, | 493 | .flags = IORESOURCE_MEM, |
| 493 | }, | 494 | }, |
| @@ -580,6 +581,7 @@ intel_th_subdevice_alloc(struct intel_th *th, | |||
| 580 | struct intel_th_device *thdev; | 581 | struct intel_th_device *thdev; |
| 581 | struct resource res[3]; | 582 | struct resource res[3]; |
| 582 | unsigned int req = 0; | 583 | unsigned int req = 0; |
| 584 | bool is64bit = false; | ||
| 583 | int r, err; | 585 | int r, err; |
| 584 | 586 | ||
| 585 | thdev = intel_th_device_alloc(th, subdev->type, subdev->name, | 587 | thdev = intel_th_device_alloc(th, subdev->type, subdev->name, |
| @@ -589,12 +591,18 @@ intel_th_subdevice_alloc(struct intel_th *th, | |||
| 589 | 591 | ||
| 590 | thdev->drvdata = th->drvdata; | 592 | thdev->drvdata = th->drvdata; |
| 591 | 593 | ||
| 594 | for (r = 0; r < th->num_resources; r++) | ||
| 595 | if (th->resource[r].flags & IORESOURCE_MEM_64) { | ||
| 596 | is64bit = true; | ||
| 597 | break; | ||
| 598 | } | ||
| 599 | |||
| 592 | memcpy(res, subdev->res, | 600 | memcpy(res, subdev->res, |
| 593 | sizeof(struct resource) * subdev->nres); | 601 | sizeof(struct resource) * subdev->nres); |
| 594 | 602 | ||
| 595 | for (r = 0; r < subdev->nres; r++) { | 603 | for (r = 0; r < subdev->nres; r++) { |
| 596 | struct resource *devres = th->resource; | 604 | struct resource *devres = th->resource; |
| 597 | int bar = TH_MMIO_CONFIG; | 605 | int bar = 0; /* cut subdevices' MMIO from resource[0] */ |
| 598 | 606 | ||
| 599 | /* | 607 | /* |
| 600 | * Take .end == 0 to mean 'take the whole bar', | 608 | * Take .end == 0 to mean 'take the whole bar', |
| @@ -603,6 +611,8 @@ intel_th_subdevice_alloc(struct intel_th *th, | |||
| 603 | */ | 611 | */ |
| 604 | if (!res[r].end && res[r].flags == IORESOURCE_MEM) { | 612 | if (!res[r].end && res[r].flags == IORESOURCE_MEM) { |
| 605 | bar = res[r].start; | 613 | bar = res[r].start; |
| 614 | if (is64bit) | ||
| 615 | bar *= 2; | ||
| 606 | res[r].start = 0; | 616 | res[r].start = 0; |
| 607 | res[r].end = resource_size(&devres[bar]) - 1; | 617 | res[r].end = resource_size(&devres[bar]) - 1; |
| 608 | } | 618 | } |
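The intel_th change above stops hard-coding BAR numbers for subdevice resources and doubles the index when the parent device exposes 64-bit memory BARs, because a 64-bit BAR occupies two BAR slots. The toy program below only illustrates that index arithmetic with invented names; it is not the driver code.

```c
/* Toy illustration: with 64-bit memory BARs, logical BAR n sits at
 * resource index 2*n. RES_MEM_64 and struct res are made up here. */
#include <stdbool.h>
#include <stdio.h>

#define RES_MEM_64 0x1

struct res { unsigned long flags; };

static int effective_index(const struct res *resources, int nres, int bar)
{
	bool is64bit = false;

	for (int i = 0; i < nres; i++)
		if (resources[i].flags & RES_MEM_64) {
			is64bit = true;
			break;
		}

	return is64bit ? bar * 2 : bar;
}

int main(void)
{
	struct res r32[2] = { { 0 }, { 0 } };
	struct res r64[2] = { { RES_MEM_64 }, { RES_MEM_64 } };

	printf("32-bit BARs: logical 1 -> resource[%d]\n",
	       effective_index(r32, 2, 1));   /* 1 */
	printf("64-bit BARs: logical 1 -> resource[%d]\n",
	       effective_index(r64, 2, 1));   /* 2 */
	return 0;
}
```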
diff --git a/drivers/hwtracing/intel_th/pci.c b/drivers/hwtracing/intel_th/pci.c index c2e55e5d97f6..1cf6290d6435 100644 --- a/drivers/hwtracing/intel_th/pci.c +++ b/drivers/hwtracing/intel_th/pci.c | |||
| @@ -160,6 +160,11 @@ static const struct pci_device_id intel_th_pci_id_table[] = { | |||
| 160 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x18e1), | 160 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x18e1), |
| 161 | .driver_data = (kernel_ulong_t)&intel_th_2x, | 161 | .driver_data = (kernel_ulong_t)&intel_th_2x, |
| 162 | }, | 162 | }, |
| 163 | { | ||
| 164 | /* Ice Lake PCH */ | ||
| 165 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x34a6), | ||
| 166 | .driver_data = (kernel_ulong_t)&intel_th_2x, | ||
| 167 | }, | ||
| 163 | { 0 }, | 168 | { 0 }, |
| 164 | }; | 169 | }; |
| 165 | 170 | ||
diff --git a/drivers/infiniband/core/cache.c b/drivers/infiniband/core/cache.c index 0bee1f4b914e..3208ad6ad540 100644 --- a/drivers/infiniband/core/cache.c +++ b/drivers/infiniband/core/cache.c | |||
| @@ -338,6 +338,39 @@ static int add_roce_gid(struct ib_gid_table_entry *entry) | |||
| 338 | } | 338 | } |
| 339 | 339 | ||
| 340 | /** | 340 | /** |
| 341 | * del_gid - Delete GID table entry | ||
| 342 | * | ||
| 343 | * @ib_dev: IB device whose GID entry to be deleted | ||
| 344 | * @port: Port number of the IB device | ||
| 345 | * @table: GID table of the IB device for a port | ||
| 346 | * @ix: GID entry index to delete | ||
| 347 | * | ||
| 348 | */ | ||
| 349 | static void del_gid(struct ib_device *ib_dev, u8 port, | ||
| 350 | struct ib_gid_table *table, int ix) | ||
| 351 | { | ||
| 352 | struct ib_gid_table_entry *entry; | ||
| 353 | |||
| 354 | lockdep_assert_held(&table->lock); | ||
| 355 | |||
| 356 | pr_debug("%s device=%s port=%d index=%d gid %pI6\n", __func__, | ||
| 357 | ib_dev->name, port, ix, | ||
| 358 | table->data_vec[ix]->attr.gid.raw); | ||
| 359 | |||
| 360 | write_lock_irq(&table->rwlock); | ||
| 361 | entry = table->data_vec[ix]; | ||
| 362 | entry->state = GID_TABLE_ENTRY_PENDING_DEL; | ||
| 363 | /* | ||
| 364 | * For non RoCE protocol, GID entry slot is ready to use. | ||
| 365 | */ | ||
| 366 | if (!rdma_protocol_roce(ib_dev, port)) | ||
| 367 | table->data_vec[ix] = NULL; | ||
| 368 | write_unlock_irq(&table->rwlock); | ||
| 369 | |||
| 370 | put_gid_entry_locked(entry); | ||
| 371 | } | ||
| 372 | |||
| 373 | /** | ||
| 341 | * add_modify_gid - Add or modify GID table entry | 374 | * add_modify_gid - Add or modify GID table entry |
| 342 | * | 375 | * |
| 343 | * @table: GID table in which GID to be added or modified | 376 | * @table: GID table in which GID to be added or modified |
| @@ -358,7 +391,7 @@ static int add_modify_gid(struct ib_gid_table *table, | |||
| 358 | * this index. | 391 | * this index. |
| 359 | */ | 392 | */ |
| 360 | if (is_gid_entry_valid(table->data_vec[attr->index])) | 393 | if (is_gid_entry_valid(table->data_vec[attr->index])) |
| 361 | put_gid_entry(table->data_vec[attr->index]); | 394 | del_gid(attr->device, attr->port_num, table, attr->index); |
| 362 | 395 | ||
| 363 | /* | 396 | /* |
| 364 | * Some HCA's report multiple GID entries with only one valid GID, and | 397 | * Some HCA's report multiple GID entries with only one valid GID, and |
| @@ -386,39 +419,6 @@ done: | |||
| 386 | return ret; | 419 | return ret; |
| 387 | } | 420 | } |
| 388 | 421 | ||
| 389 | /** | ||
| 390 | * del_gid - Delete GID table entry | ||
| 391 | * | ||
| 392 | * @ib_dev: IB device whose GID entry to be deleted | ||
| 393 | * @port: Port number of the IB device | ||
| 394 | * @table: GID table of the IB device for a port | ||
| 395 | * @ix: GID entry index to delete | ||
| 396 | * | ||
| 397 | */ | ||
| 398 | static void del_gid(struct ib_device *ib_dev, u8 port, | ||
| 399 | struct ib_gid_table *table, int ix) | ||
| 400 | { | ||
| 401 | struct ib_gid_table_entry *entry; | ||
| 402 | |||
| 403 | lockdep_assert_held(&table->lock); | ||
| 404 | |||
| 405 | pr_debug("%s device=%s port=%d index=%d gid %pI6\n", __func__, | ||
| 406 | ib_dev->name, port, ix, | ||
| 407 | table->data_vec[ix]->attr.gid.raw); | ||
| 408 | |||
| 409 | write_lock_irq(&table->rwlock); | ||
| 410 | entry = table->data_vec[ix]; | ||
| 411 | entry->state = GID_TABLE_ENTRY_PENDING_DEL; | ||
| 412 | /* | ||
| 413 | * For non RoCE protocol, GID entry slot is ready to use. | ||
| 414 | */ | ||
| 415 | if (!rdma_protocol_roce(ib_dev, port)) | ||
| 416 | table->data_vec[ix] = NULL; | ||
| 417 | write_unlock_irq(&table->rwlock); | ||
| 418 | |||
| 419 | put_gid_entry_locked(entry); | ||
| 420 | } | ||
| 421 | |||
| 422 | /* rwlock should be read locked, or lock should be held */ | 422 | /* rwlock should be read locked, or lock should be held */ |
| 423 | static int find_gid(struct ib_gid_table *table, const union ib_gid *gid, | 423 | static int find_gid(struct ib_gid_table *table, const union ib_gid *gid, |
| 424 | const struct ib_gid_attr *val, bool default_gid, | 424 | const struct ib_gid_attr *val, bool default_gid, |
diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c index 5f437d1570fb..21863ddde63e 100644 --- a/drivers/infiniband/core/ucma.c +++ b/drivers/infiniband/core/ucma.c | |||
| @@ -1759,6 +1759,8 @@ static int ucma_close(struct inode *inode, struct file *filp) | |||
| 1759 | mutex_lock(&mut); | 1759 | mutex_lock(&mut); |
| 1760 | if (!ctx->closing) { | 1760 | if (!ctx->closing) { |
| 1761 | mutex_unlock(&mut); | 1761 | mutex_unlock(&mut); |
| 1762 | ucma_put_ctx(ctx); | ||
| 1763 | wait_for_completion(&ctx->comp); | ||
| 1762 | /* rdma_destroy_id ensures that no event handlers are | 1764 | /* rdma_destroy_id ensures that no event handlers are |
| 1763 | * inflight for that id before releasing it. | 1765 | * inflight for that id before releasing it. |
| 1764 | */ | 1766 | */ |
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c index a21d5214afc3..e012ca80f9d1 100644 --- a/drivers/infiniband/core/uverbs_cmd.c +++ b/drivers/infiniband/core/uverbs_cmd.c | |||
| @@ -2027,33 +2027,55 @@ static int modify_qp(struct ib_uverbs_file *file, | |||
| 2027 | 2027 | ||
| 2028 | if ((cmd->base.attr_mask & IB_QP_CUR_STATE && | 2028 | if ((cmd->base.attr_mask & IB_QP_CUR_STATE && |
| 2029 | cmd->base.cur_qp_state > IB_QPS_ERR) || | 2029 | cmd->base.cur_qp_state > IB_QPS_ERR) || |
| 2030 | cmd->base.qp_state > IB_QPS_ERR) { | 2030 | (cmd->base.attr_mask & IB_QP_STATE && |
| 2031 | cmd->base.qp_state > IB_QPS_ERR)) { | ||
| 2031 | ret = -EINVAL; | 2032 | ret = -EINVAL; |
| 2032 | goto release_qp; | 2033 | goto release_qp; |
| 2033 | } | 2034 | } |
| 2034 | 2035 | ||
| 2035 | attr->qp_state = cmd->base.qp_state; | 2036 | if (cmd->base.attr_mask & IB_QP_STATE) |
| 2036 | attr->cur_qp_state = cmd->base.cur_qp_state; | 2037 | attr->qp_state = cmd->base.qp_state; |
| 2037 | attr->path_mtu = cmd->base.path_mtu; | 2038 | if (cmd->base.attr_mask & IB_QP_CUR_STATE) |
| 2038 | attr->path_mig_state = cmd->base.path_mig_state; | 2039 | attr->cur_qp_state = cmd->base.cur_qp_state; |
| 2039 | attr->qkey = cmd->base.qkey; | 2040 | if (cmd->base.attr_mask & IB_QP_PATH_MTU) |
| 2040 | attr->rq_psn = cmd->base.rq_psn; | 2041 | attr->path_mtu = cmd->base.path_mtu; |
| 2041 | attr->sq_psn = cmd->base.sq_psn; | 2042 | if (cmd->base.attr_mask & IB_QP_PATH_MIG_STATE) |
| 2042 | attr->dest_qp_num = cmd->base.dest_qp_num; | 2043 | attr->path_mig_state = cmd->base.path_mig_state; |
| 2043 | attr->qp_access_flags = cmd->base.qp_access_flags; | 2044 | if (cmd->base.attr_mask & IB_QP_QKEY) |
| 2044 | attr->pkey_index = cmd->base.pkey_index; | 2045 | attr->qkey = cmd->base.qkey; |
| 2045 | attr->alt_pkey_index = cmd->base.alt_pkey_index; | 2046 | if (cmd->base.attr_mask & IB_QP_RQ_PSN) |
| 2046 | attr->en_sqd_async_notify = cmd->base.en_sqd_async_notify; | 2047 | attr->rq_psn = cmd->base.rq_psn; |
| 2047 | attr->max_rd_atomic = cmd->base.max_rd_atomic; | 2048 | if (cmd->base.attr_mask & IB_QP_SQ_PSN) |
| 2048 | attr->max_dest_rd_atomic = cmd->base.max_dest_rd_atomic; | 2049 | attr->sq_psn = cmd->base.sq_psn; |
| 2049 | attr->min_rnr_timer = cmd->base.min_rnr_timer; | 2050 | if (cmd->base.attr_mask & IB_QP_DEST_QPN) |
| 2050 | attr->port_num = cmd->base.port_num; | 2051 | attr->dest_qp_num = cmd->base.dest_qp_num; |
| 2051 | attr->timeout = cmd->base.timeout; | 2052 | if (cmd->base.attr_mask & IB_QP_ACCESS_FLAGS) |
| 2052 | attr->retry_cnt = cmd->base.retry_cnt; | 2053 | attr->qp_access_flags = cmd->base.qp_access_flags; |
| 2053 | attr->rnr_retry = cmd->base.rnr_retry; | 2054 | if (cmd->base.attr_mask & IB_QP_PKEY_INDEX) |
| 2054 | attr->alt_port_num = cmd->base.alt_port_num; | 2055 | attr->pkey_index = cmd->base.pkey_index; |
| 2055 | attr->alt_timeout = cmd->base.alt_timeout; | 2056 | if (cmd->base.attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY) |
| 2056 | attr->rate_limit = cmd->rate_limit; | 2057 | attr->en_sqd_async_notify = cmd->base.en_sqd_async_notify; |
| 2058 | if (cmd->base.attr_mask & IB_QP_MAX_QP_RD_ATOMIC) | ||
| 2059 | attr->max_rd_atomic = cmd->base.max_rd_atomic; | ||
| 2060 | if (cmd->base.attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) | ||
| 2061 | attr->max_dest_rd_atomic = cmd->base.max_dest_rd_atomic; | ||
| 2062 | if (cmd->base.attr_mask & IB_QP_MIN_RNR_TIMER) | ||
| 2063 | attr->min_rnr_timer = cmd->base.min_rnr_timer; | ||
| 2064 | if (cmd->base.attr_mask & IB_QP_PORT) | ||
| 2065 | attr->port_num = cmd->base.port_num; | ||
| 2066 | if (cmd->base.attr_mask & IB_QP_TIMEOUT) | ||
| 2067 | attr->timeout = cmd->base.timeout; | ||
| 2068 | if (cmd->base.attr_mask & IB_QP_RETRY_CNT) | ||
| 2069 | attr->retry_cnt = cmd->base.retry_cnt; | ||
| 2070 | if (cmd->base.attr_mask & IB_QP_RNR_RETRY) | ||
| 2071 | attr->rnr_retry = cmd->base.rnr_retry; | ||
| 2072 | if (cmd->base.attr_mask & IB_QP_ALT_PATH) { | ||
| 2073 | attr->alt_port_num = cmd->base.alt_port_num; | ||
| 2074 | attr->alt_timeout = cmd->base.alt_timeout; | ||
| 2075 | attr->alt_pkey_index = cmd->base.alt_pkey_index; | ||
| 2076 | } | ||
| 2077 | if (cmd->base.attr_mask & IB_QP_RATE_LIMIT) | ||
| 2078 | attr->rate_limit = cmd->rate_limit; | ||
| 2057 | 2079 | ||
| 2058 | if (cmd->base.attr_mask & IB_QP_AV) | 2080 | if (cmd->base.attr_mask & IB_QP_AV) |
| 2059 | copy_ah_attr_from_uverbs(qp->device, &attr->ah_attr, | 2081 | copy_ah_attr_from_uverbs(qp->device, &attr->ah_attr, |
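The modify_qp change above copies a field from the user command into the QP attributes only when the matching bit is set in attr_mask, so values the caller never initialized are not handed to the driver. A minimal standalone sketch of that mask-guarded copy, with invented names and only two fields, is shown below.

```c
/* Minimal sketch of a mask-guarded copy: only fields whose bit is set in
 * attr_mask are taken from the (possibly uninitialized) user command. */
#include <stdio.h>

#define ATTR_STATE   (1u << 0)
#define ATTR_TIMEOUT (1u << 1)

struct user_cmd { unsigned attr_mask; unsigned qp_state; unsigned timeout; };
struct qp_attr  { unsigned qp_state; unsigned timeout; };

static void apply_cmd(struct qp_attr *attr, const struct user_cmd *cmd)
{
	if (cmd->attr_mask & ATTR_STATE)
		attr->qp_state = cmd->qp_state;
	if (cmd->attr_mask & ATTR_TIMEOUT)
		attr->timeout = cmd->timeout;
	/* Fields without a mask bit keep their previous value. */
}

int main(void)
{
	struct qp_attr attr = { .qp_state = 3, .timeout = 14 };
	struct user_cmd cmd = { .attr_mask = ATTR_STATE, .qp_state = 5,
				.timeout = 0 /* garbage, must be ignored */ };

	apply_cmd(&attr, &cmd);
	printf("state=%u timeout=%u\n", attr.qp_state, attr.timeout); /* 5 14 */
	return 0;
}
```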
diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c index 6d974e2363df..50152c1b1004 100644 --- a/drivers/infiniband/core/uverbs_main.c +++ b/drivers/infiniband/core/uverbs_main.c | |||
| @@ -440,6 +440,7 @@ static int ib_uverbs_comp_event_close(struct inode *inode, struct file *filp) | |||
| 440 | list_del(&entry->obj_list); | 440 | list_del(&entry->obj_list); |
| 441 | kfree(entry); | 441 | kfree(entry); |
| 442 | } | 442 | } |
| 443 | file->ev_queue.is_closed = 1; | ||
| 443 | spin_unlock_irq(&file->ev_queue.lock); | 444 | spin_unlock_irq(&file->ev_queue.lock); |
| 444 | 445 | ||
| 445 | uverbs_close_fd(filp); | 446 | uverbs_close_fd(filp); |
diff --git a/drivers/infiniband/core/uverbs_uapi.c b/drivers/infiniband/core/uverbs_uapi.c index 73ea6f0db88f..be854628a7c6 100644 --- a/drivers/infiniband/core/uverbs_uapi.c +++ b/drivers/infiniband/core/uverbs_uapi.c | |||
| @@ -248,6 +248,7 @@ void uverbs_destroy_api(struct uverbs_api *uapi) | |||
| 248 | kfree(rcu_dereference_protected(*slot, true)); | 248 | kfree(rcu_dereference_protected(*slot, true)); |
| 249 | radix_tree_iter_delete(&uapi->radix, &iter, slot); | 249 | radix_tree_iter_delete(&uapi->radix, &iter, slot); |
| 250 | } | 250 | } |
| 251 | kfree(uapi); | ||
| 251 | } | 252 | } |
| 252 | 253 | ||
| 253 | struct uverbs_api *uverbs_alloc_api( | 254 | struct uverbs_api *uverbs_alloc_api( |
diff --git a/drivers/infiniband/hw/bnxt_re/main.c b/drivers/infiniband/hw/bnxt_re/main.c index 20b9f31052bf..85cd1a3593d6 100644 --- a/drivers/infiniband/hw/bnxt_re/main.c +++ b/drivers/infiniband/hw/bnxt_re/main.c | |||
| @@ -78,7 +78,7 @@ static struct list_head bnxt_re_dev_list = LIST_HEAD_INIT(bnxt_re_dev_list); | |||
| 78 | /* Mutex to protect the list of bnxt_re devices added */ | 78 | /* Mutex to protect the list of bnxt_re devices added */ |
| 79 | static DEFINE_MUTEX(bnxt_re_dev_lock); | 79 | static DEFINE_MUTEX(bnxt_re_dev_lock); |
| 80 | static struct workqueue_struct *bnxt_re_wq; | 80 | static struct workqueue_struct *bnxt_re_wq; |
| 81 | static void bnxt_re_ib_unreg(struct bnxt_re_dev *rdev, bool lock_wait); | 81 | static void bnxt_re_ib_unreg(struct bnxt_re_dev *rdev); |
| 82 | 82 | ||
| 83 | /* SR-IOV helper functions */ | 83 | /* SR-IOV helper functions */ |
| 84 | 84 | ||
| @@ -182,7 +182,7 @@ static void bnxt_re_shutdown(void *p) | |||
| 182 | if (!rdev) | 182 | if (!rdev) |
| 183 | return; | 183 | return; |
| 184 | 184 | ||
| 185 | bnxt_re_ib_unreg(rdev, false); | 185 | bnxt_re_ib_unreg(rdev); |
| 186 | } | 186 | } |
| 187 | 187 | ||
| 188 | static void bnxt_re_stop_irq(void *handle) | 188 | static void bnxt_re_stop_irq(void *handle) |
| @@ -251,7 +251,7 @@ static struct bnxt_ulp_ops bnxt_re_ulp_ops = { | |||
| 251 | /* Driver registration routines used to let the networking driver (bnxt_en) | 251 | /* Driver registration routines used to let the networking driver (bnxt_en) |
| 252 | * to know that the RoCE driver is now installed | 252 | * to know that the RoCE driver is now installed |
| 253 | */ | 253 | */ |
| 254 | static int bnxt_re_unregister_netdev(struct bnxt_re_dev *rdev, bool lock_wait) | 254 | static int bnxt_re_unregister_netdev(struct bnxt_re_dev *rdev) |
| 255 | { | 255 | { |
| 256 | struct bnxt_en_dev *en_dev; | 256 | struct bnxt_en_dev *en_dev; |
| 257 | int rc; | 257 | int rc; |
| @@ -260,14 +260,9 @@ static int bnxt_re_unregister_netdev(struct bnxt_re_dev *rdev, bool lock_wait) | |||
| 260 | return -EINVAL; | 260 | return -EINVAL; |
| 261 | 261 | ||
| 262 | en_dev = rdev->en_dev; | 262 | en_dev = rdev->en_dev; |
| 263 | /* Acquire rtnl lock if it is not invokded from netdev event */ | ||
| 264 | if (lock_wait) | ||
| 265 | rtnl_lock(); | ||
| 266 | 263 | ||
| 267 | rc = en_dev->en_ops->bnxt_unregister_device(rdev->en_dev, | 264 | rc = en_dev->en_ops->bnxt_unregister_device(rdev->en_dev, |
| 268 | BNXT_ROCE_ULP); | 265 | BNXT_ROCE_ULP); |
| 269 | if (lock_wait) | ||
| 270 | rtnl_unlock(); | ||
| 271 | return rc; | 266 | return rc; |
| 272 | } | 267 | } |
| 273 | 268 | ||
| @@ -281,14 +276,12 @@ static int bnxt_re_register_netdev(struct bnxt_re_dev *rdev) | |||
| 281 | 276 | ||
| 282 | en_dev = rdev->en_dev; | 277 | en_dev = rdev->en_dev; |
| 283 | 278 | ||
| 284 | rtnl_lock(); | ||
| 285 | rc = en_dev->en_ops->bnxt_register_device(en_dev, BNXT_ROCE_ULP, | 279 | rc = en_dev->en_ops->bnxt_register_device(en_dev, BNXT_ROCE_ULP, |
| 286 | &bnxt_re_ulp_ops, rdev); | 280 | &bnxt_re_ulp_ops, rdev); |
| 287 | rtnl_unlock(); | ||
| 288 | return rc; | 281 | return rc; |
| 289 | } | 282 | } |
| 290 | 283 | ||
| 291 | static int bnxt_re_free_msix(struct bnxt_re_dev *rdev, bool lock_wait) | 284 | static int bnxt_re_free_msix(struct bnxt_re_dev *rdev) |
| 292 | { | 285 | { |
| 293 | struct bnxt_en_dev *en_dev; | 286 | struct bnxt_en_dev *en_dev; |
| 294 | int rc; | 287 | int rc; |
| @@ -298,13 +291,9 @@ static int bnxt_re_free_msix(struct bnxt_re_dev *rdev, bool lock_wait) | |||
| 298 | 291 | ||
| 299 | en_dev = rdev->en_dev; | 292 | en_dev = rdev->en_dev; |
| 300 | 293 | ||
| 301 | if (lock_wait) | ||
| 302 | rtnl_lock(); | ||
| 303 | 294 | ||
| 304 | rc = en_dev->en_ops->bnxt_free_msix(rdev->en_dev, BNXT_ROCE_ULP); | 295 | rc = en_dev->en_ops->bnxt_free_msix(rdev->en_dev, BNXT_ROCE_ULP); |
| 305 | 296 | ||
| 306 | if (lock_wait) | ||
| 307 | rtnl_unlock(); | ||
| 308 | return rc; | 297 | return rc; |
| 309 | } | 298 | } |
| 310 | 299 | ||
| @@ -320,7 +309,6 @@ static int bnxt_re_request_msix(struct bnxt_re_dev *rdev) | |||
| 320 | 309 | ||
| 321 | num_msix_want = min_t(u32, BNXT_RE_MAX_MSIX, num_online_cpus()); | 310 | num_msix_want = min_t(u32, BNXT_RE_MAX_MSIX, num_online_cpus()); |
| 322 | 311 | ||
| 323 | rtnl_lock(); | ||
| 324 | num_msix_got = en_dev->en_ops->bnxt_request_msix(en_dev, BNXT_ROCE_ULP, | 312 | num_msix_got = en_dev->en_ops->bnxt_request_msix(en_dev, BNXT_ROCE_ULP, |
| 325 | rdev->msix_entries, | 313 | rdev->msix_entries, |
| 326 | num_msix_want); | 314 | num_msix_want); |
| @@ -335,7 +323,6 @@ static int bnxt_re_request_msix(struct bnxt_re_dev *rdev) | |||
| 335 | } | 323 | } |
| 336 | rdev->num_msix = num_msix_got; | 324 | rdev->num_msix = num_msix_got; |
| 337 | done: | 325 | done: |
| 338 | rtnl_unlock(); | ||
| 339 | return rc; | 326 | return rc; |
| 340 | } | 327 | } |
| 341 | 328 | ||
| @@ -358,24 +345,18 @@ static void bnxt_re_fill_fw_msg(struct bnxt_fw_msg *fw_msg, void *msg, | |||
| 358 | fw_msg->timeout = timeout; | 345 | fw_msg->timeout = timeout; |
| 359 | } | 346 | } |
| 360 | 347 | ||
| 361 | static int bnxt_re_net_ring_free(struct bnxt_re_dev *rdev, u16 fw_ring_id, | 348 | static int bnxt_re_net_ring_free(struct bnxt_re_dev *rdev, u16 fw_ring_id) |
| 362 | bool lock_wait) | ||
| 363 | { | 349 | { |
| 364 | struct bnxt_en_dev *en_dev = rdev->en_dev; | 350 | struct bnxt_en_dev *en_dev = rdev->en_dev; |
| 365 | struct hwrm_ring_free_input req = {0}; | 351 | struct hwrm_ring_free_input req = {0}; |
| 366 | struct hwrm_ring_free_output resp; | 352 | struct hwrm_ring_free_output resp; |
| 367 | struct bnxt_fw_msg fw_msg; | 353 | struct bnxt_fw_msg fw_msg; |
| 368 | bool do_unlock = false; | ||
| 369 | int rc = -EINVAL; | 354 | int rc = -EINVAL; |
| 370 | 355 | ||
| 371 | if (!en_dev) | 356 | if (!en_dev) |
| 372 | return rc; | 357 | return rc; |
| 373 | 358 | ||
| 374 | memset(&fw_msg, 0, sizeof(fw_msg)); | 359 | memset(&fw_msg, 0, sizeof(fw_msg)); |
| 375 | if (lock_wait) { | ||
| 376 | rtnl_lock(); | ||
| 377 | do_unlock = true; | ||
| 378 | } | ||
| 379 | 360 | ||
| 380 | bnxt_re_init_hwrm_hdr(rdev, (void *)&req, HWRM_RING_FREE, -1, -1); | 361 | bnxt_re_init_hwrm_hdr(rdev, (void *)&req, HWRM_RING_FREE, -1, -1); |
| 381 | req.ring_type = RING_ALLOC_REQ_RING_TYPE_L2_CMPL; | 362 | req.ring_type = RING_ALLOC_REQ_RING_TYPE_L2_CMPL; |
| @@ -386,8 +367,6 @@ static int bnxt_re_net_ring_free(struct bnxt_re_dev *rdev, u16 fw_ring_id, | |||
| 386 | if (rc) | 367 | if (rc) |
| 387 | dev_err(rdev_to_dev(rdev), | 368 | dev_err(rdev_to_dev(rdev), |
| 388 | "Failed to free HW ring:%d :%#x", req.ring_id, rc); | 369 | "Failed to free HW ring:%d :%#x", req.ring_id, rc); |
| 389 | if (do_unlock) | ||
| 390 | rtnl_unlock(); | ||
| 391 | return rc; | 370 | return rc; |
| 392 | } | 371 | } |
| 393 | 372 | ||
| @@ -405,7 +384,6 @@ static int bnxt_re_net_ring_alloc(struct bnxt_re_dev *rdev, dma_addr_t *dma_arr, | |||
| 405 | return rc; | 384 | return rc; |
| 406 | 385 | ||
| 407 | memset(&fw_msg, 0, sizeof(fw_msg)); | 386 | memset(&fw_msg, 0, sizeof(fw_msg)); |
| 408 | rtnl_lock(); | ||
| 409 | bnxt_re_init_hwrm_hdr(rdev, (void *)&req, HWRM_RING_ALLOC, -1, -1); | 387 | bnxt_re_init_hwrm_hdr(rdev, (void *)&req, HWRM_RING_ALLOC, -1, -1); |
| 410 | req.enables = 0; | 388 | req.enables = 0; |
| 411 | req.page_tbl_addr = cpu_to_le64(dma_arr[0]); | 389 | req.page_tbl_addr = cpu_to_le64(dma_arr[0]); |
| @@ -426,27 +404,21 @@ static int bnxt_re_net_ring_alloc(struct bnxt_re_dev *rdev, dma_addr_t *dma_arr, | |||
| 426 | if (!rc) | 404 | if (!rc) |
| 427 | *fw_ring_id = le16_to_cpu(resp.ring_id); | 405 | *fw_ring_id = le16_to_cpu(resp.ring_id); |
| 428 | 406 | ||
| 429 | rtnl_unlock(); | ||
| 430 | return rc; | 407 | return rc; |
| 431 | } | 408 | } |
| 432 | 409 | ||
| 433 | static int bnxt_re_net_stats_ctx_free(struct bnxt_re_dev *rdev, | 410 | static int bnxt_re_net_stats_ctx_free(struct bnxt_re_dev *rdev, |
| 434 | u32 fw_stats_ctx_id, bool lock_wait) | 411 | u32 fw_stats_ctx_id) |
| 435 | { | 412 | { |
| 436 | struct bnxt_en_dev *en_dev = rdev->en_dev; | 413 | struct bnxt_en_dev *en_dev = rdev->en_dev; |
| 437 | struct hwrm_stat_ctx_free_input req = {0}; | 414 | struct hwrm_stat_ctx_free_input req = {0}; |
| 438 | struct bnxt_fw_msg fw_msg; | 415 | struct bnxt_fw_msg fw_msg; |
| 439 | bool do_unlock = false; | ||
| 440 | int rc = -EINVAL; | 416 | int rc = -EINVAL; |
| 441 | 417 | ||
| 442 | if (!en_dev) | 418 | if (!en_dev) |
| 443 | return rc; | 419 | return rc; |
| 444 | 420 | ||
| 445 | memset(&fw_msg, 0, sizeof(fw_msg)); | 421 | memset(&fw_msg, 0, sizeof(fw_msg)); |
| 446 | if (lock_wait) { | ||
| 447 | rtnl_lock(); | ||
| 448 | do_unlock = true; | ||
| 449 | } | ||
| 450 | 422 | ||
| 451 | bnxt_re_init_hwrm_hdr(rdev, (void *)&req, HWRM_STAT_CTX_FREE, -1, -1); | 423 | bnxt_re_init_hwrm_hdr(rdev, (void *)&req, HWRM_STAT_CTX_FREE, -1, -1); |
| 452 | req.stat_ctx_id = cpu_to_le32(fw_stats_ctx_id); | 424 | req.stat_ctx_id = cpu_to_le32(fw_stats_ctx_id); |
| @@ -457,8 +429,6 @@ static int bnxt_re_net_stats_ctx_free(struct bnxt_re_dev *rdev, | |||
| 457 | dev_err(rdev_to_dev(rdev), | 429 | dev_err(rdev_to_dev(rdev), |
| 458 | "Failed to free HW stats context %#x", rc); | 430 | "Failed to free HW stats context %#x", rc); |
| 459 | 431 | ||
| 460 | if (do_unlock) | ||
| 461 | rtnl_unlock(); | ||
| 462 | return rc; | 432 | return rc; |
| 463 | } | 433 | } |
| 464 | 434 | ||
| @@ -478,7 +448,6 @@ static int bnxt_re_net_stats_ctx_alloc(struct bnxt_re_dev *rdev, | |||
| 478 | return rc; | 448 | return rc; |
| 479 | 449 | ||
| 480 | memset(&fw_msg, 0, sizeof(fw_msg)); | 450 | memset(&fw_msg, 0, sizeof(fw_msg)); |
| 481 | rtnl_lock(); | ||
| 482 | 451 | ||
| 483 | bnxt_re_init_hwrm_hdr(rdev, (void *)&req, HWRM_STAT_CTX_ALLOC, -1, -1); | 452 | bnxt_re_init_hwrm_hdr(rdev, (void *)&req, HWRM_STAT_CTX_ALLOC, -1, -1); |
| 484 | req.update_period_ms = cpu_to_le32(1000); | 453 | req.update_period_ms = cpu_to_le32(1000); |
| @@ -490,7 +459,6 @@ static int bnxt_re_net_stats_ctx_alloc(struct bnxt_re_dev *rdev, | |||
| 490 | if (!rc) | 459 | if (!rc) |
| 491 | *fw_stats_ctx_id = le32_to_cpu(resp.stat_ctx_id); | 460 | *fw_stats_ctx_id = le32_to_cpu(resp.stat_ctx_id); |
| 492 | 461 | ||
| 493 | rtnl_unlock(); | ||
| 494 | return rc; | 462 | return rc; |
| 495 | } | 463 | } |
| 496 | 464 | ||
| @@ -929,19 +897,19 @@ fail: | |||
| 929 | return rc; | 897 | return rc; |
| 930 | } | 898 | } |
| 931 | 899 | ||
| 932 | static void bnxt_re_free_nq_res(struct bnxt_re_dev *rdev, bool lock_wait) | 900 | static void bnxt_re_free_nq_res(struct bnxt_re_dev *rdev) |
| 933 | { | 901 | { |
| 934 | int i; | 902 | int i; |
| 935 | 903 | ||
| 936 | for (i = 0; i < rdev->num_msix - 1; i++) { | 904 | for (i = 0; i < rdev->num_msix - 1; i++) { |
| 937 | bnxt_re_net_ring_free(rdev, rdev->nq[i].ring_id, lock_wait); | 905 | bnxt_re_net_ring_free(rdev, rdev->nq[i].ring_id); |
| 938 | bnxt_qplib_free_nq(&rdev->nq[i]); | 906 | bnxt_qplib_free_nq(&rdev->nq[i]); |
| 939 | } | 907 | } |
| 940 | } | 908 | } |
| 941 | 909 | ||
| 942 | static void bnxt_re_free_res(struct bnxt_re_dev *rdev, bool lock_wait) | 910 | static void bnxt_re_free_res(struct bnxt_re_dev *rdev) |
| 943 | { | 911 | { |
| 944 | bnxt_re_free_nq_res(rdev, lock_wait); | 912 | bnxt_re_free_nq_res(rdev); |
| 945 | 913 | ||
| 946 | if (rdev->qplib_res.dpi_tbl.max) { | 914 | if (rdev->qplib_res.dpi_tbl.max) { |
| 947 | bnxt_qplib_dealloc_dpi(&rdev->qplib_res, | 915 | bnxt_qplib_dealloc_dpi(&rdev->qplib_res, |
| @@ -1219,7 +1187,7 @@ static int bnxt_re_setup_qos(struct bnxt_re_dev *rdev) | |||
| 1219 | return 0; | 1187 | return 0; |
| 1220 | } | 1188 | } |
| 1221 | 1189 | ||
| 1222 | static void bnxt_re_ib_unreg(struct bnxt_re_dev *rdev, bool lock_wait) | 1190 | static void bnxt_re_ib_unreg(struct bnxt_re_dev *rdev) |
| 1223 | { | 1191 | { |
| 1224 | int i, rc; | 1192 | int i, rc; |
| 1225 | 1193 | ||
| @@ -1234,28 +1202,27 @@ static void bnxt_re_ib_unreg(struct bnxt_re_dev *rdev, bool lock_wait) | |||
| 1234 | cancel_delayed_work(&rdev->worker); | 1202 | cancel_delayed_work(&rdev->worker); |
| 1235 | 1203 | ||
| 1236 | bnxt_re_cleanup_res(rdev); | 1204 | bnxt_re_cleanup_res(rdev); |
| 1237 | bnxt_re_free_res(rdev, lock_wait); | 1205 | bnxt_re_free_res(rdev); |
| 1238 | 1206 | ||
| 1239 | if (test_and_clear_bit(BNXT_RE_FLAG_RCFW_CHANNEL_EN, &rdev->flags)) { | 1207 | if (test_and_clear_bit(BNXT_RE_FLAG_RCFW_CHANNEL_EN, &rdev->flags)) { |
| 1240 | rc = bnxt_qplib_deinit_rcfw(&rdev->rcfw); | 1208 | rc = bnxt_qplib_deinit_rcfw(&rdev->rcfw); |
| 1241 | if (rc) | 1209 | if (rc) |
| 1242 | dev_warn(rdev_to_dev(rdev), | 1210 | dev_warn(rdev_to_dev(rdev), |
| 1243 | "Failed to deinitialize RCFW: %#x", rc); | 1211 | "Failed to deinitialize RCFW: %#x", rc); |
| 1244 | bnxt_re_net_stats_ctx_free(rdev, rdev->qplib_ctx.stats.fw_id, | 1212 | bnxt_re_net_stats_ctx_free(rdev, rdev->qplib_ctx.stats.fw_id); |
| 1245 | lock_wait); | ||
| 1246 | bnxt_qplib_free_ctx(rdev->en_dev->pdev, &rdev->qplib_ctx); | 1213 | bnxt_qplib_free_ctx(rdev->en_dev->pdev, &rdev->qplib_ctx); |
| 1247 | bnxt_qplib_disable_rcfw_channel(&rdev->rcfw); | 1214 | bnxt_qplib_disable_rcfw_channel(&rdev->rcfw); |
| 1248 | bnxt_re_net_ring_free(rdev, rdev->rcfw.creq_ring_id, lock_wait); | 1215 | bnxt_re_net_ring_free(rdev, rdev->rcfw.creq_ring_id); |
| 1249 | bnxt_qplib_free_rcfw_channel(&rdev->rcfw); | 1216 | bnxt_qplib_free_rcfw_channel(&rdev->rcfw); |
| 1250 | } | 1217 | } |
| 1251 | if (test_and_clear_bit(BNXT_RE_FLAG_GOT_MSIX, &rdev->flags)) { | 1218 | if (test_and_clear_bit(BNXT_RE_FLAG_GOT_MSIX, &rdev->flags)) { |
| 1252 | rc = bnxt_re_free_msix(rdev, lock_wait); | 1219 | rc = bnxt_re_free_msix(rdev); |
| 1253 | if (rc) | 1220 | if (rc) |
| 1254 | dev_warn(rdev_to_dev(rdev), | 1221 | dev_warn(rdev_to_dev(rdev), |
| 1255 | "Failed to free MSI-X vectors: %#x", rc); | 1222 | "Failed to free MSI-X vectors: %#x", rc); |
| 1256 | } | 1223 | } |
| 1257 | if (test_and_clear_bit(BNXT_RE_FLAG_NETDEV_REGISTERED, &rdev->flags)) { | 1224 | if (test_and_clear_bit(BNXT_RE_FLAG_NETDEV_REGISTERED, &rdev->flags)) { |
| 1258 | rc = bnxt_re_unregister_netdev(rdev, lock_wait); | 1225 | rc = bnxt_re_unregister_netdev(rdev); |
| 1259 | if (rc) | 1226 | if (rc) |
| 1260 | dev_warn(rdev_to_dev(rdev), | 1227 | dev_warn(rdev_to_dev(rdev), |
| 1261 | "Failed to unregister with netdev: %#x", rc); | 1228 | "Failed to unregister with netdev: %#x", rc); |
| @@ -1276,6 +1243,12 @@ static int bnxt_re_ib_reg(struct bnxt_re_dev *rdev) | |||
| 1276 | { | 1243 | { |
| 1277 | int i, j, rc; | 1244 | int i, j, rc; |
| 1278 | 1245 | ||
| 1246 | bool locked; | ||
| 1247 | |||
| 1248 | /* Acquire rtnl lock throughout this function */ | ||
| 1249 | rtnl_lock(); | ||
| 1250 | locked = true; | ||
| 1251 | |||
| 1279 | /* Registered a new RoCE device instance to netdev */ | 1252 | /* Registered a new RoCE device instance to netdev */ |
| 1280 | rc = bnxt_re_register_netdev(rdev); | 1253 | rc = bnxt_re_register_netdev(rdev); |
| 1281 | if (rc) { | 1254 | if (rc) { |
| @@ -1374,12 +1347,16 @@ static int bnxt_re_ib_reg(struct bnxt_re_dev *rdev) | |||
| 1374 | schedule_delayed_work(&rdev->worker, msecs_to_jiffies(30000)); | 1347 | schedule_delayed_work(&rdev->worker, msecs_to_jiffies(30000)); |
| 1375 | } | 1348 | } |
| 1376 | 1349 | ||
| 1350 | rtnl_unlock(); | ||
| 1351 | locked = false; | ||
| 1352 | |||
| 1377 | /* Register ib dev */ | 1353 | /* Register ib dev */ |
| 1378 | rc = bnxt_re_register_ib(rdev); | 1354 | rc = bnxt_re_register_ib(rdev); |
| 1379 | if (rc) { | 1355 | if (rc) { |
| 1380 | pr_err("Failed to register with IB: %#x\n", rc); | 1356 | pr_err("Failed to register with IB: %#x\n", rc); |
| 1381 | goto fail; | 1357 | goto fail; |
| 1382 | } | 1358 | } |
| 1359 | set_bit(BNXT_RE_FLAG_IBDEV_REGISTERED, &rdev->flags); | ||
| 1383 | dev_info(rdev_to_dev(rdev), "Device registered successfully"); | 1360 | dev_info(rdev_to_dev(rdev), "Device registered successfully"); |
| 1384 | for (i = 0; i < ARRAY_SIZE(bnxt_re_attributes); i++) { | 1361 | for (i = 0; i < ARRAY_SIZE(bnxt_re_attributes); i++) { |
| 1385 | rc = device_create_file(&rdev->ibdev.dev, | 1362 | rc = device_create_file(&rdev->ibdev.dev, |
| @@ -1395,7 +1372,6 @@ static int bnxt_re_ib_reg(struct bnxt_re_dev *rdev) | |||
| 1395 | goto fail; | 1372 | goto fail; |
| 1396 | } | 1373 | } |
| 1397 | } | 1374 | } |
| 1398 | set_bit(BNXT_RE_FLAG_IBDEV_REGISTERED, &rdev->flags); | ||
| 1399 | ib_get_eth_speed(&rdev->ibdev, 1, &rdev->active_speed, | 1375 | ib_get_eth_speed(&rdev->ibdev, 1, &rdev->active_speed, |
| 1400 | &rdev->active_width); | 1376 | &rdev->active_width); |
| 1401 | set_bit(BNXT_RE_FLAG_ISSUE_ROCE_STATS, &rdev->flags); | 1377 | set_bit(BNXT_RE_FLAG_ISSUE_ROCE_STATS, &rdev->flags); |
| @@ -1404,17 +1380,21 @@ static int bnxt_re_ib_reg(struct bnxt_re_dev *rdev) | |||
| 1404 | 1380 | ||
| 1405 | return 0; | 1381 | return 0; |
| 1406 | free_sctx: | 1382 | free_sctx: |
| 1407 | bnxt_re_net_stats_ctx_free(rdev, rdev->qplib_ctx.stats.fw_id, true); | 1383 | bnxt_re_net_stats_ctx_free(rdev, rdev->qplib_ctx.stats.fw_id); |
| 1408 | free_ctx: | 1384 | free_ctx: |
| 1409 | bnxt_qplib_free_ctx(rdev->en_dev->pdev, &rdev->qplib_ctx); | 1385 | bnxt_qplib_free_ctx(rdev->en_dev->pdev, &rdev->qplib_ctx); |
| 1410 | disable_rcfw: | 1386 | disable_rcfw: |
| 1411 | bnxt_qplib_disable_rcfw_channel(&rdev->rcfw); | 1387 | bnxt_qplib_disable_rcfw_channel(&rdev->rcfw); |
| 1412 | free_ring: | 1388 | free_ring: |
| 1413 | bnxt_re_net_ring_free(rdev, rdev->rcfw.creq_ring_id, true); | 1389 | bnxt_re_net_ring_free(rdev, rdev->rcfw.creq_ring_id); |
| 1414 | free_rcfw: | 1390 | free_rcfw: |
| 1415 | bnxt_qplib_free_rcfw_channel(&rdev->rcfw); | 1391 | bnxt_qplib_free_rcfw_channel(&rdev->rcfw); |
| 1416 | fail: | 1392 | fail: |
| 1417 | bnxt_re_ib_unreg(rdev, true); | 1393 | if (!locked) |
| 1394 | rtnl_lock(); | ||
| 1395 | bnxt_re_ib_unreg(rdev); | ||
| 1396 | rtnl_unlock(); | ||
| 1397 | |||
| 1418 | return rc; | 1398 | return rc; |
| 1419 | } | 1399 | } |
| 1420 | 1400 | ||
| @@ -1567,7 +1547,7 @@ static int bnxt_re_netdev_event(struct notifier_block *notifier, | |||
| 1567 | */ | 1547 | */ |
| 1568 | if (atomic_read(&rdev->sched_count) > 0) | 1548 | if (atomic_read(&rdev->sched_count) > 0) |
| 1569 | goto exit; | 1549 | goto exit; |
| 1570 | bnxt_re_ib_unreg(rdev, false); | 1550 | bnxt_re_ib_unreg(rdev); |
| 1571 | bnxt_re_remove_one(rdev); | 1551 | bnxt_re_remove_one(rdev); |
| 1572 | bnxt_re_dev_unreg(rdev); | 1552 | bnxt_re_dev_unreg(rdev); |
| 1573 | break; | 1553 | break; |
| @@ -1646,7 +1626,10 @@ static void __exit bnxt_re_mod_exit(void) | |||
| 1646 | */ | 1626 | */ |
| 1647 | flush_workqueue(bnxt_re_wq); | 1627 | flush_workqueue(bnxt_re_wq); |
| 1648 | bnxt_re_dev_stop(rdev); | 1628 | bnxt_re_dev_stop(rdev); |
| 1649 | bnxt_re_ib_unreg(rdev, true); | 1629 | /* Acquire the rtnl_lock as the L2 resources are freed here */ |
| 1630 | rtnl_lock(); | ||
| 1631 | bnxt_re_ib_unreg(rdev); | ||
| 1632 | rtnl_unlock(); | ||
| 1650 | bnxt_re_remove_one(rdev); | 1633 | bnxt_re_remove_one(rdev); |
| 1651 | bnxt_re_dev_unreg(rdev); | 1634 | bnxt_re_dev_unreg(rdev); |
| 1652 | } | 1635 | } |
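The bnxt_re change above drops the lock_wait flag from the teardown helpers and instead makes the callers (shutdown, the netdev notifier, module exit, and the failure path of bnxt_re_ib_reg) take rtnl_lock around bnxt_re_ib_unreg(). A small kernel-style sketch of that caller-holds-the-lock convention follows; the function names and the ASSERT_RTNL() check are illustrative additions, not part of the patch.

```c
/* Sketch of the locking convention: teardown assumes the caller already
 * holds rtnl_lock instead of taking it conditionally via a flag. */
#include <linux/rtnetlink.h>

static void example_teardown(void)
{
	ASSERT_RTNL();          /* caller must hold rtnl_lock */
	/* ... free L2 rings, stats contexts, MSI-X vectors ... */
}

static void example_remove(void)
{
	rtnl_lock();
	example_teardown();
	rtnl_unlock();
}
```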
diff --git a/drivers/infiniband/hw/hfi1/chip.c b/drivers/infiniband/hw/hfi1/chip.c index 2c19bf772451..e1668bcc2d13 100644 --- a/drivers/infiniband/hw/hfi1/chip.c +++ b/drivers/infiniband/hw/hfi1/chip.c | |||
| @@ -6733,6 +6733,7 @@ void start_freeze_handling(struct hfi1_pportdata *ppd, int flags) | |||
| 6733 | struct hfi1_devdata *dd = ppd->dd; | 6733 | struct hfi1_devdata *dd = ppd->dd; |
| 6734 | struct send_context *sc; | 6734 | struct send_context *sc; |
| 6735 | int i; | 6735 | int i; |
| 6736 | int sc_flags; | ||
| 6736 | 6737 | ||
| 6737 | if (flags & FREEZE_SELF) | 6738 | if (flags & FREEZE_SELF) |
| 6738 | write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_FREEZE_SMASK); | 6739 | write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_FREEZE_SMASK); |
| @@ -6743,11 +6744,13 @@ void start_freeze_handling(struct hfi1_pportdata *ppd, int flags) | |||
| 6743 | /* notify all SDMA engines that they are going into a freeze */ | 6744 | /* notify all SDMA engines that they are going into a freeze */ |
| 6744 | sdma_freeze_notify(dd, !!(flags & FREEZE_LINK_DOWN)); | 6745 | sdma_freeze_notify(dd, !!(flags & FREEZE_LINK_DOWN)); |
| 6745 | 6746 | ||
| 6747 | sc_flags = SCF_FROZEN | SCF_HALTED | (flags & FREEZE_LINK_DOWN ? | ||
| 6748 | SCF_LINK_DOWN : 0); | ||
| 6746 | /* do halt pre-handling on all enabled send contexts */ | 6749 | /* do halt pre-handling on all enabled send contexts */ |
| 6747 | for (i = 0; i < dd->num_send_contexts; i++) { | 6750 | for (i = 0; i < dd->num_send_contexts; i++) { |
| 6748 | sc = dd->send_contexts[i].sc; | 6751 | sc = dd->send_contexts[i].sc; |
| 6749 | if (sc && (sc->flags & SCF_ENABLED)) | 6752 | if (sc && (sc->flags & SCF_ENABLED)) |
| 6750 | sc_stop(sc, SCF_FROZEN | SCF_HALTED); | 6753 | sc_stop(sc, sc_flags); |
| 6751 | } | 6754 | } |
| 6752 | 6755 | ||
| 6753 | /* Send context are frozen. Notify user space */ | 6756 | /* Send context are frozen. Notify user space */ |
| @@ -10674,6 +10677,7 @@ int set_link_state(struct hfi1_pportdata *ppd, u32 state) | |||
| 10674 | add_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK); | 10677 | add_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK); |
| 10675 | 10678 | ||
| 10676 | handle_linkup_change(dd, 1); | 10679 | handle_linkup_change(dd, 1); |
| 10680 | pio_kernel_linkup(dd); | ||
| 10677 | 10681 | ||
| 10678 | /* | 10682 | /* |
| 10679 | * After link up, a new link width will have been set. | 10683 | * After link up, a new link width will have been set. |
diff --git a/drivers/infiniband/hw/hfi1/pio.c b/drivers/infiniband/hw/hfi1/pio.c index c2c1cba5b23b..752057647f09 100644 --- a/drivers/infiniband/hw/hfi1/pio.c +++ b/drivers/infiniband/hw/hfi1/pio.c | |||
| @@ -86,6 +86,7 @@ void pio_send_control(struct hfi1_devdata *dd, int op) | |||
| 86 | unsigned long flags; | 86 | unsigned long flags; |
| 87 | int write = 1; /* write sendctrl back */ | 87 | int write = 1; /* write sendctrl back */ |
| 88 | int flush = 0; /* re-read sendctrl to make sure it is flushed */ | 88 | int flush = 0; /* re-read sendctrl to make sure it is flushed */ |
| 89 | int i; | ||
| 89 | 90 | ||
| 90 | spin_lock_irqsave(&dd->sendctrl_lock, flags); | 91 | spin_lock_irqsave(&dd->sendctrl_lock, flags); |
| 91 | 92 | ||
| @@ -95,9 +96,13 @@ void pio_send_control(struct hfi1_devdata *dd, int op) | |||
| 95 | reg |= SEND_CTRL_SEND_ENABLE_SMASK; | 96 | reg |= SEND_CTRL_SEND_ENABLE_SMASK; |
| 96 | /* Fall through */ | 97 | /* Fall through */ |
| 97 | case PSC_DATA_VL_ENABLE: | 98 | case PSC_DATA_VL_ENABLE: |
| 99 | mask = 0; | ||
| 100 | for (i = 0; i < ARRAY_SIZE(dd->vld); i++) | ||
| 101 | if (!dd->vld[i].mtu) | ||
| 102 | mask |= BIT_ULL(i); | ||
| 98 | /* Disallow sending on VLs not enabled */ | 103 | /* Disallow sending on VLs not enabled */ |
| 99 | mask = (((~0ull) << num_vls) & SEND_CTRL_UNSUPPORTED_VL_MASK) << | 104 | mask = (mask & SEND_CTRL_UNSUPPORTED_VL_MASK) << |
| 100 | SEND_CTRL_UNSUPPORTED_VL_SHIFT; | 105 | SEND_CTRL_UNSUPPORTED_VL_SHIFT; |
| 101 | reg = (reg & ~SEND_CTRL_UNSUPPORTED_VL_SMASK) | mask; | 106 | reg = (reg & ~SEND_CTRL_UNSUPPORTED_VL_SMASK) | mask; |
| 102 | break; | 107 | break; |
| 103 | case PSC_GLOBAL_DISABLE: | 108 | case PSC_GLOBAL_DISABLE: |
| @@ -921,20 +926,18 @@ void sc_free(struct send_context *sc) | |||
| 921 | void sc_disable(struct send_context *sc) | 926 | void sc_disable(struct send_context *sc) |
| 922 | { | 927 | { |
| 923 | u64 reg; | 928 | u64 reg; |
| 924 | unsigned long flags; | ||
| 925 | struct pio_buf *pbuf; | 929 | struct pio_buf *pbuf; |
| 926 | 930 | ||
| 927 | if (!sc) | 931 | if (!sc) |
| 928 | return; | 932 | return; |
| 929 | 933 | ||
| 930 | /* do all steps, even if already disabled */ | 934 | /* do all steps, even if already disabled */ |
| 931 | spin_lock_irqsave(&sc->alloc_lock, flags); | 935 | spin_lock_irq(&sc->alloc_lock); |
| 932 | reg = read_kctxt_csr(sc->dd, sc->hw_context, SC(CTRL)); | 936 | reg = read_kctxt_csr(sc->dd, sc->hw_context, SC(CTRL)); |
| 933 | reg &= ~SC(CTRL_CTXT_ENABLE_SMASK); | 937 | reg &= ~SC(CTRL_CTXT_ENABLE_SMASK); |
| 934 | sc->flags &= ~SCF_ENABLED; | 938 | sc->flags &= ~SCF_ENABLED; |
| 935 | sc_wait_for_packet_egress(sc, 1); | 939 | sc_wait_for_packet_egress(sc, 1); |
| 936 | write_kctxt_csr(sc->dd, sc->hw_context, SC(CTRL), reg); | 940 | write_kctxt_csr(sc->dd, sc->hw_context, SC(CTRL), reg); |
| 937 | spin_unlock_irqrestore(&sc->alloc_lock, flags); | ||
| 938 | 941 | ||
| 939 | /* | 942 | /* |
| 940 | * Flush any waiters. Once the context is disabled, | 943 | * Flush any waiters. Once the context is disabled, |
| @@ -944,7 +947,7 @@ void sc_disable(struct send_context *sc) | |||
| 944 | * proceed with the flush. | 947 | * proceed with the flush. |
| 945 | */ | 948 | */ |
| 946 | udelay(1); | 949 | udelay(1); |
| 947 | spin_lock_irqsave(&sc->release_lock, flags); | 950 | spin_lock(&sc->release_lock); |
| 948 | if (sc->sr) { /* this context has a shadow ring */ | 951 | if (sc->sr) { /* this context has a shadow ring */ |
| 949 | while (sc->sr_tail != sc->sr_head) { | 952 | while (sc->sr_tail != sc->sr_head) { |
| 950 | pbuf = &sc->sr[sc->sr_tail].pbuf; | 953 | pbuf = &sc->sr[sc->sr_tail].pbuf; |
| @@ -955,7 +958,8 @@ void sc_disable(struct send_context *sc) | |||
| 955 | sc->sr_tail = 0; | 958 | sc->sr_tail = 0; |
| 956 | } | 959 | } |
| 957 | } | 960 | } |
| 958 | spin_unlock_irqrestore(&sc->release_lock, flags); | 961 | spin_unlock(&sc->release_lock); |
| 962 | spin_unlock_irq(&sc->alloc_lock); | ||
| 959 | } | 963 | } |
| 960 | 964 | ||
| 961 | /* return SendEgressCtxtStatus.PacketOccupancy */ | 965 | /* return SendEgressCtxtStatus.PacketOccupancy */ |
| @@ -1178,11 +1182,39 @@ void pio_kernel_unfreeze(struct hfi1_devdata *dd) | |||
| 1178 | sc = dd->send_contexts[i].sc; | 1182 | sc = dd->send_contexts[i].sc; |
| 1179 | if (!sc || !(sc->flags & SCF_FROZEN) || sc->type == SC_USER) | 1183 | if (!sc || !(sc->flags & SCF_FROZEN) || sc->type == SC_USER) |
| 1180 | continue; | 1184 | continue; |
| 1185 | if (sc->flags & SCF_LINK_DOWN) | ||
| 1186 | continue; | ||
| 1181 | 1187 | ||
| 1182 | sc_enable(sc); /* will clear the sc frozen flag */ | 1188 | sc_enable(sc); /* will clear the sc frozen flag */ |
| 1183 | } | 1189 | } |
| 1184 | } | 1190 | } |
| 1185 | 1191 | ||
| 1192 | /** | ||
| 1193 | * pio_kernel_linkup() - Re-enable send contexts after linkup event | ||
| 1194 | * @dd: valid device data | ||
| 1195 | * | ||
| 1196 | * When the link goes down, the freeze path is taken. However, a link down | ||
| 1197 | * event is different from a freeze because if the send context is re-enabled | ||
| 1198 | * whoever is sending data will start sending data again, which will hang | ||
| 1199 | * any QP that is sending data. | ||
| 1200 | * | ||
| 1201 | * The freeze path now looks at the type of event that occurs and takes this | ||
| 1202 | * path for a link down event. | ||
| 1203 | */ | ||
| 1204 | void pio_kernel_linkup(struct hfi1_devdata *dd) | ||
| 1205 | { | ||
| 1206 | struct send_context *sc; | ||
| 1207 | int i; | ||
| 1208 | |||
| 1209 | for (i = 0; i < dd->num_send_contexts; i++) { | ||
| 1210 | sc = dd->send_contexts[i].sc; | ||
| 1211 | if (!sc || !(sc->flags & SCF_LINK_DOWN) || sc->type == SC_USER) | ||
| 1212 | continue; | ||
| 1213 | |||
| 1214 | sc_enable(sc); /* will clear the sc link down flag */ | ||
| 1215 | } | ||
| 1216 | } | ||
| 1217 | |||
| 1186 | /* | 1218 | /* |
| 1187 | * Wait for the SendPioInitCtxt.PioInitInProgress bit to clear. | 1219 | * Wait for the SendPioInitCtxt.PioInitInProgress bit to clear. |
| 1188 | * Returns: | 1220 | * Returns: |
| @@ -1382,11 +1414,10 @@ void sc_stop(struct send_context *sc, int flag) | |||
| 1382 | { | 1414 | { |
| 1383 | unsigned long flags; | 1415 | unsigned long flags; |
| 1384 | 1416 | ||
| 1385 | /* mark the context */ | ||
| 1386 | sc->flags |= flag; | ||
| 1387 | |||
| 1388 | /* stop buffer allocations */ | 1417 | /* stop buffer allocations */ |
| 1389 | spin_lock_irqsave(&sc->alloc_lock, flags); | 1418 | spin_lock_irqsave(&sc->alloc_lock, flags); |
| 1419 | /* mark the context */ | ||
| 1420 | sc->flags |= flag; | ||
| 1390 | sc->flags &= ~SCF_ENABLED; | 1421 | sc->flags &= ~SCF_ENABLED; |
| 1391 | spin_unlock_irqrestore(&sc->alloc_lock, flags); | 1422 | spin_unlock_irqrestore(&sc->alloc_lock, flags); |
| 1392 | wake_up(&sc->halt_wait); | 1423 | wake_up(&sc->halt_wait); |
diff --git a/drivers/infiniband/hw/hfi1/pio.h b/drivers/infiniband/hw/hfi1/pio.h index 058b08f459ab..aaf372c3e5d6 100644 --- a/drivers/infiniband/hw/hfi1/pio.h +++ b/drivers/infiniband/hw/hfi1/pio.h | |||
| @@ -139,6 +139,7 @@ struct send_context { | |||
| 139 | #define SCF_IN_FREE 0x02 | 139 | #define SCF_IN_FREE 0x02 |
| 140 | #define SCF_HALTED 0x04 | 140 | #define SCF_HALTED 0x04 |
| 141 | #define SCF_FROZEN 0x08 | 141 | #define SCF_FROZEN 0x08 |
| 142 | #define SCF_LINK_DOWN 0x10 | ||
| 142 | 143 | ||
| 143 | struct send_context_info { | 144 | struct send_context_info { |
| 144 | struct send_context *sc; /* allocated working context */ | 145 | struct send_context *sc; /* allocated working context */ |
| @@ -306,6 +307,7 @@ void set_pio_integrity(struct send_context *sc); | |||
| 306 | void pio_reset_all(struct hfi1_devdata *dd); | 307 | void pio_reset_all(struct hfi1_devdata *dd); |
| 307 | void pio_freeze(struct hfi1_devdata *dd); | 308 | void pio_freeze(struct hfi1_devdata *dd); |
| 308 | void pio_kernel_unfreeze(struct hfi1_devdata *dd); | 309 | void pio_kernel_unfreeze(struct hfi1_devdata *dd); |
| 310 | void pio_kernel_linkup(struct hfi1_devdata *dd); | ||
| 309 | 311 | ||
| 310 | /* global PIO send control operations */ | 312 | /* global PIO send control operations */ |
| 311 | #define PSC_GLOBAL_ENABLE 0 | 313 | #define PSC_GLOBAL_ENABLE 0 |
diff --git a/drivers/infiniband/hw/hfi1/user_sdma.c b/drivers/infiniband/hw/hfi1/user_sdma.c index a3a7b33196d6..5c88706121c1 100644 --- a/drivers/infiniband/hw/hfi1/user_sdma.c +++ b/drivers/infiniband/hw/hfi1/user_sdma.c | |||
| @@ -828,7 +828,7 @@ static int user_sdma_send_pkts(struct user_sdma_request *req, unsigned maxpkts) | |||
| 828 | if (READ_ONCE(iovec->offset) == iovec->iov.iov_len) { | 828 | if (READ_ONCE(iovec->offset) == iovec->iov.iov_len) { |
| 829 | if (++req->iov_idx == req->data_iovs) { | 829 | if (++req->iov_idx == req->data_iovs) { |
| 830 | ret = -EFAULT; | 830 | ret = -EFAULT; |
| 831 | goto free_txreq; | 831 | goto free_tx; |
| 832 | } | 832 | } |
| 833 | iovec = &req->iovs[req->iov_idx]; | 833 | iovec = &req->iovs[req->iov_idx]; |
| 834 | WARN_ON(iovec->offset); | 834 | WARN_ON(iovec->offset); |
diff --git a/drivers/infiniband/hw/hfi1/verbs.c b/drivers/infiniband/hw/hfi1/verbs.c index 13374c727b14..a7c586a5589d 100644 --- a/drivers/infiniband/hw/hfi1/verbs.c +++ b/drivers/infiniband/hw/hfi1/verbs.c | |||
| @@ -1582,6 +1582,7 @@ static int hfi1_check_ah(struct ib_device *ibdev, struct rdma_ah_attr *ah_attr) | |||
| 1582 | struct hfi1_pportdata *ppd; | 1582 | struct hfi1_pportdata *ppd; |
| 1583 | struct hfi1_devdata *dd; | 1583 | struct hfi1_devdata *dd; |
| 1584 | u8 sc5; | 1584 | u8 sc5; |
| 1585 | u8 sl; | ||
| 1585 | 1586 | ||
| 1586 | if (hfi1_check_mcast(rdma_ah_get_dlid(ah_attr)) && | 1587 | if (hfi1_check_mcast(rdma_ah_get_dlid(ah_attr)) && |
| 1587 | !(rdma_ah_get_ah_flags(ah_attr) & IB_AH_GRH)) | 1588 | !(rdma_ah_get_ah_flags(ah_attr) & IB_AH_GRH)) |
| @@ -1590,8 +1591,13 @@ static int hfi1_check_ah(struct ib_device *ibdev, struct rdma_ah_attr *ah_attr) | |||
| 1590 | /* test the mapping for validity */ | 1591 | /* test the mapping for validity */ |
| 1591 | ibp = to_iport(ibdev, rdma_ah_get_port_num(ah_attr)); | 1592 | ibp = to_iport(ibdev, rdma_ah_get_port_num(ah_attr)); |
| 1592 | ppd = ppd_from_ibp(ibp); | 1593 | ppd = ppd_from_ibp(ibp); |
| 1593 | sc5 = ibp->sl_to_sc[rdma_ah_get_sl(ah_attr)]; | ||
| 1594 | dd = dd_from_ppd(ppd); | 1594 | dd = dd_from_ppd(ppd); |
| 1595 | |||
| 1596 | sl = rdma_ah_get_sl(ah_attr); | ||
| 1597 | if (sl >= ARRAY_SIZE(ibp->sl_to_sc)) | ||
| 1598 | return -EINVAL; | ||
| 1599 | |||
| 1600 | sc5 = ibp->sl_to_sc[sl]; | ||
| 1595 | if (sc_to_vlt(dd, sc5) > num_vls && sc_to_vlt(dd, sc5) != 0xf) | 1601 | if (sc_to_vlt(dd, sc5) > num_vls && sc_to_vlt(dd, sc5) != 0xf) |
| 1596 | return -EINVAL; | 1602 | return -EINVAL; |
| 1597 | return 0; | 1603 | return 0; |
diff --git a/drivers/infiniband/hw/mlx5/devx.c b/drivers/infiniband/hw/mlx5/devx.c index ac116d63e466..f2f11e652dcd 100644 --- a/drivers/infiniband/hw/mlx5/devx.c +++ b/drivers/infiniband/hw/mlx5/devx.c | |||
| @@ -723,6 +723,7 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_CREATE)( | |||
| 723 | attrs, MLX5_IB_ATTR_DEVX_OBJ_CREATE_HANDLE); | 723 | attrs, MLX5_IB_ATTR_DEVX_OBJ_CREATE_HANDLE); |
| 724 | struct mlx5_ib_ucontext *c = to_mucontext(uobj->context); | 724 | struct mlx5_ib_ucontext *c = to_mucontext(uobj->context); |
| 725 | struct mlx5_ib_dev *dev = to_mdev(c->ibucontext.device); | 725 | struct mlx5_ib_dev *dev = to_mdev(c->ibucontext.device); |
| 726 | u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)]; | ||
| 726 | struct devx_obj *obj; | 727 | struct devx_obj *obj; |
| 727 | int err; | 728 | int err; |
| 728 | 729 | ||
| @@ -754,10 +755,12 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_CREATE)( | |||
| 754 | 755 | ||
| 755 | err = uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_OBJ_CREATE_CMD_OUT, cmd_out, cmd_out_len); | 756 | err = uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_OBJ_CREATE_CMD_OUT, cmd_out, cmd_out_len); |
| 756 | if (err) | 757 | if (err) |
| 757 | goto obj_free; | 758 | goto obj_destroy; |
| 758 | 759 | ||
| 759 | return 0; | 760 | return 0; |
| 760 | 761 | ||
| 762 | obj_destroy: | ||
| 763 | mlx5_cmd_exec(obj->mdev, obj->dinbox, obj->dinlen, out, sizeof(out)); | ||
| 761 | obj_free: | 764 | obj_free: |
| 762 | kfree(obj); | 765 | kfree(obj); |
| 763 | return err; | 766 | return err; |
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c index 444d16520506..0b34e909505f 100644 --- a/drivers/infiniband/ulp/srp/ib_srp.c +++ b/drivers/infiniband/ulp/srp/ib_srp.c | |||
| @@ -2951,7 +2951,7 @@ static int srp_reset_device(struct scsi_cmnd *scmnd) | |||
| 2951 | { | 2951 | { |
| 2952 | struct srp_target_port *target = host_to_target(scmnd->device->host); | 2952 | struct srp_target_port *target = host_to_target(scmnd->device->host); |
| 2953 | struct srp_rdma_ch *ch; | 2953 | struct srp_rdma_ch *ch; |
| 2954 | int i; | 2954 | int i, j; |
| 2955 | u8 status; | 2955 | u8 status; |
| 2956 | 2956 | ||
| 2957 | shost_printk(KERN_ERR, target->scsi_host, "SRP reset_device called\n"); | 2957 | shost_printk(KERN_ERR, target->scsi_host, "SRP reset_device called\n"); |
| @@ -2965,8 +2965,8 @@ static int srp_reset_device(struct scsi_cmnd *scmnd) | |||
| 2965 | 2965 | ||
| 2966 | for (i = 0; i < target->ch_count; i++) { | 2966 | for (i = 0; i < target->ch_count; i++) { |
| 2967 | ch = &target->ch[i]; | 2967 | ch = &target->ch[i]; |
| 2968 | for (i = 0; i < target->req_ring_size; ++i) { | 2968 | for (j = 0; j < target->req_ring_size; ++j) { |
| 2969 | struct srp_request *req = &ch->req_ring[i]; | 2969 | struct srp_request *req = &ch->req_ring[j]; |
| 2970 | 2970 | ||
| 2971 | srp_finish_req(ch, req, scmnd->device, DID_RESET << 16); | 2971 | srp_finish_req(ch, req, scmnd->device, DID_RESET << 16); |
| 2972 | } | 2972 | } |
diff --git a/drivers/input/keyboard/atakbd.c b/drivers/input/keyboard/atakbd.c index 6f62da2909ec..6caee807cafa 100644 --- a/drivers/input/keyboard/atakbd.c +++ b/drivers/input/keyboard/atakbd.c | |||
| @@ -75,8 +75,7 @@ MODULE_LICENSE("GPL"); | |||
| 75 | */ | 75 | */ |
| 76 | 76 | ||
| 77 | 77 | ||
| 78 | static unsigned char atakbd_keycode[0x72] = { /* American layout */ | 78 | static unsigned char atakbd_keycode[0x73] = { /* American layout */ |
| 79 | [0] = KEY_GRAVE, | ||
| 80 | [1] = KEY_ESC, | 79 | [1] = KEY_ESC, |
| 81 | [2] = KEY_1, | 80 | [2] = KEY_1, |
| 82 | [3] = KEY_2, | 81 | [3] = KEY_2, |
| @@ -117,9 +116,9 @@ static unsigned char atakbd_keycode[0x72] = { /* American layout */ | |||
| 117 | [38] = KEY_L, | 116 | [38] = KEY_L, |
| 118 | [39] = KEY_SEMICOLON, | 117 | [39] = KEY_SEMICOLON, |
| 119 | [40] = KEY_APOSTROPHE, | 118 | [40] = KEY_APOSTROPHE, |
| 120 | [41] = KEY_BACKSLASH, /* FIXME, '#' */ | 119 | [41] = KEY_GRAVE, |
| 121 | [42] = KEY_LEFTSHIFT, | 120 | [42] = KEY_LEFTSHIFT, |
| 122 | [43] = KEY_GRAVE, /* FIXME: '~' */ | 121 | [43] = KEY_BACKSLASH, |
| 123 | [44] = KEY_Z, | 122 | [44] = KEY_Z, |
| 124 | [45] = KEY_X, | 123 | [45] = KEY_X, |
| 125 | [46] = KEY_C, | 124 | [46] = KEY_C, |
| @@ -145,45 +144,34 @@ static unsigned char atakbd_keycode[0x72] = { /* American layout */ | |||
| 145 | [66] = KEY_F8, | 144 | [66] = KEY_F8, |
| 146 | [67] = KEY_F9, | 145 | [67] = KEY_F9, |
| 147 | [68] = KEY_F10, | 146 | [68] = KEY_F10, |
| 148 | [69] = KEY_ESC, | 147 | [71] = KEY_HOME, |
| 149 | [70] = KEY_DELETE, | 148 | [72] = KEY_UP, |
| 150 | [71] = KEY_KP7, | ||
| 151 | [72] = KEY_KP8, | ||
| 152 | [73] = KEY_KP9, | ||
| 153 | [74] = KEY_KPMINUS, | 149 | [74] = KEY_KPMINUS, |
| 154 | [75] = KEY_KP4, | 150 | [75] = KEY_LEFT, |
| 155 | [76] = KEY_KP5, | 151 | [77] = KEY_RIGHT, |
| 156 | [77] = KEY_KP6, | ||
| 157 | [78] = KEY_KPPLUS, | 152 | [78] = KEY_KPPLUS, |
| 158 | [79] = KEY_KP1, | 153 | [80] = KEY_DOWN, |
| 159 | [80] = KEY_KP2, | 154 | [82] = KEY_INSERT, |
| 160 | [81] = KEY_KP3, | 155 | [83] = KEY_DELETE, |
| 161 | [82] = KEY_KP0, | ||
| 162 | [83] = KEY_KPDOT, | ||
| 163 | [90] = KEY_KPLEFTPAREN, | ||
| 164 | [91] = KEY_KPRIGHTPAREN, | ||
| 165 | [92] = KEY_KPASTERISK, /* FIXME */ | ||
| 166 | [93] = KEY_KPASTERISK, | ||
| 167 | [94] = KEY_KPPLUS, | ||
| 168 | [95] = KEY_HELP, | ||
| 169 | [96] = KEY_102ND, | 156 | [96] = KEY_102ND, |
| 170 | [97] = KEY_KPASTERISK, /* FIXME */ | 157 | [97] = KEY_UNDO, |
| 171 | [98] = KEY_KPSLASH, | 158 | [98] = KEY_HELP, |
| 172 | [99] = KEY_KPLEFTPAREN, | 159 | [99] = KEY_KPLEFTPAREN, |
| 173 | [100] = KEY_KPRIGHTPAREN, | 160 | [100] = KEY_KPRIGHTPAREN, |
| 174 | [101] = KEY_KPSLASH, | 161 | [101] = KEY_KPSLASH, |
| 175 | [102] = KEY_KPASTERISK, | 162 | [102] = KEY_KPASTERISK, |
| 176 | [103] = KEY_UP, | 163 | [103] = KEY_KP7, |
| 177 | [104] = KEY_KPASTERISK, /* FIXME */ | 164 | [104] = KEY_KP8, |
| 178 | [105] = KEY_LEFT, | 165 | [105] = KEY_KP9, |
| 179 | [106] = KEY_RIGHT, | 166 | [106] = KEY_KP4, |
| 180 | [107] = KEY_KPASTERISK, /* FIXME */ | 167 | [107] = KEY_KP5, |
| 181 | [108] = KEY_DOWN, | 168 | [108] = KEY_KP6, |
| 182 | [109] = KEY_KPASTERISK, /* FIXME */ | 169 | [109] = KEY_KP1, |
| 183 | [110] = KEY_KPASTERISK, /* FIXME */ | 170 | [110] = KEY_KP2, |
| 184 | [111] = KEY_KPASTERISK, /* FIXME */ | 171 | [111] = KEY_KP3, |
| 185 | [112] = KEY_KPASTERISK, /* FIXME */ | 172 | [112] = KEY_KP0, |
| 186 | [113] = KEY_KPASTERISK /* FIXME */ | 173 | [113] = KEY_KPDOT, |
| 174 | [114] = KEY_KPENTER, | ||
| 187 | }; | 175 | }; |
| 188 | 176 | ||
| 189 | static struct input_dev *atakbd_dev; | 177 | static struct input_dev *atakbd_dev; |
| @@ -191,21 +179,15 @@ static struct input_dev *atakbd_dev; | |||
| 191 | static void atakbd_interrupt(unsigned char scancode, char down) | 179 | static void atakbd_interrupt(unsigned char scancode, char down) |
| 192 | { | 180 | { |
| 193 | 181 | ||
| 194 | if (scancode < 0x72) { /* scancodes < 0xf2 are keys */ | 182 | if (scancode < 0x73) { /* scancodes < 0xf3 are keys */ |
| 195 | 183 | ||
| 196 | // report raw events here? | 184 | // report raw events here? |
| 197 | 185 | ||
| 198 | scancode = atakbd_keycode[scancode]; | 186 | scancode = atakbd_keycode[scancode]; |
| 199 | 187 | ||
| 200 | if (scancode == KEY_CAPSLOCK) { /* CapsLock is a toggle switch key on Amiga */ | 188 | input_report_key(atakbd_dev, scancode, down); |
| 201 | input_report_key(atakbd_dev, scancode, 1); | 189 | input_sync(atakbd_dev); |
| 202 | input_report_key(atakbd_dev, scancode, 0); | 190 | } else /* scancodes >= 0xf3 are mouse data, most likely */ |
| 203 | input_sync(atakbd_dev); | ||
| 204 | } else { | ||
| 205 | input_report_key(atakbd_dev, scancode, down); | ||
| 206 | input_sync(atakbd_dev); | ||
| 207 | } | ||
| 208 | } else /* scancodes >= 0xf2 are mouse data, most likely */ | ||
| 209 | printk(KERN_INFO "atakbd: unhandled scancode %x\n", scancode); | 191 | printk(KERN_INFO "atakbd: unhandled scancode %x\n", scancode); |
| 210 | 192 | ||
| 211 | return; | 193 | return; |
diff --git a/drivers/input/misc/uinput.c b/drivers/input/misc/uinput.c index 96a887f33698..eb14ddf69346 100644 --- a/drivers/input/misc/uinput.c +++ b/drivers/input/misc/uinput.c | |||
| @@ -410,7 +410,7 @@ static int uinput_validate_absinfo(struct input_dev *dev, unsigned int code, | |||
| 410 | min = abs->minimum; | 410 | min = abs->minimum; |
| 411 | max = abs->maximum; | 411 | max = abs->maximum; |
| 412 | 412 | ||
| 413 | if ((min != 0 || max != 0) && max <= min) { | 413 | if ((min != 0 || max != 0) && max < min) { |
| 414 | printk(KERN_DEBUG | 414 | printk(KERN_DEBUG |
| 415 | "%s: invalid abs[%02x] min:%d max:%d\n", | 415 | "%s: invalid abs[%02x] min:%d max:%d\n", |
| 416 | UINPUT_NAME, code, min, max); | 416 | UINPUT_NAME, code, min, max); |
diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c index 44f57cf6675b..2d95e8d93cc7 100644 --- a/drivers/input/mouse/elantech.c +++ b/drivers/input/mouse/elantech.c | |||
| @@ -1178,6 +1178,8 @@ static const struct dmi_system_id elantech_dmi_has_middle_button[] = { | |||
| 1178 | static const char * const middle_button_pnp_ids[] = { | 1178 | static const char * const middle_button_pnp_ids[] = { |
| 1179 | "LEN2131", /* ThinkPad P52 w/ NFC */ | 1179 | "LEN2131", /* ThinkPad P52 w/ NFC */ |
| 1180 | "LEN2132", /* ThinkPad P52 */ | 1180 | "LEN2132", /* ThinkPad P52 */ |
| 1181 | "LEN2133", /* ThinkPad P72 w/ NFC */ | ||
| 1182 | "LEN2134", /* ThinkPad P72 */ | ||
| 1181 | NULL | 1183 | NULL |
| 1182 | }; | 1184 | }; |
| 1183 | 1185 | ||
diff --git a/drivers/input/touchscreen/egalax_ts.c b/drivers/input/touchscreen/egalax_ts.c index 80e69bb8283e..83ac8c128192 100644 --- a/drivers/input/touchscreen/egalax_ts.c +++ b/drivers/input/touchscreen/egalax_ts.c | |||
| @@ -241,6 +241,9 @@ static int __maybe_unused egalax_ts_suspend(struct device *dev) | |||
| 241 | struct i2c_client *client = to_i2c_client(dev); | 241 | struct i2c_client *client = to_i2c_client(dev); |
| 242 | int ret; | 242 | int ret; |
| 243 | 243 | ||
| 244 | if (device_may_wakeup(dev)) | ||
| 245 | return enable_irq_wake(client->irq); | ||
| 246 | |||
| 244 | ret = i2c_master_send(client, suspend_cmd, MAX_I2C_DATA_LEN); | 247 | ret = i2c_master_send(client, suspend_cmd, MAX_I2C_DATA_LEN); |
| 245 | return ret > 0 ? 0 : ret; | 248 | return ret > 0 ? 0 : ret; |
| 246 | } | 249 | } |
| @@ -249,6 +252,9 @@ static int __maybe_unused egalax_ts_resume(struct device *dev) | |||
| 249 | { | 252 | { |
| 250 | struct i2c_client *client = to_i2c_client(dev); | 253 | struct i2c_client *client = to_i2c_client(dev); |
| 251 | 254 | ||
| 255 | if (device_may_wakeup(dev)) | ||
| 256 | return disable_irq_wake(client->irq); | ||
| 257 | |||
| 252 | return egalax_wake_up_device(client); | 258 | return egalax_wake_up_device(client); |
| 253 | } | 259 | } |
| 254 | 260 | ||
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c index 4e04fff23977..73e47d93e7a0 100644 --- a/drivers/iommu/amd_iommu.c +++ b/drivers/iommu/amd_iommu.c | |||
| @@ -246,7 +246,13 @@ static u16 get_alias(struct device *dev) | |||
| 246 | 246 | ||
| 247 | /* The callers make sure that get_device_id() does not fail here */ | 247 | /* The callers make sure that get_device_id() does not fail here */ |
| 248 | devid = get_device_id(dev); | 248 | devid = get_device_id(dev); |
| 249 | |||
| 250 | /* For ACPI HID devices, we simply return the devid as such */ | ||
| 251 | if (!dev_is_pci(dev)) | ||
| 252 | return devid; | ||
| 253 | |||
| 249 | ivrs_alias = amd_iommu_alias_table[devid]; | 254 | ivrs_alias = amd_iommu_alias_table[devid]; |
| 255 | |||
| 250 | pci_for_each_dma_alias(pdev, __last_alias, &pci_alias); | 256 | pci_for_each_dma_alias(pdev, __last_alias, &pci_alias); |
| 251 | 257 | ||
| 252 | if (ivrs_alias == pci_alias) | 258 | if (ivrs_alias == pci_alias) |
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c index 5f3f10cf9d9d..bedc801b06a0 100644 --- a/drivers/iommu/intel-iommu.c +++ b/drivers/iommu/intel-iommu.c | |||
| @@ -2540,9 +2540,9 @@ static struct dmar_domain *dmar_insert_one_dev_info(struct intel_iommu *iommu, | |||
| 2540 | if (dev && dev_is_pci(dev) && info->pasid_supported) { | 2540 | if (dev && dev_is_pci(dev) && info->pasid_supported) { |
| 2541 | ret = intel_pasid_alloc_table(dev); | 2541 | ret = intel_pasid_alloc_table(dev); |
| 2542 | if (ret) { | 2542 | if (ret) { |
| 2543 | __dmar_remove_one_dev_info(info); | 2543 | pr_warn("No pasid table for %s, pasid disabled\n", |
| 2544 | spin_unlock_irqrestore(&device_domain_lock, flags); | 2544 | dev_name(dev)); |
| 2545 | return NULL; | 2545 | info->pasid_supported = 0; |
| 2546 | } | 2546 | } |
| 2547 | } | 2547 | } |
| 2548 | spin_unlock_irqrestore(&device_domain_lock, flags); | 2548 | spin_unlock_irqrestore(&device_domain_lock, flags); |
diff --git a/drivers/iommu/intel-pasid.h b/drivers/iommu/intel-pasid.h index 1c05ed6fc5a5..1fb5e12b029a 100644 --- a/drivers/iommu/intel-pasid.h +++ b/drivers/iommu/intel-pasid.h | |||
| @@ -11,7 +11,7 @@ | |||
| 11 | #define __INTEL_PASID_H | 11 | #define __INTEL_PASID_H |
| 12 | 12 | ||
| 13 | #define PASID_MIN 0x1 | 13 | #define PASID_MIN 0x1 |
| 14 | #define PASID_MAX 0x100000 | 14 | #define PASID_MAX 0x20000 |
| 15 | 15 | ||
| 16 | struct pasid_entry { | 16 | struct pasid_entry { |
| 17 | u64 val; | 17 | u64 val; |
diff --git a/drivers/iommu/rockchip-iommu.c b/drivers/iommu/rockchip-iommu.c index 258115b10fa9..ad3e2b97469e 100644 --- a/drivers/iommu/rockchip-iommu.c +++ b/drivers/iommu/rockchip-iommu.c | |||
| @@ -1241,6 +1241,12 @@ err_unprepare_clocks: | |||
| 1241 | 1241 | ||
| 1242 | static void rk_iommu_shutdown(struct platform_device *pdev) | 1242 | static void rk_iommu_shutdown(struct platform_device *pdev) |
| 1243 | { | 1243 | { |
| 1244 | struct rk_iommu *iommu = platform_get_drvdata(pdev); | ||
| 1245 | int i = 0, irq; | ||
| 1246 | |||
| 1247 | while ((irq = platform_get_irq(pdev, i++)) != -ENXIO) | ||
| 1248 | devm_free_irq(iommu->dev, irq, iommu); | ||
| 1249 | |||
| 1244 | pm_runtime_force_suspend(&pdev->dev); | 1250 | pm_runtime_force_suspend(&pdev->dev); |
| 1245 | } | 1251 | } |
| 1246 | 1252 | ||
diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h index 83504dd8100a..954dad29e6e8 100644 --- a/drivers/md/bcache/bcache.h +++ b/drivers/md/bcache/bcache.h | |||
| @@ -965,6 +965,7 @@ void bch_prio_write(struct cache *ca); | |||
| 965 | void bch_write_bdev_super(struct cached_dev *dc, struct closure *parent); | 965 | void bch_write_bdev_super(struct cached_dev *dc, struct closure *parent); |
| 966 | 966 | ||
| 967 | extern struct workqueue_struct *bcache_wq; | 967 | extern struct workqueue_struct *bcache_wq; |
| 968 | extern struct workqueue_struct *bch_journal_wq; | ||
| 968 | extern struct mutex bch_register_lock; | 969 | extern struct mutex bch_register_lock; |
| 969 | extern struct list_head bch_cache_sets; | 970 | extern struct list_head bch_cache_sets; |
| 970 | 971 | ||
diff --git a/drivers/md/bcache/journal.c b/drivers/md/bcache/journal.c index 6116bbf870d8..522c7426f3a0 100644 --- a/drivers/md/bcache/journal.c +++ b/drivers/md/bcache/journal.c | |||
| @@ -485,7 +485,7 @@ static void do_journal_discard(struct cache *ca) | |||
| 485 | 485 | ||
| 486 | closure_get(&ca->set->cl); | 486 | closure_get(&ca->set->cl); |
| 487 | INIT_WORK(&ja->discard_work, journal_discard_work); | 487 | INIT_WORK(&ja->discard_work, journal_discard_work); |
| 488 | schedule_work(&ja->discard_work); | 488 | queue_work(bch_journal_wq, &ja->discard_work); |
| 489 | } | 489 | } |
| 490 | } | 490 | } |
| 491 | 491 | ||
| @@ -592,7 +592,7 @@ static void journal_write_done(struct closure *cl) | |||
| 592 | : &j->w[0]; | 592 | : &j->w[0]; |
| 593 | 593 | ||
| 594 | __closure_wake_up(&w->wait); | 594 | __closure_wake_up(&w->wait); |
| 595 | continue_at_nobarrier(cl, journal_write, system_wq); | 595 | continue_at_nobarrier(cl, journal_write, bch_journal_wq); |
| 596 | } | 596 | } |
| 597 | 597 | ||
| 598 | static void journal_write_unlock(struct closure *cl) | 598 | static void journal_write_unlock(struct closure *cl) |
| @@ -627,7 +627,7 @@ static void journal_write_unlocked(struct closure *cl) | |||
| 627 | spin_unlock(&c->journal.lock); | 627 | spin_unlock(&c->journal.lock); |
| 628 | 628 | ||
| 629 | btree_flush_write(c); | 629 | btree_flush_write(c); |
| 630 | continue_at(cl, journal_write, system_wq); | 630 | continue_at(cl, journal_write, bch_journal_wq); |
| 631 | return; | 631 | return; |
| 632 | } | 632 | } |
| 633 | 633 | ||
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c index 94c756c66bd7..30ba9aeb5ee8 100644 --- a/drivers/md/bcache/super.c +++ b/drivers/md/bcache/super.c | |||
| @@ -47,6 +47,7 @@ static int bcache_major; | |||
| 47 | static DEFINE_IDA(bcache_device_idx); | 47 | static DEFINE_IDA(bcache_device_idx); |
| 48 | static wait_queue_head_t unregister_wait; | 48 | static wait_queue_head_t unregister_wait; |
| 49 | struct workqueue_struct *bcache_wq; | 49 | struct workqueue_struct *bcache_wq; |
| 50 | struct workqueue_struct *bch_journal_wq; | ||
| 50 | 51 | ||
| 51 | #define BTREE_MAX_PAGES (256 * 1024 / PAGE_SIZE) | 52 | #define BTREE_MAX_PAGES (256 * 1024 / PAGE_SIZE) |
| 52 | /* limitation of partitions number on single bcache device */ | 53 | /* limitation of partitions number on single bcache device */ |
| @@ -2341,6 +2342,9 @@ static void bcache_exit(void) | |||
| 2341 | kobject_put(bcache_kobj); | 2342 | kobject_put(bcache_kobj); |
| 2342 | if (bcache_wq) | 2343 | if (bcache_wq) |
| 2343 | destroy_workqueue(bcache_wq); | 2344 | destroy_workqueue(bcache_wq); |
| 2345 | if (bch_journal_wq) | ||
| 2346 | destroy_workqueue(bch_journal_wq); | ||
| 2347 | |||
| 2344 | if (bcache_major) | 2348 | if (bcache_major) |
| 2345 | unregister_blkdev(bcache_major, "bcache"); | 2349 | unregister_blkdev(bcache_major, "bcache"); |
| 2346 | unregister_reboot_notifier(&reboot); | 2350 | unregister_reboot_notifier(&reboot); |
| @@ -2370,6 +2374,10 @@ static int __init bcache_init(void) | |||
| 2370 | if (!bcache_wq) | 2374 | if (!bcache_wq) |
| 2371 | goto err; | 2375 | goto err; |
| 2372 | 2376 | ||
| 2377 | bch_journal_wq = alloc_workqueue("bch_journal", WQ_MEM_RECLAIM, 0); | ||
| 2378 | if (!bch_journal_wq) | ||
| 2379 | goto err; | ||
| 2380 | |||
| 2373 | bcache_kobj = kobject_create_and_add("bcache", fs_kobj); | 2381 | bcache_kobj = kobject_create_and_add("bcache", fs_kobj); |
| 2374 | if (!bcache_kobj) | 2382 | if (!bcache_kobj) |
| 2375 | goto err; | 2383 | goto err; |
diff --git a/drivers/media/i2c/mt9v111.c b/drivers/media/i2c/mt9v111.c index b5410aeb5fe2..bb41bea950ac 100644 --- a/drivers/media/i2c/mt9v111.c +++ b/drivers/media/i2c/mt9v111.c | |||
| @@ -1159,41 +1159,21 @@ static int mt9v111_probe(struct i2c_client *client) | |||
| 1159 | V4L2_CID_AUTO_WHITE_BALANCE, | 1159 | V4L2_CID_AUTO_WHITE_BALANCE, |
| 1160 | 0, 1, 1, | 1160 | 0, 1, 1, |
| 1161 | V4L2_WHITE_BALANCE_AUTO); | 1161 | V4L2_WHITE_BALANCE_AUTO); |
| 1162 | if (IS_ERR_OR_NULL(mt9v111->auto_awb)) { | ||
| 1163 | ret = PTR_ERR(mt9v111->auto_awb); | ||
| 1164 | goto error_free_ctrls; | ||
| 1165 | } | ||
| 1166 | |||
| 1167 | mt9v111->auto_exp = v4l2_ctrl_new_std_menu(&mt9v111->ctrls, | 1162 | mt9v111->auto_exp = v4l2_ctrl_new_std_menu(&mt9v111->ctrls, |
| 1168 | &mt9v111_ctrl_ops, | 1163 | &mt9v111_ctrl_ops, |
| 1169 | V4L2_CID_EXPOSURE_AUTO, | 1164 | V4L2_CID_EXPOSURE_AUTO, |
| 1170 | V4L2_EXPOSURE_MANUAL, | 1165 | V4L2_EXPOSURE_MANUAL, |
| 1171 | 0, V4L2_EXPOSURE_AUTO); | 1166 | 0, V4L2_EXPOSURE_AUTO); |
| 1172 | if (IS_ERR_OR_NULL(mt9v111->auto_exp)) { | ||
| 1173 | ret = PTR_ERR(mt9v111->auto_exp); | ||
| 1174 | goto error_free_ctrls; | ||
| 1175 | } | ||
| 1176 | |||
| 1177 | /* Initialize timings */ | ||
| 1178 | mt9v111->hblank = v4l2_ctrl_new_std(&mt9v111->ctrls, &mt9v111_ctrl_ops, | 1167 | mt9v111->hblank = v4l2_ctrl_new_std(&mt9v111->ctrls, &mt9v111_ctrl_ops, |
| 1179 | V4L2_CID_HBLANK, | 1168 | V4L2_CID_HBLANK, |
| 1180 | MT9V111_CORE_R05_MIN_HBLANK, | 1169 | MT9V111_CORE_R05_MIN_HBLANK, |
| 1181 | MT9V111_CORE_R05_MAX_HBLANK, 1, | 1170 | MT9V111_CORE_R05_MAX_HBLANK, 1, |
| 1182 | MT9V111_CORE_R05_DEF_HBLANK); | 1171 | MT9V111_CORE_R05_DEF_HBLANK); |
| 1183 | if (IS_ERR_OR_NULL(mt9v111->hblank)) { | ||
| 1184 | ret = PTR_ERR(mt9v111->hblank); | ||
| 1185 | goto error_free_ctrls; | ||
| 1186 | } | ||
| 1187 | |||
| 1188 | mt9v111->vblank = v4l2_ctrl_new_std(&mt9v111->ctrls, &mt9v111_ctrl_ops, | 1172 | mt9v111->vblank = v4l2_ctrl_new_std(&mt9v111->ctrls, &mt9v111_ctrl_ops, |
| 1189 | V4L2_CID_VBLANK, | 1173 | V4L2_CID_VBLANK, |
| 1190 | MT9V111_CORE_R06_MIN_VBLANK, | 1174 | MT9V111_CORE_R06_MIN_VBLANK, |
| 1191 | MT9V111_CORE_R06_MAX_VBLANK, 1, | 1175 | MT9V111_CORE_R06_MAX_VBLANK, 1, |
| 1192 | MT9V111_CORE_R06_DEF_VBLANK); | 1176 | MT9V111_CORE_R06_DEF_VBLANK); |
| 1193 | if (IS_ERR_OR_NULL(mt9v111->vblank)) { | ||
| 1194 | ret = PTR_ERR(mt9v111->vblank); | ||
| 1195 | goto error_free_ctrls; | ||
| 1196 | } | ||
| 1197 | 1177 | ||
| 1198 | /* PIXEL_RATE is fixed: just expose it to user space. */ | 1178 | /* PIXEL_RATE is fixed: just expose it to user space. */ |
| 1199 | v4l2_ctrl_new_std(&mt9v111->ctrls, &mt9v111_ctrl_ops, | 1179 | v4l2_ctrl_new_std(&mt9v111->ctrls, &mt9v111_ctrl_ops, |
| @@ -1201,6 +1181,10 @@ static int mt9v111_probe(struct i2c_client *client) | |||
| 1201 | DIV_ROUND_CLOSEST(mt9v111->sysclk, 2), 1, | 1181 | DIV_ROUND_CLOSEST(mt9v111->sysclk, 2), 1, |
| 1202 | DIV_ROUND_CLOSEST(mt9v111->sysclk, 2)); | 1182 | DIV_ROUND_CLOSEST(mt9v111->sysclk, 2)); |
| 1203 | 1183 | ||
| 1184 | if (mt9v111->ctrls.error) { | ||
| 1185 | ret = mt9v111->ctrls.error; | ||
| 1186 | goto error_free_ctrls; | ||
| 1187 | } | ||
| 1204 | mt9v111->sd.ctrl_handler = &mt9v111->ctrls; | 1188 | mt9v111->sd.ctrl_handler = &mt9v111->ctrls; |
| 1205 | 1189 | ||
| 1206 | /* Start with default configuration: 640x480 UYVY. */ | 1190 | /* Start with default configuration: 640x480 UYVY. */ |
| @@ -1226,26 +1210,27 @@ static int mt9v111_probe(struct i2c_client *client) | |||
| 1226 | mt9v111->pad.flags = MEDIA_PAD_FL_SOURCE; | 1210 | mt9v111->pad.flags = MEDIA_PAD_FL_SOURCE; |
| 1227 | ret = media_entity_pads_init(&mt9v111->sd.entity, 1, &mt9v111->pad); | 1211 | ret = media_entity_pads_init(&mt9v111->sd.entity, 1, &mt9v111->pad); |
| 1228 | if (ret) | 1212 | if (ret) |
| 1229 | goto error_free_ctrls; | 1213 | goto error_free_entity; |
| 1230 | #endif | 1214 | #endif |
| 1231 | 1215 | ||
| 1232 | ret = mt9v111_chip_probe(mt9v111); | 1216 | ret = mt9v111_chip_probe(mt9v111); |
| 1233 | if (ret) | 1217 | if (ret) |
| 1234 | goto error_free_ctrls; | 1218 | goto error_free_entity; |
| 1235 | 1219 | ||
| 1236 | ret = v4l2_async_register_subdev(&mt9v111->sd); | 1220 | ret = v4l2_async_register_subdev(&mt9v111->sd); |
| 1237 | if (ret) | 1221 | if (ret) |
| 1238 | goto error_free_ctrls; | 1222 | goto error_free_entity; |
| 1239 | 1223 | ||
| 1240 | return 0; | 1224 | return 0; |
| 1241 | 1225 | ||
| 1242 | error_free_ctrls: | 1226 | error_free_entity: |
| 1243 | v4l2_ctrl_handler_free(&mt9v111->ctrls); | ||
| 1244 | |||
| 1245 | #if IS_ENABLED(CONFIG_MEDIA_CONTROLLER) | 1227 | #if IS_ENABLED(CONFIG_MEDIA_CONTROLLER) |
| 1246 | media_entity_cleanup(&mt9v111->sd.entity); | 1228 | media_entity_cleanup(&mt9v111->sd.entity); |
| 1247 | #endif | 1229 | #endif |
| 1248 | 1230 | ||
| 1231 | error_free_ctrls: | ||
| 1232 | v4l2_ctrl_handler_free(&mt9v111->ctrls); | ||
| 1233 | |||
| 1249 | mutex_destroy(&mt9v111->pwr_mutex); | 1234 | mutex_destroy(&mt9v111->pwr_mutex); |
| 1250 | mutex_destroy(&mt9v111->stream_mutex); | 1235 | mutex_destroy(&mt9v111->stream_mutex); |
| 1251 | 1236 | ||
| @@ -1259,12 +1244,12 @@ static int mt9v111_remove(struct i2c_client *client) | |||
| 1259 | 1244 | ||
| 1260 | v4l2_async_unregister_subdev(sd); | 1245 | v4l2_async_unregister_subdev(sd); |
| 1261 | 1246 | ||
| 1262 | v4l2_ctrl_handler_free(&mt9v111->ctrls); | ||
| 1263 | |||
| 1264 | #if IS_ENABLED(CONFIG_MEDIA_CONTROLLER) | 1247 | #if IS_ENABLED(CONFIG_MEDIA_CONTROLLER) |
| 1265 | media_entity_cleanup(&sd->entity); | 1248 | media_entity_cleanup(&sd->entity); |
| 1266 | #endif | 1249 | #endif |
| 1267 | 1250 | ||
| 1251 | v4l2_ctrl_handler_free(&mt9v111->ctrls); | ||
| 1252 | |||
| 1268 | mutex_destroy(&mt9v111->pwr_mutex); | 1253 | mutex_destroy(&mt9v111->pwr_mutex); |
| 1269 | mutex_destroy(&mt9v111->stream_mutex); | 1254 | mutex_destroy(&mt9v111->stream_mutex); |
| 1270 | 1255 | ||
diff --git a/drivers/media/platform/Kconfig b/drivers/media/platform/Kconfig index 94c1fe0e9787..54fe90acb5b2 100644 --- a/drivers/media/platform/Kconfig +++ b/drivers/media/platform/Kconfig | |||
| @@ -541,6 +541,8 @@ config VIDEO_CROS_EC_CEC | |||
| 541 | depends on MFD_CROS_EC | 541 | depends on MFD_CROS_EC |
| 542 | select CEC_CORE | 542 | select CEC_CORE |
| 543 | select CEC_NOTIFIER | 543 | select CEC_NOTIFIER |
| 544 | select CHROME_PLATFORMS | ||
| 545 | select CROS_EC_PROTO | ||
| 544 | ---help--- | 546 | ---help--- |
| 545 | If you say yes here you will get support for the | 547 | If you say yes here you will get support for the |
| 546 | ChromeOS Embedded Controller's CEC. | 548 | ChromeOS Embedded Controller's CEC. |
diff --git a/drivers/media/platform/qcom/camss/camss-csid.c b/drivers/media/platform/qcom/camss/camss-csid.c index 729b31891466..a5ae85674ffb 100644 --- a/drivers/media/platform/qcom/camss/camss-csid.c +++ b/drivers/media/platform/qcom/camss/camss-csid.c | |||
| @@ -10,6 +10,7 @@ | |||
| 10 | #include <linux/clk.h> | 10 | #include <linux/clk.h> |
| 11 | #include <linux/completion.h> | 11 | #include <linux/completion.h> |
| 12 | #include <linux/interrupt.h> | 12 | #include <linux/interrupt.h> |
| 13 | #include <linux/io.h> | ||
| 13 | #include <linux/kernel.h> | 14 | #include <linux/kernel.h> |
| 14 | #include <linux/of.h> | 15 | #include <linux/of.h> |
| 15 | #include <linux/platform_device.h> | 16 | #include <linux/platform_device.h> |
diff --git a/drivers/media/platform/qcom/camss/camss-csiphy-2ph-1-0.c b/drivers/media/platform/qcom/camss/camss-csiphy-2ph-1-0.c index c832539397d7..12bce391d71f 100644 --- a/drivers/media/platform/qcom/camss/camss-csiphy-2ph-1-0.c +++ b/drivers/media/platform/qcom/camss/camss-csiphy-2ph-1-0.c | |||
| @@ -12,6 +12,7 @@ | |||
| 12 | 12 | ||
| 13 | #include <linux/delay.h> | 13 | #include <linux/delay.h> |
| 14 | #include <linux/interrupt.h> | 14 | #include <linux/interrupt.h> |
| 15 | #include <linux/io.h> | ||
| 15 | 16 | ||
| 16 | #define CAMSS_CSI_PHY_LNn_CFG2(n) (0x004 + 0x40 * (n)) | 17 | #define CAMSS_CSI_PHY_LNn_CFG2(n) (0x004 + 0x40 * (n)) |
| 17 | #define CAMSS_CSI_PHY_LNn_CFG3(n) (0x008 + 0x40 * (n)) | 18 | #define CAMSS_CSI_PHY_LNn_CFG3(n) (0x008 + 0x40 * (n)) |
diff --git a/drivers/media/platform/qcom/camss/camss-csiphy-3ph-1-0.c b/drivers/media/platform/qcom/camss/camss-csiphy-3ph-1-0.c index bcd0dfd33618..2e65caf1ecae 100644 --- a/drivers/media/platform/qcom/camss/camss-csiphy-3ph-1-0.c +++ b/drivers/media/platform/qcom/camss/camss-csiphy-3ph-1-0.c | |||
| @@ -12,6 +12,7 @@ | |||
| 12 | 12 | ||
| 13 | #include <linux/delay.h> | 13 | #include <linux/delay.h> |
| 14 | #include <linux/interrupt.h> | 14 | #include <linux/interrupt.h> |
| 15 | #include <linux/io.h> | ||
| 15 | 16 | ||
| 16 | #define CSIPHY_3PH_LNn_CFG1(n) (0x000 + 0x100 * (n)) | 17 | #define CSIPHY_3PH_LNn_CFG1(n) (0x000 + 0x100 * (n)) |
| 17 | #define CSIPHY_3PH_LNn_CFG1_SWI_REC_DLY_PRG (BIT(7) | BIT(6)) | 18 | #define CSIPHY_3PH_LNn_CFG1_SWI_REC_DLY_PRG (BIT(7) | BIT(6)) |
diff --git a/drivers/media/platform/qcom/camss/camss-csiphy.c b/drivers/media/platform/qcom/camss/camss-csiphy.c index 4559f3b1b38c..008afb85023b 100644 --- a/drivers/media/platform/qcom/camss/camss-csiphy.c +++ b/drivers/media/platform/qcom/camss/camss-csiphy.c | |||
| @@ -10,6 +10,7 @@ | |||
| 10 | #include <linux/clk.h> | 10 | #include <linux/clk.h> |
| 11 | #include <linux/delay.h> | 11 | #include <linux/delay.h> |
| 12 | #include <linux/interrupt.h> | 12 | #include <linux/interrupt.h> |
| 13 | #include <linux/io.h> | ||
| 13 | #include <linux/kernel.h> | 14 | #include <linux/kernel.h> |
| 14 | #include <linux/of.h> | 15 | #include <linux/of.h> |
| 15 | #include <linux/platform_device.h> | 16 | #include <linux/platform_device.h> |
diff --git a/drivers/media/platform/qcom/camss/camss-ispif.c b/drivers/media/platform/qcom/camss/camss-ispif.c index 7f269021d08c..1f33b4eb198c 100644 --- a/drivers/media/platform/qcom/camss/camss-ispif.c +++ b/drivers/media/platform/qcom/camss/camss-ispif.c | |||
| @@ -10,6 +10,7 @@ | |||
| 10 | #include <linux/clk.h> | 10 | #include <linux/clk.h> |
| 11 | #include <linux/completion.h> | 11 | #include <linux/completion.h> |
| 12 | #include <linux/interrupt.h> | 12 | #include <linux/interrupt.h> |
| 13 | #include <linux/io.h> | ||
| 13 | #include <linux/iopoll.h> | 14 | #include <linux/iopoll.h> |
| 14 | #include <linux/kernel.h> | 15 | #include <linux/kernel.h> |
| 15 | #include <linux/mutex.h> | 16 | #include <linux/mutex.h> |
| @@ -1076,8 +1077,8 @@ int msm_ispif_subdev_init(struct ispif_device *ispif, | |||
| 1076 | else | 1077 | else |
| 1077 | return -EINVAL; | 1078 | return -EINVAL; |
| 1078 | 1079 | ||
| 1079 | ispif->line = kcalloc(ispif->line_num, sizeof(*ispif->line), | 1080 | ispif->line = devm_kcalloc(dev, ispif->line_num, sizeof(*ispif->line), |
| 1080 | GFP_KERNEL); | 1081 | GFP_KERNEL); |
| 1081 | if (!ispif->line) | 1082 | if (!ispif->line) |
| 1082 | return -ENOMEM; | 1083 | return -ENOMEM; |
| 1083 | 1084 | ||
diff --git a/drivers/media/platform/qcom/camss/camss-vfe-4-1.c b/drivers/media/platform/qcom/camss/camss-vfe-4-1.c index da3a9fed9f2d..174a36be6f5d 100644 --- a/drivers/media/platform/qcom/camss/camss-vfe-4-1.c +++ b/drivers/media/platform/qcom/camss/camss-vfe-4-1.c | |||
| @@ -9,6 +9,7 @@ | |||
| 9 | */ | 9 | */ |
| 10 | 10 | ||
| 11 | #include <linux/interrupt.h> | 11 | #include <linux/interrupt.h> |
| 12 | #include <linux/io.h> | ||
| 12 | #include <linux/iopoll.h> | 13 | #include <linux/iopoll.h> |
| 13 | 14 | ||
| 14 | #include "camss-vfe.h" | 15 | #include "camss-vfe.h" |
diff --git a/drivers/media/platform/qcom/camss/camss-vfe-4-7.c b/drivers/media/platform/qcom/camss/camss-vfe-4-7.c index 4c584bffd179..0dca8bf9281e 100644 --- a/drivers/media/platform/qcom/camss/camss-vfe-4-7.c +++ b/drivers/media/platform/qcom/camss/camss-vfe-4-7.c | |||
| @@ -9,6 +9,7 @@ | |||
| 9 | */ | 9 | */ |
| 10 | 10 | ||
| 11 | #include <linux/interrupt.h> | 11 | #include <linux/interrupt.h> |
| 12 | #include <linux/io.h> | ||
| 12 | #include <linux/iopoll.h> | 13 | #include <linux/iopoll.h> |
| 13 | 14 | ||
| 14 | #include "camss-vfe.h" | 15 | #include "camss-vfe.h" |
diff --git a/drivers/media/platform/qcom/camss/camss.c b/drivers/media/platform/qcom/camss/camss.c index dcc0c30ef1b1..669615fff6a0 100644 --- a/drivers/media/platform/qcom/camss/camss.c +++ b/drivers/media/platform/qcom/camss/camss.c | |||
| @@ -848,17 +848,18 @@ static int camss_probe(struct platform_device *pdev) | |||
| 848 | return -EINVAL; | 848 | return -EINVAL; |
| 849 | } | 849 | } |
| 850 | 850 | ||
| 851 | camss->csiphy = kcalloc(camss->csiphy_num, sizeof(*camss->csiphy), | 851 | camss->csiphy = devm_kcalloc(dev, camss->csiphy_num, |
| 852 | GFP_KERNEL); | 852 | sizeof(*camss->csiphy), GFP_KERNEL); |
| 853 | if (!camss->csiphy) | 853 | if (!camss->csiphy) |
| 854 | return -ENOMEM; | 854 | return -ENOMEM; |
| 855 | 855 | ||
| 856 | camss->csid = kcalloc(camss->csid_num, sizeof(*camss->csid), | 856 | camss->csid = devm_kcalloc(dev, camss->csid_num, sizeof(*camss->csid), |
| 857 | GFP_KERNEL); | 857 | GFP_KERNEL); |
| 858 | if (!camss->csid) | 858 | if (!camss->csid) |
| 859 | return -ENOMEM; | 859 | return -ENOMEM; |
| 860 | 860 | ||
| 861 | camss->vfe = kcalloc(camss->vfe_num, sizeof(*camss->vfe), GFP_KERNEL); | 861 | camss->vfe = devm_kcalloc(dev, camss->vfe_num, sizeof(*camss->vfe), |
| 862 | GFP_KERNEL); | ||
| 862 | if (!camss->vfe) | 863 | if (!camss->vfe) |
| 863 | return -ENOMEM; | 864 | return -ENOMEM; |
| 864 | 865 | ||
| @@ -993,12 +994,12 @@ static const struct of_device_id camss_dt_match[] = { | |||
| 993 | 994 | ||
| 994 | MODULE_DEVICE_TABLE(of, camss_dt_match); | 995 | MODULE_DEVICE_TABLE(of, camss_dt_match); |
| 995 | 996 | ||
| 996 | static int camss_runtime_suspend(struct device *dev) | 997 | static int __maybe_unused camss_runtime_suspend(struct device *dev) |
| 997 | { | 998 | { |
| 998 | return 0; | 999 | return 0; |
| 999 | } | 1000 | } |
| 1000 | 1001 | ||
| 1001 | static int camss_runtime_resume(struct device *dev) | 1002 | static int __maybe_unused camss_runtime_resume(struct device *dev) |
| 1002 | { | 1003 | { |
| 1003 | return 0; | 1004 | return 0; |
| 1004 | } | 1005 | } |
diff --git a/drivers/media/usb/dvb-usb-v2/af9035.c b/drivers/media/usb/dvb-usb-v2/af9035.c index 666d319d3d1a..1f6c1eefe389 100644 --- a/drivers/media/usb/dvb-usb-v2/af9035.c +++ b/drivers/media/usb/dvb-usb-v2/af9035.c | |||
| @@ -402,8 +402,10 @@ static int af9035_i2c_master_xfer(struct i2c_adapter *adap, | |||
| 402 | if (msg[0].addr == state->af9033_i2c_addr[1]) | 402 | if (msg[0].addr == state->af9033_i2c_addr[1]) |
| 403 | reg |= 0x100000; | 403 | reg |= 0x100000; |
| 404 | 404 | ||
| 405 | ret = af9035_wr_regs(d, reg, &msg[0].buf[3], | 405 | ret = (msg[0].len >= 3) ? af9035_wr_regs(d, reg, |
| 406 | msg[0].len - 3); | 406 | &msg[0].buf[3], |
| 407 | msg[0].len - 3) | ||
| 408 | : -EOPNOTSUPP; | ||
| 407 | } else { | 409 | } else { |
| 408 | /* I2C write */ | 410 | /* I2C write */ |
| 409 | u8 buf[MAX_XFER_SIZE]; | 411 | u8 buf[MAX_XFER_SIZE]; |
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index a764a83f99da..0d87e11e7f1d 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c | |||
| @@ -971,16 +971,13 @@ static void bond_poll_controller(struct net_device *bond_dev) | |||
| 971 | struct slave *slave = NULL; | 971 | struct slave *slave = NULL; |
| 972 | struct list_head *iter; | 972 | struct list_head *iter; |
| 973 | struct ad_info ad_info; | 973 | struct ad_info ad_info; |
| 974 | struct netpoll_info *ni; | ||
| 975 | const struct net_device_ops *ops; | ||
| 976 | 974 | ||
| 977 | if (BOND_MODE(bond) == BOND_MODE_8023AD) | 975 | if (BOND_MODE(bond) == BOND_MODE_8023AD) |
| 978 | if (bond_3ad_get_active_agg_info(bond, &ad_info)) | 976 | if (bond_3ad_get_active_agg_info(bond, &ad_info)) |
| 979 | return; | 977 | return; |
| 980 | 978 | ||
| 981 | bond_for_each_slave_rcu(bond, slave, iter) { | 979 | bond_for_each_slave_rcu(bond, slave, iter) { |
| 982 | ops = slave->dev->netdev_ops; | 980 | if (!bond_slave_is_up(slave)) |
| 983 | if (!bond_slave_is_up(slave) || !ops->ndo_poll_controller) | ||
| 984 | continue; | 981 | continue; |
| 985 | 982 | ||
| 986 | if (BOND_MODE(bond) == BOND_MODE_8023AD) { | 983 | if (BOND_MODE(bond) == BOND_MODE_8023AD) { |
| @@ -992,11 +989,7 @@ static void bond_poll_controller(struct net_device *bond_dev) | |||
| 992 | continue; | 989 | continue; |
| 993 | } | 990 | } |
| 994 | 991 | ||
| 995 | ni = rcu_dereference_bh(slave->dev->npinfo); | 992 | netpoll_poll_dev(slave->dev); |
| 996 | if (down_trylock(&ni->dev_lock)) | ||
| 997 | continue; | ||
| 998 | ops->ndo_poll_controller(slave->dev); | ||
| 999 | up(&ni->dev_lock); | ||
| 1000 | } | 993 | } |
| 1001 | } | 994 | } |
| 1002 | 995 | ||
diff --git a/drivers/net/ethernet/apple/bmac.c b/drivers/net/ethernet/apple/bmac.c index 024998d6d8c6..6a8e2567f2bd 100644 --- a/drivers/net/ethernet/apple/bmac.c +++ b/drivers/net/ethernet/apple/bmac.c | |||
| @@ -154,7 +154,7 @@ static irqreturn_t bmac_txdma_intr(int irq, void *dev_id); | |||
| 154 | static irqreturn_t bmac_rxdma_intr(int irq, void *dev_id); | 154 | static irqreturn_t bmac_rxdma_intr(int irq, void *dev_id); |
| 155 | static void bmac_set_timeout(struct net_device *dev); | 155 | static void bmac_set_timeout(struct net_device *dev); |
| 156 | static void bmac_tx_timeout(struct timer_list *t); | 156 | static void bmac_tx_timeout(struct timer_list *t); |
| 157 | static int bmac_output(struct sk_buff *skb, struct net_device *dev); | 157 | static netdev_tx_t bmac_output(struct sk_buff *skb, struct net_device *dev); |
| 158 | static void bmac_start(struct net_device *dev); | 158 | static void bmac_start(struct net_device *dev); |
| 159 | 159 | ||
| 160 | #define DBDMA_SET(x) ( ((x) | (x) << 16) ) | 160 | #define DBDMA_SET(x) ( ((x) | (x) << 16) ) |
| @@ -1456,7 +1456,7 @@ bmac_start(struct net_device *dev) | |||
| 1456 | spin_unlock_irqrestore(&bp->lock, flags); | 1456 | spin_unlock_irqrestore(&bp->lock, flags); |
| 1457 | } | 1457 | } |
| 1458 | 1458 | ||
| 1459 | static int | 1459 | static netdev_tx_t |
| 1460 | bmac_output(struct sk_buff *skb, struct net_device *dev) | 1460 | bmac_output(struct sk_buff *skb, struct net_device *dev) |
| 1461 | { | 1461 | { |
| 1462 | struct bmac_data *bp = netdev_priv(dev); | 1462 | struct bmac_data *bp = netdev_priv(dev); |
diff --git a/drivers/net/ethernet/apple/mace.c b/drivers/net/ethernet/apple/mace.c index 0b5429d76bcf..68b9ee489489 100644 --- a/drivers/net/ethernet/apple/mace.c +++ b/drivers/net/ethernet/apple/mace.c | |||
| @@ -78,7 +78,7 @@ struct mace_data { | |||
| 78 | 78 | ||
| 79 | static int mace_open(struct net_device *dev); | 79 | static int mace_open(struct net_device *dev); |
| 80 | static int mace_close(struct net_device *dev); | 80 | static int mace_close(struct net_device *dev); |
| 81 | static int mace_xmit_start(struct sk_buff *skb, struct net_device *dev); | 81 | static netdev_tx_t mace_xmit_start(struct sk_buff *skb, struct net_device *dev); |
| 82 | static void mace_set_multicast(struct net_device *dev); | 82 | static void mace_set_multicast(struct net_device *dev); |
| 83 | static void mace_reset(struct net_device *dev); | 83 | static void mace_reset(struct net_device *dev); |
| 84 | static int mace_set_address(struct net_device *dev, void *addr); | 84 | static int mace_set_address(struct net_device *dev, void *addr); |
| @@ -525,7 +525,7 @@ static inline void mace_set_timeout(struct net_device *dev) | |||
| 525 | mp->timeout_active = 1; | 525 | mp->timeout_active = 1; |
| 526 | } | 526 | } |
| 527 | 527 | ||
| 528 | static int mace_xmit_start(struct sk_buff *skb, struct net_device *dev) | 528 | static netdev_tx_t mace_xmit_start(struct sk_buff *skb, struct net_device *dev) |
| 529 | { | 529 | { |
| 530 | struct mace_data *mp = netdev_priv(dev); | 530 | struct mace_data *mp = netdev_priv(dev); |
| 531 | volatile struct dbdma_regs __iomem *td = mp->tx_dma; | 531 | volatile struct dbdma_regs __iomem *td = mp->tx_dma; |
diff --git a/drivers/net/ethernet/apple/macmace.c b/drivers/net/ethernet/apple/macmace.c index 137cbb470af2..376f2c2613e7 100644 --- a/drivers/net/ethernet/apple/macmace.c +++ b/drivers/net/ethernet/apple/macmace.c | |||
| @@ -89,7 +89,7 @@ struct mace_frame { | |||
| 89 | 89 | ||
| 90 | static int mace_open(struct net_device *dev); | 90 | static int mace_open(struct net_device *dev); |
| 91 | static int mace_close(struct net_device *dev); | 91 | static int mace_close(struct net_device *dev); |
| 92 | static int mace_xmit_start(struct sk_buff *skb, struct net_device *dev); | 92 | static netdev_tx_t mace_xmit_start(struct sk_buff *skb, struct net_device *dev); |
| 93 | static void mace_set_multicast(struct net_device *dev); | 93 | static void mace_set_multicast(struct net_device *dev); |
| 94 | static int mace_set_address(struct net_device *dev, void *addr); | 94 | static int mace_set_address(struct net_device *dev, void *addr); |
| 95 | static void mace_reset(struct net_device *dev); | 95 | static void mace_reset(struct net_device *dev); |
| @@ -444,7 +444,7 @@ static int mace_close(struct net_device *dev) | |||
| 444 | * Transmit a frame | 444 | * Transmit a frame |
| 445 | */ | 445 | */ |
| 446 | 446 | ||
| 447 | static int mace_xmit_start(struct sk_buff *skb, struct net_device *dev) | 447 | static netdev_tx_t mace_xmit_start(struct sk_buff *skb, struct net_device *dev) |
| 448 | { | 448 | { |
| 449 | struct mace_data *mp = netdev_priv(dev); | 449 | struct mace_data *mp = netdev_priv(dev); |
| 450 | unsigned long flags; | 450 | unsigned long flags; |
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c index b5f1f62e8e25..d1e1a0ba8615 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c | |||
| @@ -225,9 +225,10 @@ int aq_ring_rx_clean(struct aq_ring_s *self, | |||
| 225 | } | 225 | } |
| 226 | 226 | ||
| 227 | /* for single fragment packets use build_skb() */ | 227 | /* for single fragment packets use build_skb() */ |
| 228 | if (buff->is_eop) { | 228 | if (buff->is_eop && |
| 229 | buff->len <= AQ_CFG_RX_FRAME_MAX - AQ_SKB_ALIGN) { | ||
| 229 | skb = build_skb(page_address(buff->page), | 230 | skb = build_skb(page_address(buff->page), |
| 230 | buff->len + AQ_SKB_ALIGN); | 231 | AQ_CFG_RX_FRAME_MAX); |
| 231 | if (unlikely(!skb)) { | 232 | if (unlikely(!skb)) { |
| 232 | err = -ENOMEM; | 233 | err = -ENOMEM; |
| 233 | goto err_exit; | 234 | goto err_exit; |
| @@ -247,18 +248,21 @@ int aq_ring_rx_clean(struct aq_ring_s *self, | |||
| 247 | buff->len - ETH_HLEN, | 248 | buff->len - ETH_HLEN, |
| 248 | SKB_TRUESIZE(buff->len - ETH_HLEN)); | 249 | SKB_TRUESIZE(buff->len - ETH_HLEN)); |
| 249 | 250 | ||
| 250 | for (i = 1U, next_ = buff->next, | 251 | if (!buff->is_eop) { |
| 251 | buff_ = &self->buff_ring[next_]; true; | 252 | for (i = 1U, next_ = buff->next, |
| 252 | next_ = buff_->next, | 253 | buff_ = &self->buff_ring[next_]; |
| 253 | buff_ = &self->buff_ring[next_], ++i) { | 254 | true; next_ = buff_->next, |
| 254 | skb_add_rx_frag(skb, i, buff_->page, 0, | 255 | buff_ = &self->buff_ring[next_], ++i) { |
| 255 | buff_->len, | 256 | skb_add_rx_frag(skb, i, |
| 256 | SKB_TRUESIZE(buff->len - | 257 | buff_->page, 0, |
| 257 | ETH_HLEN)); | 258 | buff_->len, |
| 258 | buff_->is_cleaned = 1; | 259 | SKB_TRUESIZE(buff->len - |
| 259 | 260 | ETH_HLEN)); | |
| 260 | if (buff_->is_eop) | 261 | buff_->is_cleaned = 1; |
| 261 | break; | 262 | |
| 263 | if (buff_->is_eop) | ||
| 264 | break; | ||
| 265 | } | ||
| 262 | } | 266 | } |
| 263 | } | 267 | } |
| 264 | 268 | ||
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c index 71362b7f6040..fcc2328bb0d9 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c | |||
| @@ -12894,19 +12894,6 @@ static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) | |||
| 12894 | } | 12894 | } |
| 12895 | } | 12895 | } |
| 12896 | 12896 | ||
| 12897 | #ifdef CONFIG_NET_POLL_CONTROLLER | ||
| 12898 | static void poll_bnx2x(struct net_device *dev) | ||
| 12899 | { | ||
| 12900 | struct bnx2x *bp = netdev_priv(dev); | ||
| 12901 | int i; | ||
| 12902 | |||
| 12903 | for_each_eth_queue(bp, i) { | ||
| 12904 | struct bnx2x_fastpath *fp = &bp->fp[i]; | ||
| 12905 | napi_schedule(&bnx2x_fp(bp, fp->index, napi)); | ||
| 12906 | } | ||
| 12907 | } | ||
| 12908 | #endif | ||
| 12909 | |||
| 12910 | static int bnx2x_validate_addr(struct net_device *dev) | 12897 | static int bnx2x_validate_addr(struct net_device *dev) |
| 12911 | { | 12898 | { |
| 12912 | struct bnx2x *bp = netdev_priv(dev); | 12899 | struct bnx2x *bp = netdev_priv(dev); |
| @@ -13113,9 +13100,6 @@ static const struct net_device_ops bnx2x_netdev_ops = { | |||
| 13113 | .ndo_tx_timeout = bnx2x_tx_timeout, | 13100 | .ndo_tx_timeout = bnx2x_tx_timeout, |
| 13114 | .ndo_vlan_rx_add_vid = bnx2x_vlan_rx_add_vid, | 13101 | .ndo_vlan_rx_add_vid = bnx2x_vlan_rx_add_vid, |
| 13115 | .ndo_vlan_rx_kill_vid = bnx2x_vlan_rx_kill_vid, | 13102 | .ndo_vlan_rx_kill_vid = bnx2x_vlan_rx_kill_vid, |
| 13116 | #ifdef CONFIG_NET_POLL_CONTROLLER | ||
| 13117 | .ndo_poll_controller = poll_bnx2x, | ||
| 13118 | #endif | ||
| 13119 | .ndo_setup_tc = __bnx2x_setup_tc, | 13103 | .ndo_setup_tc = __bnx2x_setup_tc, |
| 13120 | #ifdef CONFIG_BNX2X_SRIOV | 13104 | #ifdef CONFIG_BNX2X_SRIOV |
| 13121 | .ndo_set_vf_mac = bnx2x_set_vf_mac, | 13105 | .ndo_set_vf_mac = bnx2x_set_vf_mac, |
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index 177587f9c3f1..61957b0bbd8c 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c | |||
| @@ -7672,21 +7672,6 @@ static void bnxt_tx_timeout(struct net_device *dev) | |||
| 7672 | bnxt_queue_sp_work(bp); | 7672 | bnxt_queue_sp_work(bp); |
| 7673 | } | 7673 | } |
| 7674 | 7674 | ||
| 7675 | #ifdef CONFIG_NET_POLL_CONTROLLER | ||
| 7676 | static void bnxt_poll_controller(struct net_device *dev) | ||
| 7677 | { | ||
| 7678 | struct bnxt *bp = netdev_priv(dev); | ||
| 7679 | int i; | ||
| 7680 | |||
| 7681 | /* Only process tx rings/combined rings in netpoll mode. */ | ||
| 7682 | for (i = 0; i < bp->tx_nr_rings; i++) { | ||
| 7683 | struct bnxt_tx_ring_info *txr = &bp->tx_ring[i]; | ||
| 7684 | |||
| 7685 | napi_schedule(&txr->bnapi->napi); | ||
| 7686 | } | ||
| 7687 | } | ||
| 7688 | #endif | ||
| 7689 | |||
| 7690 | static void bnxt_timer(struct timer_list *t) | 7675 | static void bnxt_timer(struct timer_list *t) |
| 7691 | { | 7676 | { |
| 7692 | struct bnxt *bp = from_timer(bp, t, timer); | 7677 | struct bnxt *bp = from_timer(bp, t, timer); |
| @@ -8520,9 +8505,6 @@ static const struct net_device_ops bnxt_netdev_ops = { | |||
| 8520 | .ndo_set_vf_spoofchk = bnxt_set_vf_spoofchk, | 8505 | .ndo_set_vf_spoofchk = bnxt_set_vf_spoofchk, |
| 8521 | .ndo_set_vf_trust = bnxt_set_vf_trust, | 8506 | .ndo_set_vf_trust = bnxt_set_vf_trust, |
| 8522 | #endif | 8507 | #endif |
| 8523 | #ifdef CONFIG_NET_POLL_CONTROLLER | ||
| 8524 | .ndo_poll_controller = bnxt_poll_controller, | ||
| 8525 | #endif | ||
| 8526 | .ndo_setup_tc = bnxt_setup_tc, | 8508 | .ndo_setup_tc = bnxt_setup_tc, |
| 8527 | #ifdef CONFIG_RFS_ACCEL | 8509 | #ifdef CONFIG_RFS_ACCEL |
| 8528 | .ndo_rx_flow_steer = bnxt_rx_flow_steer, | 8510 | .ndo_rx_flow_steer = bnxt_rx_flow_steer, |
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c index f3b9fbcc705b..790c684f08ab 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c | |||
| @@ -46,6 +46,9 @@ static int bnxt_hwrm_nvm_req(struct bnxt *bp, u32 param_id, void *msg, | |||
| 46 | } | 46 | } |
| 47 | } | 47 | } |
| 48 | 48 | ||
| 49 | if (i == ARRAY_SIZE(nvm_params)) | ||
| 50 | return -EOPNOTSUPP; | ||
| 51 | |||
| 49 | if (nvm_param.dir_type == BNXT_NVM_PORT_CFG) | 52 | if (nvm_param.dir_type == BNXT_NVM_PORT_CFG) |
| 50 | idx = bp->pf.port_id; | 53 | idx = bp->pf.port_id; |
| 51 | else if (nvm_param.dir_type == BNXT_NVM_FUNC_CFG) | 54 | else if (nvm_param.dir_type == BNXT_NVM_FUNC_CFG) |
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c index 092c817f8f11..e1594c9df4c6 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c | |||
| @@ -75,17 +75,23 @@ static int bnxt_tc_parse_redir(struct bnxt *bp, | |||
| 75 | return 0; | 75 | return 0; |
| 76 | } | 76 | } |
| 77 | 77 | ||
| 78 | static void bnxt_tc_parse_vlan(struct bnxt *bp, | 78 | static int bnxt_tc_parse_vlan(struct bnxt *bp, |
| 79 | struct bnxt_tc_actions *actions, | 79 | struct bnxt_tc_actions *actions, |
| 80 | const struct tc_action *tc_act) | 80 | const struct tc_action *tc_act) |
| 81 | { | 81 | { |
| 82 | if (tcf_vlan_action(tc_act) == TCA_VLAN_ACT_POP) { | 82 | switch (tcf_vlan_action(tc_act)) { |
| 83 | case TCA_VLAN_ACT_POP: | ||
| 83 | actions->flags |= BNXT_TC_ACTION_FLAG_POP_VLAN; | 84 | actions->flags |= BNXT_TC_ACTION_FLAG_POP_VLAN; |
| 84 | } else if (tcf_vlan_action(tc_act) == TCA_VLAN_ACT_PUSH) { | 85 | break; |
| 86 | case TCA_VLAN_ACT_PUSH: | ||
| 85 | actions->flags |= BNXT_TC_ACTION_FLAG_PUSH_VLAN; | 87 | actions->flags |= BNXT_TC_ACTION_FLAG_PUSH_VLAN; |
| 86 | actions->push_vlan_tci = htons(tcf_vlan_push_vid(tc_act)); | 88 | actions->push_vlan_tci = htons(tcf_vlan_push_vid(tc_act)); |
| 87 | actions->push_vlan_tpid = tcf_vlan_push_proto(tc_act); | 89 | actions->push_vlan_tpid = tcf_vlan_push_proto(tc_act); |
| 90 | break; | ||
| 91 | default: | ||
| 92 | return -EOPNOTSUPP; | ||
| 88 | } | 93 | } |
| 94 | return 0; | ||
| 89 | } | 95 | } |
| 90 | 96 | ||
| 91 | static int bnxt_tc_parse_tunnel_set(struct bnxt *bp, | 97 | static int bnxt_tc_parse_tunnel_set(struct bnxt *bp, |
| @@ -134,7 +140,9 @@ static int bnxt_tc_parse_actions(struct bnxt *bp, | |||
| 134 | 140 | ||
| 135 | /* Push/pop VLAN */ | 141 | /* Push/pop VLAN */ |
| 136 | if (is_tcf_vlan(tc_act)) { | 142 | if (is_tcf_vlan(tc_act)) { |
| 137 | bnxt_tc_parse_vlan(bp, actions, tc_act); | 143 | rc = bnxt_tc_parse_vlan(bp, actions, tc_act); |
| 144 | if (rc) | ||
| 145 | return rc; | ||
| 138 | continue; | 146 | continue; |
| 139 | } | 147 | } |
| 140 | 148 | ||
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h index b8f75a22fb6c..f152da1ce046 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h | |||
| @@ -753,7 +753,6 @@ struct cpl_abort_req_rss { | |||
| 753 | }; | 753 | }; |
| 754 | 754 | ||
| 755 | struct cpl_abort_req_rss6 { | 755 | struct cpl_abort_req_rss6 { |
| 756 | WR_HDR; | ||
| 757 | union opcode_tid ot; | 756 | union opcode_tid ot; |
| 758 | __be32 srqidx_status; | 757 | __be32 srqidx_status; |
| 759 | }; | 758 | }; |
diff --git a/drivers/net/ethernet/cirrus/ep93xx_eth.c b/drivers/net/ethernet/cirrus/ep93xx_eth.c index e2a702996db4..13dfdfca49fc 100644 --- a/drivers/net/ethernet/cirrus/ep93xx_eth.c +++ b/drivers/net/ethernet/cirrus/ep93xx_eth.c | |||
| @@ -332,7 +332,7 @@ static int ep93xx_poll(struct napi_struct *napi, int budget) | |||
| 332 | return rx; | 332 | return rx; |
| 333 | } | 333 | } |
| 334 | 334 | ||
| 335 | static int ep93xx_xmit(struct sk_buff *skb, struct net_device *dev) | 335 | static netdev_tx_t ep93xx_xmit(struct sk_buff *skb, struct net_device *dev) |
| 336 | { | 336 | { |
| 337 | struct ep93xx_priv *ep = netdev_priv(dev); | 337 | struct ep93xx_priv *ep = netdev_priv(dev); |
| 338 | struct ep93xx_tdesc *txd; | 338 | struct ep93xx_tdesc *txd; |
diff --git a/drivers/net/ethernet/cirrus/mac89x0.c b/drivers/net/ethernet/cirrus/mac89x0.c index 3f8fe8fd79cc..6324e80960c3 100644 --- a/drivers/net/ethernet/cirrus/mac89x0.c +++ b/drivers/net/ethernet/cirrus/mac89x0.c | |||
| @@ -113,7 +113,7 @@ struct net_local { | |||
| 113 | 113 | ||
| 114 | /* Index to functions, as function prototypes. */ | 114 | /* Index to functions, as function prototypes. */ |
| 115 | static int net_open(struct net_device *dev); | 115 | static int net_open(struct net_device *dev); |
| 116 | static int net_send_packet(struct sk_buff *skb, struct net_device *dev); | 116 | static netdev_tx_t net_send_packet(struct sk_buff *skb, struct net_device *dev); |
| 117 | static irqreturn_t net_interrupt(int irq, void *dev_id); | 117 | static irqreturn_t net_interrupt(int irq, void *dev_id); |
| 118 | static void set_multicast_list(struct net_device *dev); | 118 | static void set_multicast_list(struct net_device *dev); |
| 119 | static void net_rx(struct net_device *dev); | 119 | static void net_rx(struct net_device *dev); |
| @@ -324,7 +324,7 @@ net_open(struct net_device *dev) | |||
| 324 | return 0; | 324 | return 0; |
| 325 | } | 325 | } |
| 326 | 326 | ||
| 327 | static int | 327 | static netdev_tx_t |
| 328 | net_send_packet(struct sk_buff *skb, struct net_device *dev) | 328 | net_send_packet(struct sk_buff *skb, struct net_device *dev) |
| 329 | { | 329 | { |
| 330 | struct net_local *lp = netdev_priv(dev); | 330 | struct net_local *lp = netdev_priv(dev); |
diff --git a/drivers/net/ethernet/i825xx/ether1.c b/drivers/net/ethernet/i825xx/ether1.c index dc983450354b..35f6291a3672 100644 --- a/drivers/net/ethernet/i825xx/ether1.c +++ b/drivers/net/ethernet/i825xx/ether1.c | |||
| @@ -64,7 +64,8 @@ static unsigned int net_debug = NET_DEBUG; | |||
| 64 | #define RX_AREA_END 0x0fc00 | 64 | #define RX_AREA_END 0x0fc00 |
| 65 | 65 | ||
| 66 | static int ether1_open(struct net_device *dev); | 66 | static int ether1_open(struct net_device *dev); |
| 67 | static int ether1_sendpacket(struct sk_buff *skb, struct net_device *dev); | 67 | static netdev_tx_t ether1_sendpacket(struct sk_buff *skb, |
| 68 | struct net_device *dev); | ||
| 68 | static irqreturn_t ether1_interrupt(int irq, void *dev_id); | 69 | static irqreturn_t ether1_interrupt(int irq, void *dev_id); |
| 69 | static int ether1_close(struct net_device *dev); | 70 | static int ether1_close(struct net_device *dev); |
| 70 | static void ether1_setmulticastlist(struct net_device *dev); | 71 | static void ether1_setmulticastlist(struct net_device *dev); |
| @@ -667,7 +668,7 @@ ether1_timeout(struct net_device *dev) | |||
| 667 | netif_wake_queue(dev); | 668 | netif_wake_queue(dev); |
| 668 | } | 669 | } |
| 669 | 670 | ||
| 670 | static int | 671 | static netdev_tx_t |
| 671 | ether1_sendpacket (struct sk_buff *skb, struct net_device *dev) | 672 | ether1_sendpacket (struct sk_buff *skb, struct net_device *dev) |
| 672 | { | 673 | { |
| 673 | int tmp, tst, nopaddr, txaddr, tbdaddr, dataddr; | 674 | int tmp, tst, nopaddr, txaddr, tbdaddr, dataddr; |
diff --git a/drivers/net/ethernet/i825xx/lib82596.c b/drivers/net/ethernet/i825xx/lib82596.c index f00a1dc2128c..2f7ae118217f 100644 --- a/drivers/net/ethernet/i825xx/lib82596.c +++ b/drivers/net/ethernet/i825xx/lib82596.c | |||
| @@ -347,7 +347,7 @@ static const char init_setup[] = | |||
| 347 | 0x7f /* *multi IA */ }; | 347 | 0x7f /* *multi IA */ }; |
| 348 | 348 | ||
| 349 | static int i596_open(struct net_device *dev); | 349 | static int i596_open(struct net_device *dev); |
| 350 | static int i596_start_xmit(struct sk_buff *skb, struct net_device *dev); | 350 | static netdev_tx_t i596_start_xmit(struct sk_buff *skb, struct net_device *dev); |
| 351 | static irqreturn_t i596_interrupt(int irq, void *dev_id); | 351 | static irqreturn_t i596_interrupt(int irq, void *dev_id); |
| 352 | static int i596_close(struct net_device *dev); | 352 | static int i596_close(struct net_device *dev); |
| 353 | static void i596_add_cmd(struct net_device *dev, struct i596_cmd *cmd); | 353 | static void i596_add_cmd(struct net_device *dev, struct i596_cmd *cmd); |
| @@ -966,7 +966,7 @@ static void i596_tx_timeout (struct net_device *dev) | |||
| 966 | } | 966 | } |
| 967 | 967 | ||
| 968 | 968 | ||
| 969 | static int i596_start_xmit(struct sk_buff *skb, struct net_device *dev) | 969 | static netdev_tx_t i596_start_xmit(struct sk_buff *skb, struct net_device *dev) |
| 970 | { | 970 | { |
| 971 | struct i596_private *lp = netdev_priv(dev); | 971 | struct i596_private *lp = netdev_priv(dev); |
| 972 | struct tx_cmd *tx_cmd; | 972 | struct tx_cmd *tx_cmd; |
diff --git a/drivers/net/ethernet/i825xx/sun3_82586.c b/drivers/net/ethernet/i825xx/sun3_82586.c index 8bb15a8c2a40..1a86184d44c0 100644 --- a/drivers/net/ethernet/i825xx/sun3_82586.c +++ b/drivers/net/ethernet/i825xx/sun3_82586.c | |||
| @@ -121,7 +121,8 @@ static int sun3_82586_probe1(struct net_device *dev,int ioaddr); | |||
| 121 | static irqreturn_t sun3_82586_interrupt(int irq,void *dev_id); | 121 | static irqreturn_t sun3_82586_interrupt(int irq,void *dev_id); |
| 122 | static int sun3_82586_open(struct net_device *dev); | 122 | static int sun3_82586_open(struct net_device *dev); |
| 123 | static int sun3_82586_close(struct net_device *dev); | 123 | static int sun3_82586_close(struct net_device *dev); |
| 124 | static int sun3_82586_send_packet(struct sk_buff *,struct net_device *); | 124 | static netdev_tx_t sun3_82586_send_packet(struct sk_buff *, |
| 125 | struct net_device *); | ||
| 125 | static struct net_device_stats *sun3_82586_get_stats(struct net_device *dev); | 126 | static struct net_device_stats *sun3_82586_get_stats(struct net_device *dev); |
| 126 | static void set_multicast_list(struct net_device *dev); | 127 | static void set_multicast_list(struct net_device *dev); |
| 127 | static void sun3_82586_timeout(struct net_device *dev); | 128 | static void sun3_82586_timeout(struct net_device *dev); |
| @@ -1002,7 +1003,8 @@ static void sun3_82586_timeout(struct net_device *dev) | |||
| 1002 | * send frame | 1003 | * send frame |
| 1003 | */ | 1004 | */ |
| 1004 | 1005 | ||
| 1005 | static int sun3_82586_send_packet(struct sk_buff *skb, struct net_device *dev) | 1006 | static netdev_tx_t |
| 1007 | sun3_82586_send_packet(struct sk_buff *skb, struct net_device *dev) | ||
| 1006 | { | 1008 | { |
| 1007 | int len,i; | 1009 | int len,i; |
| 1008 | #ifndef NO_NOPCOMMANDS | 1010 | #ifndef NO_NOPCOMMANDS |
diff --git a/drivers/net/ethernet/ibm/emac/core.c b/drivers/net/ethernet/ibm/emac/core.c index 372664686309..129f4e9f38da 100644 --- a/drivers/net/ethernet/ibm/emac/core.c +++ b/drivers/net/ethernet/ibm/emac/core.c | |||
| @@ -2677,12 +2677,17 @@ static int emac_init_phy(struct emac_instance *dev) | |||
| 2677 | if (of_phy_is_fixed_link(np)) { | 2677 | if (of_phy_is_fixed_link(np)) { |
| 2678 | int res = emac_dt_mdio_probe(dev); | 2678 | int res = emac_dt_mdio_probe(dev); |
| 2679 | 2679 | ||
| 2680 | if (!res) { | 2680 | if (res) |
| 2681 | res = of_phy_register_fixed_link(np); | 2681 | return res; |
| 2682 | if (res) | 2682 | |
| 2683 | mdiobus_unregister(dev->mii_bus); | 2683 | res = of_phy_register_fixed_link(np); |
| 2684 | dev->phy_dev = of_phy_find_device(np); | ||
| 2685 | if (res || !dev->phy_dev) { | ||
| 2686 | mdiobus_unregister(dev->mii_bus); | ||
| 2687 | return res ? res : -EINVAL; | ||
| 2684 | } | 2688 | } |
| 2685 | return res; | 2689 | emac_adjust_link(dev->ndev); |
| 2690 | put_device(&dev->phy_dev->mdio.dev); | ||
| 2686 | } | 2691 | } |
| 2687 | return 0; | 2692 | return 0; |
| 2688 | } | 2693 | } |
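The emac hunk reworks fixed-link PHY setup so each step is checked and unwound on failure: probe the MDIO bus, register the fixed link, look up the resulting `phy_device`, and unregister the bus if anything went wrong. A condensed sketch of that control flow follows; `struct example_priv` and `example_mdio_probe()` are assumptions standing in for the driver's own structures and helpers.

```c
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/of_mdio.h>
#include <linux/phy.h>

/* Assumed driver-private layout; only the fields used below. */
struct example_priv {
	struct mii_bus *mii_bus;
	struct phy_device *phy_dev;
};

/* Placeholder for the driver's own MDIO bus registration. */
static int example_mdio_probe(struct example_priv *priv) { return 0; }

/* Check-and-unwind pattern from the hunk above: every step that can fail
 * is tested, and a failure after the MDIO probe unregisters the bus again.
 */
static int example_init_fixed_link(struct example_priv *priv,
				   struct device_node *np)
{
	int res = example_mdio_probe(priv);

	if (res)
		return res;

	res = of_phy_register_fixed_link(np);
	priv->phy_dev = of_phy_find_device(np);
	if (res || !priv->phy_dev) {
		mdiobus_unregister(priv->mii_bus);
		return res ? res : -EINVAL;
	}

	/* of_phy_find_device() took a reference; drop it once set up. */
	put_device(&priv->phy_dev->mdio.dev);
	return 0;
}
```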
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k.h b/drivers/net/ethernet/intel/fm10k/fm10k.h index a903a0ba45e1..7d42582ed48d 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k.h +++ b/drivers/net/ethernet/intel/fm10k/fm10k.h | |||
| @@ -504,9 +504,6 @@ void fm10k_update_stats(struct fm10k_intfc *interface); | |||
| 504 | void fm10k_service_event_schedule(struct fm10k_intfc *interface); | 504 | void fm10k_service_event_schedule(struct fm10k_intfc *interface); |
| 505 | void fm10k_macvlan_schedule(struct fm10k_intfc *interface); | 505 | void fm10k_macvlan_schedule(struct fm10k_intfc *interface); |
| 506 | void fm10k_update_rx_drop_en(struct fm10k_intfc *interface); | 506 | void fm10k_update_rx_drop_en(struct fm10k_intfc *interface); |
| 507 | #ifdef CONFIG_NET_POLL_CONTROLLER | ||
| 508 | void fm10k_netpoll(struct net_device *netdev); | ||
| 509 | #endif | ||
| 510 | 507 | ||
| 511 | /* Netdev */ | 508 | /* Netdev */ |
| 512 | struct net_device *fm10k_alloc_netdev(const struct fm10k_info *info); | 509 | struct net_device *fm10k_alloc_netdev(const struct fm10k_info *info); |
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c index 929f538d28bc..538a8467f434 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c | |||
| @@ -1648,9 +1648,6 @@ static const struct net_device_ops fm10k_netdev_ops = { | |||
| 1648 | .ndo_udp_tunnel_del = fm10k_udp_tunnel_del, | 1648 | .ndo_udp_tunnel_del = fm10k_udp_tunnel_del, |
| 1649 | .ndo_dfwd_add_station = fm10k_dfwd_add_station, | 1649 | .ndo_dfwd_add_station = fm10k_dfwd_add_station, |
| 1650 | .ndo_dfwd_del_station = fm10k_dfwd_del_station, | 1650 | .ndo_dfwd_del_station = fm10k_dfwd_del_station, |
| 1651 | #ifdef CONFIG_NET_POLL_CONTROLLER | ||
| 1652 | .ndo_poll_controller = fm10k_netpoll, | ||
| 1653 | #endif | ||
| 1654 | .ndo_features_check = fm10k_features_check, | 1651 | .ndo_features_check = fm10k_features_check, |
| 1655 | }; | 1652 | }; |
| 1656 | 1653 | ||
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c index 15071e4adb98..c859ababeed5 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c | |||
| @@ -1210,28 +1210,6 @@ static irqreturn_t fm10k_msix_mbx_vf(int __always_unused irq, void *data) | |||
| 1210 | return IRQ_HANDLED; | 1210 | return IRQ_HANDLED; |
| 1211 | } | 1211 | } |
| 1212 | 1212 | ||
| 1213 | #ifdef CONFIG_NET_POLL_CONTROLLER | ||
| 1214 | /** | ||
| 1215 | * fm10k_netpoll - A Polling 'interrupt' handler | ||
| 1216 | * @netdev: network interface device structure | ||
| 1217 | * | ||
| 1218 | * This is used by netconsole to send skbs without having to re-enable | ||
| 1219 | * interrupts. It's not called while the normal interrupt routine is executing. | ||
| 1220 | **/ | ||
| 1221 | void fm10k_netpoll(struct net_device *netdev) | ||
| 1222 | { | ||
| 1223 | struct fm10k_intfc *interface = netdev_priv(netdev); | ||
| 1224 | int i; | ||
| 1225 | |||
| 1226 | /* if interface is down do nothing */ | ||
| 1227 | if (test_bit(__FM10K_DOWN, interface->state)) | ||
| 1228 | return; | ||
| 1229 | |||
| 1230 | for (i = 0; i < interface->num_q_vectors; i++) | ||
| 1231 | fm10k_msix_clean_rings(0, interface->q_vector[i]); | ||
| 1232 | } | ||
| 1233 | |||
| 1234 | #endif | ||
| 1235 | #define FM10K_ERR_MSG(type) case (type): error = #type; break | 1213 | #define FM10K_ERR_MSG(type) case (type): error = #type; break |
| 1236 | static void fm10k_handle_fault(struct fm10k_intfc *interface, int type, | 1214 | static void fm10k_handle_fault(struct fm10k_intfc *interface, int type, |
| 1237 | struct fm10k_fault *fault) | 1215 | struct fm10k_fault *fault) |
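This fm10k hunk is the first of a long run of identical cleanups in this merge (i40evf, ice, igb, ixgb, ixgbe, ixgbevf, mlx4, mlx5 and nfp below): the per-driver `.ndo_poll_controller` implementations that merely kicked every NAPI instance are removed, since the netpoll core can now poll a device's NAPI contexts itself when the hook is absent. Roughly, that generic fallback behaves like the sketch below; it is a simplification, not the actual net/core/netpoll.c code.

```c
#include <linux/netdevice.h>

/* Simplified picture of the core fallback: with no ndo_poll_controller,
 * each of the device's NAPI instances is polled directly with a zero
 * budget to reap completions.
 */
static void example_netpoll_fallback(struct net_device *dev)
{
	struct napi_struct *napi;

	list_for_each_entry(napi, &dev->napi_list, dev_list)
		napi->poll(napi, 0);	/* locking and state bits omitted */
}
```

Drivers only need to keep the hook if they have to do something beyond scheduling their NAPI contexts.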
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_main.c b/drivers/net/ethernet/intel/i40evf/i40evf_main.c index 5906c1c1d19d..fef6d892ed4c 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf_main.c +++ b/drivers/net/ethernet/intel/i40evf/i40evf_main.c | |||
| @@ -396,29 +396,6 @@ static void i40evf_map_rings_to_vectors(struct i40evf_adapter *adapter) | |||
| 396 | adapter->aq_required |= I40EVF_FLAG_AQ_MAP_VECTORS; | 396 | adapter->aq_required |= I40EVF_FLAG_AQ_MAP_VECTORS; |
| 397 | } | 397 | } |
| 398 | 398 | ||
| 399 | #ifdef CONFIG_NET_POLL_CONTROLLER | ||
| 400 | /** | ||
| 401 | * i40evf_netpoll - A Polling 'interrupt' handler | ||
| 402 | * @netdev: network interface device structure | ||
| 403 | * | ||
| 404 | * This is used by netconsole to send skbs without having to re-enable | ||
| 405 | * interrupts. It's not called while the normal interrupt routine is executing. | ||
| 406 | **/ | ||
| 407 | static void i40evf_netpoll(struct net_device *netdev) | ||
| 408 | { | ||
| 409 | struct i40evf_adapter *adapter = netdev_priv(netdev); | ||
| 410 | int q_vectors = adapter->num_msix_vectors - NONQ_VECS; | ||
| 411 | int i; | ||
| 412 | |||
| 413 | /* if interface is down do nothing */ | ||
| 414 | if (test_bit(__I40E_VSI_DOWN, adapter->vsi.state)) | ||
| 415 | return; | ||
| 416 | |||
| 417 | for (i = 0; i < q_vectors; i++) | ||
| 418 | i40evf_msix_clean_rings(0, &adapter->q_vectors[i]); | ||
| 419 | } | ||
| 420 | |||
| 421 | #endif | ||
| 422 | /** | 399 | /** |
| 423 | * i40evf_irq_affinity_notify - Callback for affinity changes | 400 | * i40evf_irq_affinity_notify - Callback for affinity changes |
| 424 | * @notify: context as to what irq was changed | 401 | * @notify: context as to what irq was changed |
| @@ -3229,9 +3206,6 @@ static const struct net_device_ops i40evf_netdev_ops = { | |||
| 3229 | .ndo_features_check = i40evf_features_check, | 3206 | .ndo_features_check = i40evf_features_check, |
| 3230 | .ndo_fix_features = i40evf_fix_features, | 3207 | .ndo_fix_features = i40evf_fix_features, |
| 3231 | .ndo_set_features = i40evf_set_features, | 3208 | .ndo_set_features = i40evf_set_features, |
| 3232 | #ifdef CONFIG_NET_POLL_CONTROLLER | ||
| 3233 | .ndo_poll_controller = i40evf_netpoll, | ||
| 3234 | #endif | ||
| 3235 | .ndo_setup_tc = i40evf_setup_tc, | 3209 | .ndo_setup_tc = i40evf_setup_tc, |
| 3236 | }; | 3210 | }; |
| 3237 | 3211 | ||
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c index f1e80eed2fd6..3f047bb43348 100644 --- a/drivers/net/ethernet/intel/ice/ice_main.c +++ b/drivers/net/ethernet/intel/ice/ice_main.c | |||
| @@ -4806,30 +4806,6 @@ void ice_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats) | |||
| 4806 | stats->rx_length_errors = vsi_stats->rx_length_errors; | 4806 | stats->rx_length_errors = vsi_stats->rx_length_errors; |
| 4807 | } | 4807 | } |
| 4808 | 4808 | ||
| 4809 | #ifdef CONFIG_NET_POLL_CONTROLLER | ||
| 4810 | /** | ||
| 4811 | * ice_netpoll - polling "interrupt" handler | ||
| 4812 | * @netdev: network interface device structure | ||
| 4813 | * | ||
| 4814 | * Used by netconsole to send skbs without having to re-enable interrupts. | ||
| 4815 | * This is not called in the normal interrupt path. | ||
| 4816 | */ | ||
| 4817 | static void ice_netpoll(struct net_device *netdev) | ||
| 4818 | { | ||
| 4819 | struct ice_netdev_priv *np = netdev_priv(netdev); | ||
| 4820 | struct ice_vsi *vsi = np->vsi; | ||
| 4821 | struct ice_pf *pf = vsi->back; | ||
| 4822 | int i; | ||
| 4823 | |||
| 4824 | if (test_bit(__ICE_DOWN, vsi->state) || | ||
| 4825 | !test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) | ||
| 4826 | return; | ||
| 4827 | |||
| 4828 | for (i = 0; i < vsi->num_q_vectors; i++) | ||
| 4829 | ice_msix_clean_rings(0, vsi->q_vectors[i]); | ||
| 4830 | } | ||
| 4831 | #endif /* CONFIG_NET_POLL_CONTROLLER */ | ||
| 4832 | |||
| 4833 | /** | 4809 | /** |
| 4834 | * ice_napi_disable_all - Disable NAPI for all q_vectors in the VSI | 4810 | * ice_napi_disable_all - Disable NAPI for all q_vectors in the VSI |
| 4835 | * @vsi: VSI having NAPI disabled | 4811 | * @vsi: VSI having NAPI disabled |
| @@ -5497,9 +5473,6 @@ static const struct net_device_ops ice_netdev_ops = { | |||
| 5497 | .ndo_validate_addr = eth_validate_addr, | 5473 | .ndo_validate_addr = eth_validate_addr, |
| 5498 | .ndo_change_mtu = ice_change_mtu, | 5474 | .ndo_change_mtu = ice_change_mtu, |
| 5499 | .ndo_get_stats64 = ice_get_stats64, | 5475 | .ndo_get_stats64 = ice_get_stats64, |
| 5500 | #ifdef CONFIG_NET_POLL_CONTROLLER | ||
| 5501 | .ndo_poll_controller = ice_netpoll, | ||
| 5502 | #endif /* CONFIG_NET_POLL_CONTROLLER */ | ||
| 5503 | .ndo_vlan_rx_add_vid = ice_vlan_rx_add_vid, | 5476 | .ndo_vlan_rx_add_vid = ice_vlan_rx_add_vid, |
| 5504 | .ndo_vlan_rx_kill_vid = ice_vlan_rx_kill_vid, | 5477 | .ndo_vlan_rx_kill_vid = ice_vlan_rx_kill_vid, |
| 5505 | .ndo_set_features = ice_set_features, | 5478 | .ndo_set_features = ice_set_features, |
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index a32c576c1e65..0796cef96fa3 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c | |||
| @@ -205,10 +205,6 @@ static struct notifier_block dca_notifier = { | |||
| 205 | .priority = 0 | 205 | .priority = 0 |
| 206 | }; | 206 | }; |
| 207 | #endif | 207 | #endif |
| 208 | #ifdef CONFIG_NET_POLL_CONTROLLER | ||
| 209 | /* for netdump / net console */ | ||
| 210 | static void igb_netpoll(struct net_device *); | ||
| 211 | #endif | ||
| 212 | #ifdef CONFIG_PCI_IOV | 208 | #ifdef CONFIG_PCI_IOV |
| 213 | static unsigned int max_vfs; | 209 | static unsigned int max_vfs; |
| 214 | module_param(max_vfs, uint, 0); | 210 | module_param(max_vfs, uint, 0); |
| @@ -2881,9 +2877,6 @@ static const struct net_device_ops igb_netdev_ops = { | |||
| 2881 | .ndo_set_vf_spoofchk = igb_ndo_set_vf_spoofchk, | 2877 | .ndo_set_vf_spoofchk = igb_ndo_set_vf_spoofchk, |
| 2882 | .ndo_set_vf_trust = igb_ndo_set_vf_trust, | 2878 | .ndo_set_vf_trust = igb_ndo_set_vf_trust, |
| 2883 | .ndo_get_vf_config = igb_ndo_get_vf_config, | 2879 | .ndo_get_vf_config = igb_ndo_get_vf_config, |
| 2884 | #ifdef CONFIG_NET_POLL_CONTROLLER | ||
| 2885 | .ndo_poll_controller = igb_netpoll, | ||
| 2886 | #endif | ||
| 2887 | .ndo_fix_features = igb_fix_features, | 2880 | .ndo_fix_features = igb_fix_features, |
| 2888 | .ndo_set_features = igb_set_features, | 2881 | .ndo_set_features = igb_set_features, |
| 2889 | .ndo_fdb_add = igb_ndo_fdb_add, | 2882 | .ndo_fdb_add = igb_ndo_fdb_add, |
| @@ -9053,29 +9046,6 @@ static int igb_pci_sriov_configure(struct pci_dev *dev, int num_vfs) | |||
| 9053 | return 0; | 9046 | return 0; |
| 9054 | } | 9047 | } |
| 9055 | 9048 | ||
| 9056 | #ifdef CONFIG_NET_POLL_CONTROLLER | ||
| 9057 | /* Polling 'interrupt' - used by things like netconsole to send skbs | ||
| 9058 | * without having to re-enable interrupts. It's not called while | ||
| 9059 | * the interrupt routine is executing. | ||
| 9060 | */ | ||
| 9061 | static void igb_netpoll(struct net_device *netdev) | ||
| 9062 | { | ||
| 9063 | struct igb_adapter *adapter = netdev_priv(netdev); | ||
| 9064 | struct e1000_hw *hw = &adapter->hw; | ||
| 9065 | struct igb_q_vector *q_vector; | ||
| 9066 | int i; | ||
| 9067 | |||
| 9068 | for (i = 0; i < adapter->num_q_vectors; i++) { | ||
| 9069 | q_vector = adapter->q_vector[i]; | ||
| 9070 | if (adapter->flags & IGB_FLAG_HAS_MSIX) | ||
| 9071 | wr32(E1000_EIMC, q_vector->eims_value); | ||
| 9072 | else | ||
| 9073 | igb_irq_disable(adapter); | ||
| 9074 | napi_schedule(&q_vector->napi); | ||
| 9075 | } | ||
| 9076 | } | ||
| 9077 | #endif /* CONFIG_NET_POLL_CONTROLLER */ | ||
| 9078 | |||
| 9079 | /** | 9049 | /** |
| 9080 | * igb_io_error_detected - called when PCI error is detected | 9050 | * igb_io_error_detected - called when PCI error is detected |
| 9081 | * @pdev: Pointer to PCI device | 9051 | * @pdev: Pointer to PCI device |
diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_main.c b/drivers/net/ethernet/intel/ixgb/ixgb_main.c index d3e72d0f66ef..7722153c4ac2 100644 --- a/drivers/net/ethernet/intel/ixgb/ixgb_main.c +++ b/drivers/net/ethernet/intel/ixgb/ixgb_main.c | |||
| @@ -81,11 +81,6 @@ static int ixgb_vlan_rx_kill_vid(struct net_device *netdev, | |||
| 81 | __be16 proto, u16 vid); | 81 | __be16 proto, u16 vid); |
| 82 | static void ixgb_restore_vlan(struct ixgb_adapter *adapter); | 82 | static void ixgb_restore_vlan(struct ixgb_adapter *adapter); |
| 83 | 83 | ||
| 84 | #ifdef CONFIG_NET_POLL_CONTROLLER | ||
| 85 | /* for netdump / net console */ | ||
| 86 | static void ixgb_netpoll(struct net_device *dev); | ||
| 87 | #endif | ||
| 88 | |||
| 89 | static pci_ers_result_t ixgb_io_error_detected (struct pci_dev *pdev, | 84 | static pci_ers_result_t ixgb_io_error_detected (struct pci_dev *pdev, |
| 90 | enum pci_channel_state state); | 85 | enum pci_channel_state state); |
| 91 | static pci_ers_result_t ixgb_io_slot_reset (struct pci_dev *pdev); | 86 | static pci_ers_result_t ixgb_io_slot_reset (struct pci_dev *pdev); |
| @@ -348,9 +343,6 @@ static const struct net_device_ops ixgb_netdev_ops = { | |||
| 348 | .ndo_tx_timeout = ixgb_tx_timeout, | 343 | .ndo_tx_timeout = ixgb_tx_timeout, |
| 349 | .ndo_vlan_rx_add_vid = ixgb_vlan_rx_add_vid, | 344 | .ndo_vlan_rx_add_vid = ixgb_vlan_rx_add_vid, |
| 350 | .ndo_vlan_rx_kill_vid = ixgb_vlan_rx_kill_vid, | 345 | .ndo_vlan_rx_kill_vid = ixgb_vlan_rx_kill_vid, |
| 351 | #ifdef CONFIG_NET_POLL_CONTROLLER | ||
| 352 | .ndo_poll_controller = ixgb_netpoll, | ||
| 353 | #endif | ||
| 354 | .ndo_fix_features = ixgb_fix_features, | 346 | .ndo_fix_features = ixgb_fix_features, |
| 355 | .ndo_set_features = ixgb_set_features, | 347 | .ndo_set_features = ixgb_set_features, |
| 356 | }; | 348 | }; |
| @@ -2195,23 +2187,6 @@ ixgb_restore_vlan(struct ixgb_adapter *adapter) | |||
| 2195 | ixgb_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), vid); | 2187 | ixgb_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), vid); |
| 2196 | } | 2188 | } |
| 2197 | 2189 | ||
| 2198 | #ifdef CONFIG_NET_POLL_CONTROLLER | ||
| 2199 | /* | ||
| 2200 | * Polling 'interrupt' - used by things like netconsole to send skbs | ||
| 2201 | * without having to re-enable interrupts. It's not called while | ||
| 2202 | * the interrupt routine is executing. | ||
| 2203 | */ | ||
| 2204 | |||
| 2205 | static void ixgb_netpoll(struct net_device *dev) | ||
| 2206 | { | ||
| 2207 | struct ixgb_adapter *adapter = netdev_priv(dev); | ||
| 2208 | |||
| 2209 | disable_irq(adapter->pdev->irq); | ||
| 2210 | ixgb_intr(adapter->pdev->irq, dev); | ||
| 2211 | enable_irq(adapter->pdev->irq); | ||
| 2212 | } | ||
| 2213 | #endif | ||
| 2214 | |||
| 2215 | /** | 2190 | /** |
| 2216 | * ixgb_io_error_detected - called when PCI error is detected | 2191 | * ixgb_io_error_detected - called when PCI error is detected |
| 2217 | * @pdev: pointer to pci device with error | 2192 | * @pdev: pointer to pci device with error |
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 9a23d33a47ed..f27d73a7bf16 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | |||
| @@ -8768,28 +8768,6 @@ static int ixgbe_del_sanmac_netdev(struct net_device *dev) | |||
| 8768 | return err; | 8768 | return err; |
| 8769 | } | 8769 | } |
| 8770 | 8770 | ||
| 8771 | #ifdef CONFIG_NET_POLL_CONTROLLER | ||
| 8772 | /* | ||
| 8773 | * Polling 'interrupt' - used by things like netconsole to send skbs | ||
| 8774 | * without having to re-enable interrupts. It's not called while | ||
| 8775 | * the interrupt routine is executing. | ||
| 8776 | */ | ||
| 8777 | static void ixgbe_netpoll(struct net_device *netdev) | ||
| 8778 | { | ||
| 8779 | struct ixgbe_adapter *adapter = netdev_priv(netdev); | ||
| 8780 | int i; | ||
| 8781 | |||
| 8782 | /* if interface is down do nothing */ | ||
| 8783 | if (test_bit(__IXGBE_DOWN, &adapter->state)) | ||
| 8784 | return; | ||
| 8785 | |||
| 8786 | /* loop through and schedule all active queues */ | ||
| 8787 | for (i = 0; i < adapter->num_q_vectors; i++) | ||
| 8788 | ixgbe_msix_clean_rings(0, adapter->q_vector[i]); | ||
| 8789 | } | ||
| 8790 | |||
| 8791 | #endif | ||
| 8792 | |||
| 8793 | static void ixgbe_get_ring_stats64(struct rtnl_link_stats64 *stats, | 8771 | static void ixgbe_get_ring_stats64(struct rtnl_link_stats64 *stats, |
| 8794 | struct ixgbe_ring *ring) | 8772 | struct ixgbe_ring *ring) |
| 8795 | { | 8773 | { |
| @@ -10251,9 +10229,6 @@ static const struct net_device_ops ixgbe_netdev_ops = { | |||
| 10251 | .ndo_get_vf_config = ixgbe_ndo_get_vf_config, | 10229 | .ndo_get_vf_config = ixgbe_ndo_get_vf_config, |
| 10252 | .ndo_get_stats64 = ixgbe_get_stats64, | 10230 | .ndo_get_stats64 = ixgbe_get_stats64, |
| 10253 | .ndo_setup_tc = __ixgbe_setup_tc, | 10231 | .ndo_setup_tc = __ixgbe_setup_tc, |
| 10254 | #ifdef CONFIG_NET_POLL_CONTROLLER | ||
| 10255 | .ndo_poll_controller = ixgbe_netpoll, | ||
| 10256 | #endif | ||
| 10257 | #ifdef IXGBE_FCOE | 10232 | #ifdef IXGBE_FCOE |
| 10258 | .ndo_select_queue = ixgbe_select_queue, | 10233 | .ndo_select_queue = ixgbe_select_queue, |
| 10259 | .ndo_fcoe_ddp_setup = ixgbe_fcoe_ddp_get, | 10234 | .ndo_fcoe_ddp_setup = ixgbe_fcoe_ddp_get, |
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c index d86446d202d5..5a228582423b 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c | |||
| @@ -4233,24 +4233,6 @@ static int ixgbevf_change_mtu(struct net_device *netdev, int new_mtu) | |||
| 4233 | return 0; | 4233 | return 0; |
| 4234 | } | 4234 | } |
| 4235 | 4235 | ||
| 4236 | #ifdef CONFIG_NET_POLL_CONTROLLER | ||
| 4237 | /* Polling 'interrupt' - used by things like netconsole to send skbs | ||
| 4238 | * without having to re-enable interrupts. It's not called while | ||
| 4239 | * the interrupt routine is executing. | ||
| 4240 | */ | ||
| 4241 | static void ixgbevf_netpoll(struct net_device *netdev) | ||
| 4242 | { | ||
| 4243 | struct ixgbevf_adapter *adapter = netdev_priv(netdev); | ||
| 4244 | int i; | ||
| 4245 | |||
| 4246 | /* if interface is down do nothing */ | ||
| 4247 | if (test_bit(__IXGBEVF_DOWN, &adapter->state)) | ||
| 4248 | return; | ||
| 4249 | for (i = 0; i < adapter->num_rx_queues; i++) | ||
| 4250 | ixgbevf_msix_clean_rings(0, adapter->q_vector[i]); | ||
| 4251 | } | ||
| 4252 | #endif /* CONFIG_NET_POLL_CONTROLLER */ | ||
| 4253 | |||
| 4254 | static int ixgbevf_suspend(struct pci_dev *pdev, pm_message_t state) | 4236 | static int ixgbevf_suspend(struct pci_dev *pdev, pm_message_t state) |
| 4255 | { | 4237 | { |
| 4256 | struct net_device *netdev = pci_get_drvdata(pdev); | 4238 | struct net_device *netdev = pci_get_drvdata(pdev); |
| @@ -4482,9 +4464,6 @@ static const struct net_device_ops ixgbevf_netdev_ops = { | |||
| 4482 | .ndo_tx_timeout = ixgbevf_tx_timeout, | 4464 | .ndo_tx_timeout = ixgbevf_tx_timeout, |
| 4483 | .ndo_vlan_rx_add_vid = ixgbevf_vlan_rx_add_vid, | 4465 | .ndo_vlan_rx_add_vid = ixgbevf_vlan_rx_add_vid, |
| 4484 | .ndo_vlan_rx_kill_vid = ixgbevf_vlan_rx_kill_vid, | 4466 | .ndo_vlan_rx_kill_vid = ixgbevf_vlan_rx_kill_vid, |
| 4485 | #ifdef CONFIG_NET_POLL_CONTROLLER | ||
| 4486 | .ndo_poll_controller = ixgbevf_netpoll, | ||
| 4487 | #endif | ||
| 4488 | .ndo_features_check = ixgbevf_features_check, | 4467 | .ndo_features_check = ixgbevf_features_check, |
| 4489 | .ndo_bpf = ixgbevf_xdp, | 4468 | .ndo_bpf = ixgbevf_xdp, |
| 4490 | }; | 4469 | }; |
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c index bc80a678abc3..b4ed7d394d07 100644 --- a/drivers/net/ethernet/marvell/mvneta.c +++ b/drivers/net/ethernet/marvell/mvneta.c | |||
| @@ -1890,8 +1890,8 @@ static void mvneta_rxq_drop_pkts(struct mvneta_port *pp, | |||
| 1890 | if (!data || !(rx_desc->buf_phys_addr)) | 1890 | if (!data || !(rx_desc->buf_phys_addr)) |
| 1891 | continue; | 1891 | continue; |
| 1892 | 1892 | ||
| 1893 | dma_unmap_single(pp->dev->dev.parent, rx_desc->buf_phys_addr, | 1893 | dma_unmap_page(pp->dev->dev.parent, rx_desc->buf_phys_addr, |
| 1894 | MVNETA_RX_BUF_SIZE(pp->pkt_size), DMA_FROM_DEVICE); | 1894 | PAGE_SIZE, DMA_FROM_DEVICE); |
| 1895 | __free_page(data); | 1895 | __free_page(data); |
| 1896 | } | 1896 | } |
| 1897 | } | 1897 | } |
| @@ -2008,8 +2008,8 @@ static int mvneta_rx_swbm(struct napi_struct *napi, | |||
| 2008 | skb_add_rx_frag(rxq->skb, frag_num, page, | 2008 | skb_add_rx_frag(rxq->skb, frag_num, page, |
| 2009 | frag_offset, frag_size, | 2009 | frag_offset, frag_size, |
| 2010 | PAGE_SIZE); | 2010 | PAGE_SIZE); |
| 2011 | dma_unmap_single(dev->dev.parent, phys_addr, | 2011 | dma_unmap_page(dev->dev.parent, phys_addr, |
| 2012 | PAGE_SIZE, DMA_FROM_DEVICE); | 2012 | PAGE_SIZE, DMA_FROM_DEVICE); |
| 2013 | rxq->left_size -= frag_size; | 2013 | rxq->left_size -= frag_size; |
| 2014 | } | 2014 | } |
| 2015 | } else { | 2015 | } else { |
| @@ -2039,9 +2039,8 @@ static int mvneta_rx_swbm(struct napi_struct *napi, | |||
| 2039 | frag_offset, frag_size, | 2039 | frag_offset, frag_size, |
| 2040 | PAGE_SIZE); | 2040 | PAGE_SIZE); |
| 2041 | 2041 | ||
| 2042 | dma_unmap_single(dev->dev.parent, phys_addr, | 2042 | dma_unmap_page(dev->dev.parent, phys_addr, |
| 2043 | PAGE_SIZE, | 2043 | PAGE_SIZE, DMA_FROM_DEVICE); |
| 2044 | DMA_FROM_DEVICE); | ||
| 2045 | 2044 | ||
| 2046 | rxq->left_size -= frag_size; | 2045 | rxq->left_size -= frag_size; |
| 2047 | } | 2046 | } |
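The mvneta hunks pair the unmap calls with how the RX buffers are actually mapped: whole pages, so `dma_unmap_page()` with `PAGE_SIZE` is the matching call rather than `dma_unmap_single()` with the packet size. The DMA API requires map and unmap to agree on both the call flavour and the length, as in this generic sketch (the helpers are illustrative, not mvneta code):

```c
#include <linux/dma-mapping.h>
#include <linux/mm.h>

/* The unmap must mirror the map: same API flavour (page vs. single) and
 * the same size -- here a whole page -- exactly as the hunks above now do.
 */
static dma_addr_t example_map_rx_page(struct device *dev, struct page *page)
{
	return dma_map_page(dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);
}

static void example_unmap_rx_page(struct device *dev, dma_addr_t addr)
{
	dma_unmap_page(dev, addr, PAGE_SIZE, DMA_FROM_DEVICE);
}
```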
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c index 702fec82d806..38cc01beea79 100644 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c | |||
| @@ -3055,10 +3055,12 @@ static int mvpp2_poll(struct napi_struct *napi, int budget) | |||
| 3055 | cause_rx_tx & ~MVPP2_CAUSE_MISC_SUM_MASK); | 3055 | cause_rx_tx & ~MVPP2_CAUSE_MISC_SUM_MASK); |
| 3056 | } | 3056 | } |
| 3057 | 3057 | ||
| 3058 | cause_tx = cause_rx_tx & MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK; | 3058 | if (port->has_tx_irqs) { |
| 3059 | if (cause_tx) { | 3059 | cause_tx = cause_rx_tx & MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK; |
| 3060 | cause_tx >>= MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_OFFSET; | 3060 | if (cause_tx) { |
| 3061 | mvpp2_tx_done(port, cause_tx, qv->sw_thread_id); | 3061 | cause_tx >>= MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_OFFSET; |
| 3062 | mvpp2_tx_done(port, cause_tx, qv->sw_thread_id); | ||
| 3063 | } | ||
| 3062 | } | 3064 | } |
| 3063 | 3065 | ||
| 3064 | /* Process RX packets */ | 3066 | /* Process RX packets */ |
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c index 6785661d1a72..fe49384eba48 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c | |||
| @@ -1286,20 +1286,6 @@ out: | |||
| 1286 | mutex_unlock(&mdev->state_lock); | 1286 | mutex_unlock(&mdev->state_lock); |
| 1287 | } | 1287 | } |
| 1288 | 1288 | ||
| 1289 | #ifdef CONFIG_NET_POLL_CONTROLLER | ||
| 1290 | static void mlx4_en_netpoll(struct net_device *dev) | ||
| 1291 | { | ||
| 1292 | struct mlx4_en_priv *priv = netdev_priv(dev); | ||
| 1293 | struct mlx4_en_cq *cq; | ||
| 1294 | int i; | ||
| 1295 | |||
| 1296 | for (i = 0; i < priv->tx_ring_num[TX]; i++) { | ||
| 1297 | cq = priv->tx_cq[TX][i]; | ||
| 1298 | napi_schedule(&cq->napi); | ||
| 1299 | } | ||
| 1300 | } | ||
| 1301 | #endif | ||
| 1302 | |||
| 1303 | static int mlx4_en_set_rss_steer_rules(struct mlx4_en_priv *priv) | 1289 | static int mlx4_en_set_rss_steer_rules(struct mlx4_en_priv *priv) |
| 1304 | { | 1290 | { |
| 1305 | u64 reg_id; | 1291 | u64 reg_id; |
| @@ -2946,9 +2932,6 @@ static const struct net_device_ops mlx4_netdev_ops = { | |||
| 2946 | .ndo_tx_timeout = mlx4_en_tx_timeout, | 2932 | .ndo_tx_timeout = mlx4_en_tx_timeout, |
| 2947 | .ndo_vlan_rx_add_vid = mlx4_en_vlan_rx_add_vid, | 2933 | .ndo_vlan_rx_add_vid = mlx4_en_vlan_rx_add_vid, |
| 2948 | .ndo_vlan_rx_kill_vid = mlx4_en_vlan_rx_kill_vid, | 2934 | .ndo_vlan_rx_kill_vid = mlx4_en_vlan_rx_kill_vid, |
| 2949 | #ifdef CONFIG_NET_POLL_CONTROLLER | ||
| 2950 | .ndo_poll_controller = mlx4_en_netpoll, | ||
| 2951 | #endif | ||
| 2952 | .ndo_set_features = mlx4_en_set_features, | 2935 | .ndo_set_features = mlx4_en_set_features, |
| 2953 | .ndo_fix_features = mlx4_en_fix_features, | 2936 | .ndo_fix_features = mlx4_en_fix_features, |
| 2954 | .ndo_setup_tc = __mlx4_en_setup_tc, | 2937 | .ndo_setup_tc = __mlx4_en_setup_tc, |
| @@ -2983,9 +2966,6 @@ static const struct net_device_ops mlx4_netdev_ops_master = { | |||
| 2983 | .ndo_set_vf_link_state = mlx4_en_set_vf_link_state, | 2966 | .ndo_set_vf_link_state = mlx4_en_set_vf_link_state, |
| 2984 | .ndo_get_vf_stats = mlx4_en_get_vf_stats, | 2967 | .ndo_get_vf_stats = mlx4_en_get_vf_stats, |
| 2985 | .ndo_get_vf_config = mlx4_en_get_vf_config, | 2968 | .ndo_get_vf_config = mlx4_en_get_vf_config, |
| 2986 | #ifdef CONFIG_NET_POLL_CONTROLLER | ||
| 2987 | .ndo_poll_controller = mlx4_en_netpoll, | ||
| 2988 | #endif | ||
| 2989 | .ndo_set_features = mlx4_en_set_features, | 2969 | .ndo_set_features = mlx4_en_set_features, |
| 2990 | .ndo_fix_features = mlx4_en_fix_features, | 2970 | .ndo_fix_features = mlx4_en_fix_features, |
| 2991 | .ndo_setup_tc = __mlx4_en_setup_tc, | 2971 | .ndo_setup_tc = __mlx4_en_setup_tc, |
diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c index 1f3372c1802e..2df92dbd38e1 100644 --- a/drivers/net/ethernet/mellanox/mlx4/eq.c +++ b/drivers/net/ethernet/mellanox/mlx4/eq.c | |||
| @@ -240,7 +240,8 @@ static void mlx4_set_eq_affinity_hint(struct mlx4_priv *priv, int vec) | |||
| 240 | struct mlx4_dev *dev = &priv->dev; | 240 | struct mlx4_dev *dev = &priv->dev; |
| 241 | struct mlx4_eq *eq = &priv->eq_table.eq[vec]; | 241 | struct mlx4_eq *eq = &priv->eq_table.eq[vec]; |
| 242 | 242 | ||
| 243 | if (!eq->affinity_mask || cpumask_empty(eq->affinity_mask)) | 243 | if (!cpumask_available(eq->affinity_mask) || |
| 244 | cpumask_empty(eq->affinity_mask)) | ||
| 244 | return; | 245 | return; |
| 245 | 246 | ||
| 246 | hint_err = irq_set_affinity_hint(eq->irq, eq->affinity_mask); | 247 | hint_err = irq_set_affinity_hint(eq->irq, eq->affinity_mask); |
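The mlx4 eq.c fix replaces a direct NULL test on a `cpumask_var_t` with `cpumask_available()`. With `CONFIG_CPUMASK_OFFSTACK=n` the variable is an embedded array rather than a pointer, so `!eq->affinity_mask` is not a meaningful check there; `cpumask_available()` handles both configurations. Typical usage, as a standalone sketch:

```c
#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/printk.h>

/* cpumask_var_t is a pointer only with CONFIG_CPUMASK_OFFSTACK=y; with it
 * disabled the variable is an on-stack array. cpumask_available() hides
 * that difference, a plain "!mask" test does not.
 */
static int example_report_first_cpu(void)
{
	cpumask_var_t mask;

	if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;

	cpumask_copy(mask, cpu_online_mask);

	if (cpumask_available(mask) && !cpumask_empty(mask))
		pr_info("first online cpu: %u\n", cpumask_first(mask));

	free_cpumask_var(mask);
	return 0;
}
```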
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c index 3ce14d42ddc8..a53736c26c0c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c | |||
| @@ -206,7 +206,7 @@ static void poll_timeout(struct mlx5_cmd_work_ent *ent) | |||
| 206 | u8 own; | 206 | u8 own; |
| 207 | 207 | ||
| 208 | do { | 208 | do { |
| 209 | own = ent->lay->status_own; | 209 | own = READ_ONCE(ent->lay->status_own); |
| 210 | if (!(own & CMD_OWNER_HW)) { | 210 | if (!(own & CMD_OWNER_HW)) { |
| 211 | ent->ret = 0; | 211 | ent->ret = 0; |
| 212 | return; | 212 | return; |
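The mlx5 command-polling fix wraps the load of the firmware-updated ownership byte in `READ_ONCE()`, which stops the compiler from hoisting the read out of the loop or reusing a stale value. Any busy-wait on memory written by another agent wants the same treatment; a generic, hedged example (the bit meaning and timeout handling are illustrative):

```c
#include <linux/compiler.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/ktime.h>
#include <linux/types.h>

#define EXAMPLE_OWNER_HW	0x1	/* illustrative ownership bit */

/* Poll a status byte that firmware writes behind our back. READ_ONCE()
 * forces a fresh load on every iteration instead of letting the compiler
 * cache the first value in a register.
 */
static int example_poll_ownership(u8 *status, unsigned long timeout_us)
{
	ktime_t end = ktime_add_us(ktime_get(), timeout_us);

	do {
		if (!(READ_ONCE(*status) & EXAMPLE_OWNER_HW))
			return 0;		/* firmware released ownership */
		udelay(1);
	} while (ktime_before(ktime_get(), end));

	return -ETIMEDOUT;
}
```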
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.c index eddd7702680b..e88340e196f7 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.c | |||
| @@ -183,12 +183,13 @@ static const struct tlsdev_ops mlx5e_tls_ops = { | |||
| 183 | 183 | ||
| 184 | void mlx5e_tls_build_netdev(struct mlx5e_priv *priv) | 184 | void mlx5e_tls_build_netdev(struct mlx5e_priv *priv) |
| 185 | { | 185 | { |
| 186 | u32 caps = mlx5_accel_tls_device_caps(priv->mdev); | ||
| 187 | struct net_device *netdev = priv->netdev; | 186 | struct net_device *netdev = priv->netdev; |
| 187 | u32 caps; | ||
| 188 | 188 | ||
| 189 | if (!mlx5_accel_is_tls_device(priv->mdev)) | 189 | if (!mlx5_accel_is_tls_device(priv->mdev)) |
| 190 | return; | 190 | return; |
| 191 | 191 | ||
| 192 | caps = mlx5_accel_tls_device_caps(priv->mdev); | ||
| 192 | if (caps & MLX5_ACCEL_TLS_TX) { | 193 | if (caps & MLX5_ACCEL_TLS_TX) { |
| 193 | netdev->features |= NETIF_F_HW_TLS_TX; | 194 | netdev->features |= NETIF_F_HW_TLS_TX; |
| 194 | netdev->hw_features |= NETIF_F_HW_TLS_TX; | 195 | netdev->hw_features |= NETIF_F_HW_TLS_TX; |
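The TLS hunk defers reading the accelerator capabilities until after the device has been confirmed to support TLS, so the capability query never runs on hardware without the block. The general "check support, then query, then advertise" ordering looks like this sketch; every `example_*` name and the capability bit are placeholders, not mlx5 APIs.

```c
#include <linux/netdevice.h>

struct example_dev;					/* opaque placeholder */

/* Placeholders for the real support/capability queries. */
static bool example_is_feature_supported(struct example_dev *mdev) { return false; }
static u32 example_query_feature_caps(struct example_dev *mdev) { return 0; }

#define EXAMPLE_CAP_TX	0x1				/* illustrative bit */

/* Query capabilities only after the support check, mirroring the hunk:
 * devices without the feature never have their capability block read.
 */
static void example_build_feature(struct net_device *netdev,
				  struct example_dev *mdev)
{
	u32 caps;

	if (!example_is_feature_supported(mdev))
		return;

	caps = example_query_feature_caps(mdev);
	if (caps & EXAMPLE_CAP_TX)
		netdev->hw_features |= NETIF_F_HW_TLS_TX;
}
```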
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 5a7939e70190..54118b77dc1f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c | |||
| @@ -4315,22 +4315,6 @@ static int mlx5e_xdp(struct net_device *dev, struct netdev_bpf *xdp) | |||
| 4315 | } | 4315 | } |
| 4316 | } | 4316 | } |
| 4317 | 4317 | ||
| 4318 | #ifdef CONFIG_NET_POLL_CONTROLLER | ||
| 4319 | /* Fake "interrupt" called by netpoll (eg netconsole) to send skbs without | ||
| 4320 | * reenabling interrupts. | ||
| 4321 | */ | ||
| 4322 | static void mlx5e_netpoll(struct net_device *dev) | ||
| 4323 | { | ||
| 4324 | struct mlx5e_priv *priv = netdev_priv(dev); | ||
| 4325 | struct mlx5e_channels *chs = &priv->channels; | ||
| 4326 | |||
| 4327 | int i; | ||
| 4328 | |||
| 4329 | for (i = 0; i < chs->num; i++) | ||
| 4330 | napi_schedule(&chs->c[i]->napi); | ||
| 4331 | } | ||
| 4332 | #endif | ||
| 4333 | |||
| 4334 | static const struct net_device_ops mlx5e_netdev_ops = { | 4318 | static const struct net_device_ops mlx5e_netdev_ops = { |
| 4335 | .ndo_open = mlx5e_open, | 4319 | .ndo_open = mlx5e_open, |
| 4336 | .ndo_stop = mlx5e_close, | 4320 | .ndo_stop = mlx5e_close, |
| @@ -4356,9 +4340,6 @@ static const struct net_device_ops mlx5e_netdev_ops = { | |||
| 4356 | #ifdef CONFIG_MLX5_EN_ARFS | 4340 | #ifdef CONFIG_MLX5_EN_ARFS |
| 4357 | .ndo_rx_flow_steer = mlx5e_rx_flow_steer, | 4341 | .ndo_rx_flow_steer = mlx5e_rx_flow_steer, |
| 4358 | #endif | 4342 | #endif |
| 4359 | #ifdef CONFIG_NET_POLL_CONTROLLER | ||
| 4360 | .ndo_poll_controller = mlx5e_netpoll, | ||
| 4361 | #endif | ||
| 4362 | #ifdef CONFIG_MLX5_ESWITCH | 4343 | #ifdef CONFIG_MLX5_ESWITCH |
| 4363 | /* SRIOV E-Switch NDOs */ | 4344 | /* SRIOV E-Switch NDOs */ |
| 4364 | .ndo_set_vf_mac = mlx5e_set_vf_mac, | 4345 | .ndo_set_vf_mac = mlx5e_set_vf_mac, |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/transobj.c b/drivers/net/ethernet/mellanox/mlx5/core/transobj.c index dae1c5c5d27c..d2f76070ea7c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/transobj.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/transobj.c | |||
| @@ -509,7 +509,7 @@ static int mlx5_hairpin_modify_sq(struct mlx5_core_dev *peer_mdev, u32 sqn, | |||
| 509 | 509 | ||
| 510 | sqc = MLX5_ADDR_OF(modify_sq_in, in, ctx); | 510 | sqc = MLX5_ADDR_OF(modify_sq_in, in, ctx); |
| 511 | 511 | ||
| 512 | if (next_state == MLX5_RQC_STATE_RDY) { | 512 | if (next_state == MLX5_SQC_STATE_RDY) { |
| 513 | MLX5_SET(sqc, sqc, hairpin_peer_rq, peer_rq); | 513 | MLX5_SET(sqc, sqc, hairpin_peer_rq, peer_rq); |
| 514 | MLX5_SET(sqc, sqc, hairpin_peer_vhca, peer_vhca); | 514 | MLX5_SET(sqc, sqc, hairpin_peer_vhca, peer_vhca); |
| 515 | } | 515 | } |
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c index 930700413b1d..b492152c8881 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c | |||
| @@ -44,8 +44,8 @@ | |||
| 44 | #define MLXSW_SP_FWREV_MINOR_TO_BRANCH(minor) ((minor) / 100) | 44 | #define MLXSW_SP_FWREV_MINOR_TO_BRANCH(minor) ((minor) / 100) |
| 45 | 45 | ||
| 46 | #define MLXSW_SP1_FWREV_MAJOR 13 | 46 | #define MLXSW_SP1_FWREV_MAJOR 13 |
| 47 | #define MLXSW_SP1_FWREV_MINOR 1702 | 47 | #define MLXSW_SP1_FWREV_MINOR 1703 |
| 48 | #define MLXSW_SP1_FWREV_SUBMINOR 6 | 48 | #define MLXSW_SP1_FWREV_SUBMINOR 4 |
| 49 | #define MLXSW_SP1_FWREV_CAN_RESET_MINOR 1702 | 49 | #define MLXSW_SP1_FWREV_CAN_RESET_MINOR 1702 |
| 50 | 50 | ||
| 51 | static const struct mlxsw_fw_rev mlxsw_sp1_fw_rev = { | 51 | static const struct mlxsw_fw_rev mlxsw_sp1_fw_rev = { |
diff --git a/drivers/net/ethernet/mscc/ocelot_board.c b/drivers/net/ethernet/mscc/ocelot_board.c index 26bb3b18f3be..3cdf63e35b53 100644 --- a/drivers/net/ethernet/mscc/ocelot_board.c +++ b/drivers/net/ethernet/mscc/ocelot_board.c | |||
| @@ -91,7 +91,7 @@ static irqreturn_t ocelot_xtr_irq_handler(int irq, void *arg) | |||
| 91 | struct sk_buff *skb; | 91 | struct sk_buff *skb; |
| 92 | struct net_device *dev; | 92 | struct net_device *dev; |
| 93 | u32 *buf; | 93 | u32 *buf; |
| 94 | int sz, len; | 94 | int sz, len, buf_len; |
| 95 | u32 ifh[4]; | 95 | u32 ifh[4]; |
| 96 | u32 val; | 96 | u32 val; |
| 97 | struct frame_info info; | 97 | struct frame_info info; |
| @@ -116,14 +116,20 @@ static irqreturn_t ocelot_xtr_irq_handler(int irq, void *arg) | |||
| 116 | err = -ENOMEM; | 116 | err = -ENOMEM; |
| 117 | break; | 117 | break; |
| 118 | } | 118 | } |
| 119 | buf = (u32 *)skb_put(skb, info.len); | 119 | buf_len = info.len - ETH_FCS_LEN; |
| 120 | buf = (u32 *)skb_put(skb, buf_len); | ||
| 120 | 121 | ||
| 121 | len = 0; | 122 | len = 0; |
| 122 | do { | 123 | do { |
| 123 | sz = ocelot_rx_frame_word(ocelot, grp, false, &val); | 124 | sz = ocelot_rx_frame_word(ocelot, grp, false, &val); |
| 124 | *buf++ = val; | 125 | *buf++ = val; |
| 125 | len += sz; | 126 | len += sz; |
| 126 | } while ((sz == 4) && (len < info.len)); | 127 | } while (len < buf_len); |
| 128 | |||
| 129 | /* Read the FCS and discard it */ | ||
| 130 | sz = ocelot_rx_frame_word(ocelot, grp, false, &val); | ||
| 131 | /* Update the statistics if part of the FCS was read before */ | ||
| 132 | len -= ETH_FCS_LEN - sz; | ||
| 127 | 133 | ||
| 128 | if (sz < 0) { | 134 | if (sz < 0) { |
| 129 | err = sz; | 135 | err = sz; |
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c index 253bdaef1505..8ed38fd5a852 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c | |||
| @@ -3146,21 +3146,6 @@ nfp_net_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid) | |||
| 3146 | return nfp_net_reconfig_mbox(nn, NFP_NET_CFG_MBOX_CMD_CTAG_FILTER_KILL); | 3146 | return nfp_net_reconfig_mbox(nn, NFP_NET_CFG_MBOX_CMD_CTAG_FILTER_KILL); |
| 3147 | } | 3147 | } |
| 3148 | 3148 | ||
| 3149 | #ifdef CONFIG_NET_POLL_CONTROLLER | ||
| 3150 | static void nfp_net_netpoll(struct net_device *netdev) | ||
| 3151 | { | ||
| 3152 | struct nfp_net *nn = netdev_priv(netdev); | ||
| 3153 | int i; | ||
| 3154 | |||
| 3155 | /* nfp_net's NAPIs are statically allocated so even if there is a race | ||
| 3156 | * with reconfig path this will simply try to schedule some disabled | ||
| 3157 | * NAPI instances. | ||
| 3158 | */ | ||
| 3159 | for (i = 0; i < nn->dp.num_stack_tx_rings; i++) | ||
| 3160 | napi_schedule_irqoff(&nn->r_vecs[i].napi); | ||
| 3161 | } | ||
| 3162 | #endif | ||
| 3163 | |||
| 3164 | static void nfp_net_stat64(struct net_device *netdev, | 3149 | static void nfp_net_stat64(struct net_device *netdev, |
| 3165 | struct rtnl_link_stats64 *stats) | 3150 | struct rtnl_link_stats64 *stats) |
| 3166 | { | 3151 | { |
| @@ -3519,9 +3504,6 @@ const struct net_device_ops nfp_net_netdev_ops = { | |||
| 3519 | .ndo_get_stats64 = nfp_net_stat64, | 3504 | .ndo_get_stats64 = nfp_net_stat64, |
| 3520 | .ndo_vlan_rx_add_vid = nfp_net_vlan_rx_add_vid, | 3505 | .ndo_vlan_rx_add_vid = nfp_net_vlan_rx_add_vid, |
| 3521 | .ndo_vlan_rx_kill_vid = nfp_net_vlan_rx_kill_vid, | 3506 | .ndo_vlan_rx_kill_vid = nfp_net_vlan_rx_kill_vid, |
| 3522 | #ifdef CONFIG_NET_POLL_CONTROLLER | ||
| 3523 | .ndo_poll_controller = nfp_net_netpoll, | ||
| 3524 | #endif | ||
| 3525 | .ndo_set_vf_mac = nfp_app_set_vf_mac, | 3507 | .ndo_set_vf_mac = nfp_app_set_vf_mac, |
| 3526 | .ndo_set_vf_vlan = nfp_app_set_vf_vlan, | 3508 | .ndo_set_vf_vlan = nfp_app_set_vf_vlan, |
| 3527 | .ndo_set_vf_spoofchk = nfp_app_set_vf_spoofchk, | 3509 | .ndo_set_vf_spoofchk = nfp_app_set_vf_spoofchk, |
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c index 6bb76e6d3c14..f5459de6d60a 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c +++ b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c | |||
| @@ -190,10 +190,8 @@ qed_dcbx_dp_protocol(struct qed_hwfn *p_hwfn, struct qed_dcbx_results *p_data) | |||
| 190 | 190 | ||
| 191 | static void | 191 | static void |
| 192 | qed_dcbx_set_params(struct qed_dcbx_results *p_data, | 192 | qed_dcbx_set_params(struct qed_dcbx_results *p_data, |
| 193 | struct qed_hw_info *p_info, | 193 | struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, |
| 194 | bool enable, | 194 | bool enable, u8 prio, u8 tc, |
| 195 | u8 prio, | ||
| 196 | u8 tc, | ||
| 197 | enum dcbx_protocol_type type, | 195 | enum dcbx_protocol_type type, |
| 198 | enum qed_pci_personality personality) | 196 | enum qed_pci_personality personality) |
| 199 | { | 197 | { |
| @@ -206,19 +204,30 @@ qed_dcbx_set_params(struct qed_dcbx_results *p_data, | |||
| 206 | else | 204 | else |
| 207 | p_data->arr[type].update = DONT_UPDATE_DCB_DSCP; | 205 | p_data->arr[type].update = DONT_UPDATE_DCB_DSCP; |
| 208 | 206 | ||
| 207 | /* Do not add vlan tag 0 when DCB is enabled and port in UFP/OV mode */ | ||
| 208 | if ((test_bit(QED_MF_8021Q_TAGGING, &p_hwfn->cdev->mf_bits) || | ||
| 209 | test_bit(QED_MF_8021AD_TAGGING, &p_hwfn->cdev->mf_bits))) | ||
| 210 | p_data->arr[type].dont_add_vlan0 = true; | ||
| 211 | |||
| 209 | /* QM reconf data */ | 212 | /* QM reconf data */ |
| 210 | if (p_info->personality == personality) | 213 | if (p_hwfn->hw_info.personality == personality) |
| 211 | qed_hw_info_set_offload_tc(p_info, tc); | 214 | qed_hw_info_set_offload_tc(&p_hwfn->hw_info, tc); |
| 215 | |||
| 216 | /* Configure dcbx vlan priority in doorbell block for roce EDPM */ | ||
| 217 | if (test_bit(QED_MF_UFP_SPECIFIC, &p_hwfn->cdev->mf_bits) && | ||
| 218 | type == DCBX_PROTOCOL_ROCE) { | ||
| 219 | qed_wr(p_hwfn, p_ptt, DORQ_REG_TAG1_OVRD_MODE, 1); | ||
| 220 | qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_PCP_BB_K2, prio << 1); | ||
| 221 | } | ||
| 212 | } | 222 | } |
| 213 | 223 | ||
| 214 | /* Update app protocol data and hw_info fields with the TLV info */ | 224 | /* Update app protocol data and hw_info fields with the TLV info */ |
| 215 | static void | 225 | static void |
| 216 | qed_dcbx_update_app_info(struct qed_dcbx_results *p_data, | 226 | qed_dcbx_update_app_info(struct qed_dcbx_results *p_data, |
| 217 | struct qed_hwfn *p_hwfn, | 227 | struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, |
| 218 | bool enable, | 228 | bool enable, u8 prio, u8 tc, |
| 219 | u8 prio, u8 tc, enum dcbx_protocol_type type) | 229 | enum dcbx_protocol_type type) |
| 220 | { | 230 | { |
| 221 | struct qed_hw_info *p_info = &p_hwfn->hw_info; | ||
| 222 | enum qed_pci_personality personality; | 231 | enum qed_pci_personality personality; |
| 223 | enum dcbx_protocol_type id; | 232 | enum dcbx_protocol_type id; |
| 224 | int i; | 233 | int i; |
| @@ -231,7 +240,7 @@ qed_dcbx_update_app_info(struct qed_dcbx_results *p_data, | |||
| 231 | 240 | ||
| 232 | personality = qed_dcbx_app_update[i].personality; | 241 | personality = qed_dcbx_app_update[i].personality; |
| 233 | 242 | ||
| 234 | qed_dcbx_set_params(p_data, p_info, enable, | 243 | qed_dcbx_set_params(p_data, p_hwfn, p_ptt, enable, |
| 235 | prio, tc, type, personality); | 244 | prio, tc, type, personality); |
| 236 | } | 245 | } |
| 237 | } | 246 | } |
| @@ -265,7 +274,7 @@ qed_dcbx_get_app_protocol_type(struct qed_hwfn *p_hwfn, | |||
| 265 | * reconfiguring QM. Get protocol specific data for PF update ramrod command. | 274 | * reconfiguring QM. Get protocol specific data for PF update ramrod command. |
| 266 | */ | 275 | */ |
| 267 | static int | 276 | static int |
| 268 | qed_dcbx_process_tlv(struct qed_hwfn *p_hwfn, | 277 | qed_dcbx_process_tlv(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, |
| 269 | struct qed_dcbx_results *p_data, | 278 | struct qed_dcbx_results *p_data, |
| 270 | struct dcbx_app_priority_entry *p_tbl, | 279 | struct dcbx_app_priority_entry *p_tbl, |
| 271 | u32 pri_tc_tbl, int count, u8 dcbx_version) | 280 | u32 pri_tc_tbl, int count, u8 dcbx_version) |
| @@ -309,7 +318,7 @@ qed_dcbx_process_tlv(struct qed_hwfn *p_hwfn, | |||
| 309 | enable = true; | 318 | enable = true; |
| 310 | } | 319 | } |
| 311 | 320 | ||
| 312 | qed_dcbx_update_app_info(p_data, p_hwfn, enable, | 321 | qed_dcbx_update_app_info(p_data, p_hwfn, p_ptt, enable, |
| 313 | priority, tc, type); | 322 | priority, tc, type); |
| 314 | } | 323 | } |
| 315 | } | 324 | } |
| @@ -331,7 +340,7 @@ qed_dcbx_process_tlv(struct qed_hwfn *p_hwfn, | |||
| 331 | continue; | 340 | continue; |
| 332 | 341 | ||
| 333 | enable = (type == DCBX_PROTOCOL_ETH) ? false : !!dcbx_version; | 342 | enable = (type == DCBX_PROTOCOL_ETH) ? false : !!dcbx_version; |
| 334 | qed_dcbx_update_app_info(p_data, p_hwfn, enable, | 343 | qed_dcbx_update_app_info(p_data, p_hwfn, p_ptt, enable, |
| 335 | priority, tc, type); | 344 | priority, tc, type); |
| 336 | } | 345 | } |
| 337 | 346 | ||
| @@ -341,7 +350,8 @@ qed_dcbx_process_tlv(struct qed_hwfn *p_hwfn, | |||
| 341 | /* Parse app TLV's to update TC information in hw_info structure for | 350 | /* Parse app TLV's to update TC information in hw_info structure for |
| 342 | * reconfiguring QM. Get protocol specific data for PF update ramrod command. | 351 | * reconfiguring QM. Get protocol specific data for PF update ramrod command. |
| 343 | */ | 352 | */ |
| 344 | static int qed_dcbx_process_mib_info(struct qed_hwfn *p_hwfn) | 353 | static int |
| 354 | qed_dcbx_process_mib_info(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) | ||
| 345 | { | 355 | { |
| 346 | struct dcbx_app_priority_feature *p_app; | 356 | struct dcbx_app_priority_feature *p_app; |
| 347 | struct dcbx_app_priority_entry *p_tbl; | 357 | struct dcbx_app_priority_entry *p_tbl; |
| @@ -365,7 +375,7 @@ static int qed_dcbx_process_mib_info(struct qed_hwfn *p_hwfn) | |||
| 365 | p_info = &p_hwfn->hw_info; | 375 | p_info = &p_hwfn->hw_info; |
| 366 | num_entries = QED_MFW_GET_FIELD(p_app->flags, DCBX_APP_NUM_ENTRIES); | 376 | num_entries = QED_MFW_GET_FIELD(p_app->flags, DCBX_APP_NUM_ENTRIES); |
| 367 | 377 | ||
| 368 | rc = qed_dcbx_process_tlv(p_hwfn, &data, p_tbl, pri_tc_tbl, | 378 | rc = qed_dcbx_process_tlv(p_hwfn, p_ptt, &data, p_tbl, pri_tc_tbl, |
| 369 | num_entries, dcbx_version); | 379 | num_entries, dcbx_version); |
| 370 | if (rc) | 380 | if (rc) |
| 371 | return rc; | 381 | return rc; |
| @@ -891,7 +901,7 @@ qed_dcbx_mib_update_event(struct qed_hwfn *p_hwfn, | |||
| 891 | return rc; | 901 | return rc; |
| 892 | 902 | ||
| 893 | if (type == QED_DCBX_OPERATIONAL_MIB) { | 903 | if (type == QED_DCBX_OPERATIONAL_MIB) { |
| 894 | rc = qed_dcbx_process_mib_info(p_hwfn); | 904 | rc = qed_dcbx_process_mib_info(p_hwfn, p_ptt); |
| 895 | if (!rc) { | 905 | if (!rc) { |
| 896 | /* reconfigure tcs of QM queues according | 906 | /* reconfigure tcs of QM queues according |
| 897 | * to negotiation results | 907 | * to negotiation results |
| @@ -954,6 +964,7 @@ static void qed_dcbx_update_protocol_data(struct protocol_dcb_data *p_data, | |||
| 954 | p_data->dcb_enable_flag = p_src->arr[type].enable; | 964 | p_data->dcb_enable_flag = p_src->arr[type].enable; |
| 955 | p_data->dcb_priority = p_src->arr[type].priority; | 965 | p_data->dcb_priority = p_src->arr[type].priority; |
| 956 | p_data->dcb_tc = p_src->arr[type].tc; | 966 | p_data->dcb_tc = p_src->arr[type].tc; |
| 967 | p_data->dcb_dont_add_vlan0 = p_src->arr[type].dont_add_vlan0; | ||
| 957 | } | 968 | } |
| 958 | 969 | ||
| 959 | /* Set pf update ramrod command params */ | 970 | /* Set pf update ramrod command params */ |
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dcbx.h b/drivers/net/ethernet/qlogic/qed/qed_dcbx.h index a4d688c04e18..01f253ea4b22 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dcbx.h +++ b/drivers/net/ethernet/qlogic/qed/qed_dcbx.h | |||
| @@ -55,6 +55,7 @@ struct qed_dcbx_app_data { | |||
| 55 | u8 update; /* Update indication */ | 55 | u8 update; /* Update indication */ |
| 56 | u8 priority; /* Priority */ | 56 | u8 priority; /* Priority */ |
| 57 | u8 tc; /* Traffic Class */ | 57 | u8 tc; /* Traffic Class */ |
| 58 | bool dont_add_vlan0; /* Do not insert a vlan tag with id 0 */ | ||
| 58 | }; | 59 | }; |
| 59 | 60 | ||
| 60 | #define QED_DCBX_VERSION_DISABLED 0 | 61 | #define QED_DCBX_VERSION_DISABLED 0 |
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c index 016ca8a7ec8a..97f073fd3725 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dev.c +++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c | |||
| @@ -1706,7 +1706,7 @@ static int qed_vf_start(struct qed_hwfn *p_hwfn, | |||
| 1706 | int qed_hw_init(struct qed_dev *cdev, struct qed_hw_init_params *p_params) | 1706 | int qed_hw_init(struct qed_dev *cdev, struct qed_hw_init_params *p_params) |
| 1707 | { | 1707 | { |
| 1708 | struct qed_load_req_params load_req_params; | 1708 | struct qed_load_req_params load_req_params; |
| 1709 | u32 load_code, param, drv_mb_param; | 1709 | u32 load_code, resp, param, drv_mb_param; |
| 1710 | bool b_default_mtu = true; | 1710 | bool b_default_mtu = true; |
| 1711 | struct qed_hwfn *p_hwfn; | 1711 | struct qed_hwfn *p_hwfn; |
| 1712 | int rc = 0, mfw_rc, i; | 1712 | int rc = 0, mfw_rc, i; |
| @@ -1852,6 +1852,19 @@ int qed_hw_init(struct qed_dev *cdev, struct qed_hw_init_params *p_params) | |||
| 1852 | 1852 | ||
| 1853 | if (IS_PF(cdev)) { | 1853 | if (IS_PF(cdev)) { |
| 1854 | p_hwfn = QED_LEADING_HWFN(cdev); | 1854 | p_hwfn = QED_LEADING_HWFN(cdev); |
| 1855 | |||
| 1856 | /* Get pre-negotiated values for stag, bandwidth etc. */ | ||
| 1857 | DP_VERBOSE(p_hwfn, | ||
| 1858 | QED_MSG_SPQ, | ||
| 1859 | "Sending GET_OEM_UPDATES command to trigger stag/bandwidth attention handling\n"); | ||
| 1860 | drv_mb_param = 1 << DRV_MB_PARAM_DUMMY_OEM_UPDATES_OFFSET; | ||
| 1861 | rc = qed_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt, | ||
| 1862 | DRV_MSG_CODE_GET_OEM_UPDATES, | ||
| 1863 | drv_mb_param, &resp, ¶m); | ||
| 1864 | if (rc) | ||
| 1865 | DP_NOTICE(p_hwfn, | ||
| 1866 | "Failed to send GET_OEM_UPDATES attention request\n"); | ||
| 1867 | |||
| 1855 | drv_mb_param = STORM_FW_VERSION; | 1868 | drv_mb_param = STORM_FW_VERSION; |
| 1856 | rc = qed_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt, | 1869 | rc = qed_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt, |
| 1857 | DRV_MSG_CODE_OV_UPDATE_STORM_FW_VER, | 1870 | DRV_MSG_CODE_OV_UPDATE_STORM_FW_VER, |
diff --git a/drivers/net/ethernet/qlogic/qed/qed_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_hsi.h index 8faceb691657..9b3ef00e5782 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_hsi.h +++ b/drivers/net/ethernet/qlogic/qed/qed_hsi.h | |||
| @@ -12414,6 +12414,7 @@ struct public_drv_mb { | |||
| 12414 | #define DRV_MSG_SET_RESOURCE_VALUE_MSG 0x35000000 | 12414 | #define DRV_MSG_SET_RESOURCE_VALUE_MSG 0x35000000 |
| 12415 | #define DRV_MSG_CODE_OV_UPDATE_WOL 0x38000000 | 12415 | #define DRV_MSG_CODE_OV_UPDATE_WOL 0x38000000 |
| 12416 | #define DRV_MSG_CODE_OV_UPDATE_ESWITCH_MODE 0x39000000 | 12416 | #define DRV_MSG_CODE_OV_UPDATE_ESWITCH_MODE 0x39000000 |
| 12417 | #define DRV_MSG_CODE_GET_OEM_UPDATES 0x41000000 | ||
| 12417 | 12418 | ||
| 12418 | #define DRV_MSG_CODE_BW_UPDATE_ACK 0x32000000 | 12419 | #define DRV_MSG_CODE_BW_UPDATE_ACK 0x32000000 |
| 12419 | #define DRV_MSG_CODE_NIG_DRAIN 0x30000000 | 12420 | #define DRV_MSG_CODE_NIG_DRAIN 0x30000000 |
| @@ -12541,6 +12542,9 @@ struct public_drv_mb { | |||
| 12541 | #define DRV_MB_PARAM_ESWITCH_MODE_VEB 0x1 | 12542 | #define DRV_MB_PARAM_ESWITCH_MODE_VEB 0x1 |
| 12542 | #define DRV_MB_PARAM_ESWITCH_MODE_VEPA 0x2 | 12543 | #define DRV_MB_PARAM_ESWITCH_MODE_VEPA 0x2 |
| 12543 | 12544 | ||
| 12545 | #define DRV_MB_PARAM_DUMMY_OEM_UPDATES_MASK 0x1 | ||
| 12546 | #define DRV_MB_PARAM_DUMMY_OEM_UPDATES_OFFSET 0 | ||
| 12547 | |||
| 12544 | #define DRV_MB_PARAM_SET_LED_MODE_OPER 0x0 | 12548 | #define DRV_MB_PARAM_SET_LED_MODE_OPER 0x0 |
| 12545 | #define DRV_MB_PARAM_SET_LED_MODE_ON 0x1 | 12549 | #define DRV_MB_PARAM_SET_LED_MODE_ON 0x1 |
| 12546 | #define DRV_MB_PARAM_SET_LED_MODE_OFF 0x2 | 12550 | #define DRV_MB_PARAM_SET_LED_MODE_OFF 0x2 |
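The qed_dev.c hunk sends the new GET_OEM_UPDATES mailbox command right after load, with a parameter built by shifting a 1 into the DUMMY_OEM_UPDATES field defined in the qed_hsi.h hunk above. The encoding is the usual mask/offset pair; a standalone illustration of packing and unpacking that field (the helper names are hypothetical, the defines are copied from the hunk):

```c
#include <linux/types.h>

#define DRV_MB_PARAM_DUMMY_OEM_UPDATES_MASK	0x1
#define DRV_MB_PARAM_DUMMY_OEM_UPDATES_OFFSET	0

/* Pack the one-bit "dummy OEM updates" request into the 32-bit mailbox
 * parameter, matching "1 << DRV_MB_PARAM_DUMMY_OEM_UPDATES_OFFSET" in
 * qed_hw_init() above.
 */
static u32 example_build_oem_updates_param(bool request)
{
	return (request & DRV_MB_PARAM_DUMMY_OEM_UPDATES_MASK) <<
	       DRV_MB_PARAM_DUMMY_OEM_UPDATES_OFFSET;
}

static bool example_oem_updates_requested(u32 param)
{
	return (param >> DRV_MB_PARAM_DUMMY_OEM_UPDATES_OFFSET) &
	       DRV_MB_PARAM_DUMMY_OEM_UPDATES_MASK;
}
```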
diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c index 5d37ec7e9b0b..58c7eb9d8e1b 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c +++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c | |||
| @@ -1581,13 +1581,29 @@ static void qed_mcp_update_stag(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) | |||
| 1581 | p_hwfn->mcp_info->func_info.ovlan = (u16)shmem_info.ovlan_stag & | 1581 | p_hwfn->mcp_info->func_info.ovlan = (u16)shmem_info.ovlan_stag & |
| 1582 | FUNC_MF_CFG_OV_STAG_MASK; | 1582 | FUNC_MF_CFG_OV_STAG_MASK; |
| 1583 | p_hwfn->hw_info.ovlan = p_hwfn->mcp_info->func_info.ovlan; | 1583 | p_hwfn->hw_info.ovlan = p_hwfn->mcp_info->func_info.ovlan; |
| 1584 | if ((p_hwfn->hw_info.hw_mode & BIT(MODE_MF_SD)) && | 1584 | if (test_bit(QED_MF_OVLAN_CLSS, &p_hwfn->cdev->mf_bits)) { |
| 1585 | (p_hwfn->hw_info.ovlan != QED_MCP_VLAN_UNSET)) { | 1585 | if (p_hwfn->hw_info.ovlan != QED_MCP_VLAN_UNSET) { |
| 1586 | qed_wr(p_hwfn, p_ptt, | 1586 | qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_FUNC_TAG_VALUE, |
| 1587 | NIG_REG_LLH_FUNC_TAG_VALUE, p_hwfn->hw_info.ovlan); | 1587 | p_hwfn->hw_info.ovlan); |
| 1588 | qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_FUNC_TAG_EN, 1); | ||
| 1589 | |||
| 1590 | /* Configure DB to add external vlan to EDPM packets */ | ||
| 1591 | qed_wr(p_hwfn, p_ptt, DORQ_REG_TAG1_OVRD_MODE, 1); | ||
| 1592 | qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_EXT_VID_BB_K2, | ||
| 1593 | p_hwfn->hw_info.ovlan); | ||
| 1594 | } else { | ||
| 1595 | qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_FUNC_TAG_EN, 0); | ||
| 1596 | qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_FUNC_TAG_VALUE, 0); | ||
| 1597 | qed_wr(p_hwfn, p_ptt, DORQ_REG_TAG1_OVRD_MODE, 0); | ||
| 1598 | qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_EXT_VID_BB_K2, 0); | ||
| 1599 | } | ||
| 1600 | |||
| 1588 | qed_sp_pf_update_stag(p_hwfn); | 1601 | qed_sp_pf_update_stag(p_hwfn); |
| 1589 | } | 1602 | } |
| 1590 | 1603 | ||
| 1604 | DP_VERBOSE(p_hwfn, QED_MSG_SP, "ovlan = %d hw_mode = 0x%x\n", | ||
| 1605 | p_hwfn->mcp_info->func_info.ovlan, p_hwfn->hw_info.hw_mode); | ||
| 1606 | |||
| 1591 | /* Acknowledge the MFW */ | 1607 | /* Acknowledge the MFW */ |
| 1592 | qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_S_TAG_UPDATE_ACK, 0, | 1608 | qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_S_TAG_UPDATE_ACK, 0, |
| 1593 | &resp, ¶m); | 1609 | &resp, ¶m); |
diff --git a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h index f736f70956fd..2440970882c4 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h +++ b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h | |||
| @@ -216,6 +216,12 @@ | |||
| 216 | 0x00c000UL | 216 | 0x00c000UL |
| 217 | #define DORQ_REG_IFEN \ | 217 | #define DORQ_REG_IFEN \ |
| 218 | 0x100040UL | 218 | 0x100040UL |
| 219 | #define DORQ_REG_TAG1_OVRD_MODE \ | ||
| 220 | 0x1008b4UL | ||
| 221 | #define DORQ_REG_PF_PCP_BB_K2 \ | ||
| 222 | 0x1008c4UL | ||
| 223 | #define DORQ_REG_PF_EXT_VID_BB_K2 \ | ||
| 224 | 0x1008c8UL | ||
| 219 | #define DORQ_REG_DB_DROP_REASON \ | 225 | #define DORQ_REG_DB_DROP_REASON \ |
| 220 | 0x100a2cUL | 226 | 0x100a2cUL |
| 221 | #define DORQ_REG_DB_DROP_DETAILS \ | 227 | #define DORQ_REG_DB_DROP_DETAILS \ |
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c index bb529ff2ca81..ab30aaeac6d3 100644 --- a/drivers/net/ethernet/realtek/r8169.c +++ b/drivers/net/ethernet/realtek/r8169.c | |||
| @@ -4071,6 +4071,15 @@ static void rtl8169_init_phy(struct net_device *dev, struct rtl8169_private *tp) | |||
| 4071 | phy_speed_up(dev->phydev); | 4071 | phy_speed_up(dev->phydev); |
| 4072 | 4072 | ||
| 4073 | genphy_soft_reset(dev->phydev); | 4073 | genphy_soft_reset(dev->phydev); |
| 4074 | |||
| 4075 | /* It was reported that chip version 33 ends up with 10MBit/Half on a | ||
| 4076 | * 1GBit link after resuming from S3. For whatever reason the PHY on | ||
| 4077 | * this chip doesn't properly start a renegotiation when soft-reset. | ||
| 4078 | * Explicitly requesting a renegotiation fixes this. | ||
| 4079 | */ | ||
| 4080 | if (tp->mac_version == RTL_GIGA_MAC_VER_33 && | ||
| 4081 | dev->phydev->autoneg == AUTONEG_ENABLE) | ||
| 4082 | phy_restart_aneg(dev->phydev); | ||
| 4074 | } | 4083 | } |
| 4075 | 4084 | ||
| 4076 | static void rtl_rar_set(struct rtl8169_private *tp, u8 *addr) | 4085 | static void rtl_rar_set(struct rtl8169_private *tp, u8 *addr) |
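A minimal sketch of the workaround added above, pulled out of rtl8169_init_phy() for clarity; it assumes a phylib-managed PHY is attached (dev->phydev) and only restates the hunk, it is not additional driver code:

static void example_phy_reset_and_renegotiate(struct rtl8169_private *tp,
					      struct phy_device *phydev)
{
	genphy_soft_reset(phydev);

	/* RTL_GIGA_MAC_VER_33 may come back at 10/Half after S3 because the
	 * PHY does not restart autonegotiation on soft reset; request it
	 * explicitly when autoneg is in use.
	 */
	if (tp->mac_version == RTL_GIGA_MAC_VER_33 &&
	    phydev->autoneg == AUTONEG_ENABLE)
		phy_restart_aneg(phydev);
}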
diff --git a/drivers/net/ethernet/renesas/ravb.h b/drivers/net/ethernet/renesas/ravb.h index 1470fc12282b..9b6bf557a2f5 100644 --- a/drivers/net/ethernet/renesas/ravb.h +++ b/drivers/net/ethernet/renesas/ravb.h | |||
| @@ -428,6 +428,7 @@ enum EIS_BIT { | |||
| 428 | EIS_CULF1 = 0x00000080, | 428 | EIS_CULF1 = 0x00000080, |
| 429 | EIS_TFFF = 0x00000100, | 429 | EIS_TFFF = 0x00000100, |
| 430 | EIS_QFS = 0x00010000, | 430 | EIS_QFS = 0x00010000, |
| 431 | EIS_RESERVED = (GENMASK(31, 17) | GENMASK(15, 11)), | ||
| 431 | }; | 432 | }; |
| 432 | 433 | ||
| 433 | /* RIC0 */ | 434 | /* RIC0 */ |
| @@ -472,6 +473,7 @@ enum RIS0_BIT { | |||
| 472 | RIS0_FRF15 = 0x00008000, | 473 | RIS0_FRF15 = 0x00008000, |
| 473 | RIS0_FRF16 = 0x00010000, | 474 | RIS0_FRF16 = 0x00010000, |
| 474 | RIS0_FRF17 = 0x00020000, | 475 | RIS0_FRF17 = 0x00020000, |
| 476 | RIS0_RESERVED = GENMASK(31, 18), | ||
| 475 | }; | 477 | }; |
| 476 | 478 | ||
| 477 | /* RIC1 */ | 479 | /* RIC1 */ |
| @@ -528,6 +530,7 @@ enum RIS2_BIT { | |||
| 528 | RIS2_QFF16 = 0x00010000, | 530 | RIS2_QFF16 = 0x00010000, |
| 529 | RIS2_QFF17 = 0x00020000, | 531 | RIS2_QFF17 = 0x00020000, |
| 530 | RIS2_RFFF = 0x80000000, | 532 | RIS2_RFFF = 0x80000000, |
| 533 | RIS2_RESERVED = GENMASK(30, 18), | ||
| 531 | }; | 534 | }; |
| 532 | 535 | ||
| 533 | /* TIC */ | 536 | /* TIC */ |
| @@ -544,6 +547,7 @@ enum TIS_BIT { | |||
| 544 | TIS_FTF1 = 0x00000002, /* Undocumented? */ | 547 | TIS_FTF1 = 0x00000002, /* Undocumented? */ |
| 545 | TIS_TFUF = 0x00000100, | 548 | TIS_TFUF = 0x00000100, |
| 546 | TIS_TFWF = 0x00000200, | 549 | TIS_TFWF = 0x00000200, |
| 550 | TIS_RESERVED = (GENMASK(31, 20) | GENMASK(15, 12) | GENMASK(7, 4)) | ||
| 547 | }; | 551 | }; |
| 548 | 552 | ||
| 549 | /* ISS */ | 553 | /* ISS */ |
| @@ -617,6 +621,7 @@ enum GIC_BIT { | |||
| 617 | enum GIS_BIT { | 621 | enum GIS_BIT { |
| 618 | GIS_PTCF = 0x00000001, /* Undocumented? */ | 622 | GIS_PTCF = 0x00000001, /* Undocumented? */ |
| 619 | GIS_PTMF = 0x00000004, | 623 | GIS_PTMF = 0x00000004, |
| 624 | GIS_RESERVED = GENMASK(15, 10), | ||
| 620 | }; | 625 | }; |
| 621 | 626 | ||
| 622 | /* GIE (R-Car Gen3 only) */ | 627 | /* GIE (R-Car Gen3 only) */ |
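The new *_RESERVED masks exist so that the interrupt-acknowledge writes in ravb_main.c and ravb_ptp.c below stop writing 1 to reserved bits. These status registers clear a bit when 0 is written to it, so the old ~BIT pattern wrote 1 to every other position, reserved bits included; OR-ing the reserved mask into the value being inverted keeps those bits at 0. A hedged sketch of the resulting pattern (the wrapper is illustrative, not part of the driver):

static inline void example_ravb_ack(struct net_device *ndev, enum ravb_reg reg,
				    u32 ack_bits, u32 reserved_bits)
{
	/* Write 0 to the bits being acknowledged and to all reserved bits,
	 * 1 to every other (still pending) status bit so it is preserved.
	 */
	ravb_write(ndev, ~(ack_bits | reserved_bits), reg);
}

For EIS, for example, this reduces to the ravb_write(ndev, ~(EIS_QFS | EIS_RESERVED), EIS) call visible in the next hunk.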
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c index aff5516b781e..d6f753925352 100644 --- a/drivers/net/ethernet/renesas/ravb_main.c +++ b/drivers/net/ethernet/renesas/ravb_main.c | |||
| @@ -739,10 +739,11 @@ static void ravb_error_interrupt(struct net_device *ndev) | |||
| 739 | u32 eis, ris2; | 739 | u32 eis, ris2; |
| 740 | 740 | ||
| 741 | eis = ravb_read(ndev, EIS); | 741 | eis = ravb_read(ndev, EIS); |
| 742 | ravb_write(ndev, ~EIS_QFS, EIS); | 742 | ravb_write(ndev, ~(EIS_QFS | EIS_RESERVED), EIS); |
| 743 | if (eis & EIS_QFS) { | 743 | if (eis & EIS_QFS) { |
| 744 | ris2 = ravb_read(ndev, RIS2); | 744 | ris2 = ravb_read(ndev, RIS2); |
| 745 | ravb_write(ndev, ~(RIS2_QFF0 | RIS2_RFFF), RIS2); | 745 | ravb_write(ndev, ~(RIS2_QFF0 | RIS2_RFFF | RIS2_RESERVED), |
| 746 | RIS2); | ||
| 746 | 747 | ||
| 747 | /* Receive Descriptor Empty int */ | 748 | /* Receive Descriptor Empty int */ |
| 748 | if (ris2 & RIS2_QFF0) | 749 | if (ris2 & RIS2_QFF0) |
| @@ -795,7 +796,7 @@ static bool ravb_timestamp_interrupt(struct net_device *ndev) | |||
| 795 | u32 tis = ravb_read(ndev, TIS); | 796 | u32 tis = ravb_read(ndev, TIS); |
| 796 | 797 | ||
| 797 | if (tis & TIS_TFUF) { | 798 | if (tis & TIS_TFUF) { |
| 798 | ravb_write(ndev, ~TIS_TFUF, TIS); | 799 | ravb_write(ndev, ~(TIS_TFUF | TIS_RESERVED), TIS); |
| 799 | ravb_get_tx_tstamp(ndev); | 800 | ravb_get_tx_tstamp(ndev); |
| 800 | return true; | 801 | return true; |
| 801 | } | 802 | } |
| @@ -930,7 +931,7 @@ static int ravb_poll(struct napi_struct *napi, int budget) | |||
| 930 | /* Processing RX Descriptor Ring */ | 931 | /* Processing RX Descriptor Ring */ |
| 931 | if (ris0 & mask) { | 932 | if (ris0 & mask) { |
| 932 | /* Clear RX interrupt */ | 933 | /* Clear RX interrupt */ |
| 933 | ravb_write(ndev, ~mask, RIS0); | 934 | ravb_write(ndev, ~(mask | RIS0_RESERVED), RIS0); |
| 934 | if (ravb_rx(ndev, "a, q)) | 935 | if (ravb_rx(ndev, "a, q)) |
| 935 | goto out; | 936 | goto out; |
| 936 | } | 937 | } |
| @@ -938,7 +939,7 @@ static int ravb_poll(struct napi_struct *napi, int budget) | |||
| 938 | if (tis & mask) { | 939 | if (tis & mask) { |
| 939 | spin_lock_irqsave(&priv->lock, flags); | 940 | spin_lock_irqsave(&priv->lock, flags); |
| 940 | /* Clear TX interrupt */ | 941 | /* Clear TX interrupt */ |
| 941 | ravb_write(ndev, ~mask, TIS); | 942 | ravb_write(ndev, ~(mask | TIS_RESERVED), TIS); |
| 942 | ravb_tx_free(ndev, q, true); | 943 | ravb_tx_free(ndev, q, true); |
| 943 | netif_wake_subqueue(ndev, q); | 944 | netif_wake_subqueue(ndev, q); |
| 944 | mmiowb(); | 945 | mmiowb(); |
diff --git a/drivers/net/ethernet/renesas/ravb_ptp.c b/drivers/net/ethernet/renesas/ravb_ptp.c index 0721b5c35d91..dce2a40a31e3 100644 --- a/drivers/net/ethernet/renesas/ravb_ptp.c +++ b/drivers/net/ethernet/renesas/ravb_ptp.c | |||
| @@ -315,7 +315,7 @@ void ravb_ptp_interrupt(struct net_device *ndev) | |||
| 315 | } | 315 | } |
| 316 | } | 316 | } |
| 317 | 317 | ||
| 318 | ravb_write(ndev, ~gis, GIS); | 318 | ravb_write(ndev, ~(gis | GIS_RESERVED), GIS); |
| 319 | } | 319 | } |
| 320 | 320 | ||
| 321 | void ravb_ptp_init(struct net_device *ndev, struct platform_device *pdev) | 321 | void ravb_ptp_init(struct net_device *ndev, struct platform_device *pdev) |
diff --git a/drivers/net/ethernet/seeq/ether3.c b/drivers/net/ethernet/seeq/ether3.c index c5bc124b41a9..d1bb73bf9914 100644 --- a/drivers/net/ethernet/seeq/ether3.c +++ b/drivers/net/ethernet/seeq/ether3.c | |||
| @@ -77,7 +77,8 @@ static void ether3_setmulticastlist(struct net_device *dev); | |||
| 77 | static int ether3_rx(struct net_device *dev, unsigned int maxcnt); | 77 | static int ether3_rx(struct net_device *dev, unsigned int maxcnt); |
| 78 | static void ether3_tx(struct net_device *dev); | 78 | static void ether3_tx(struct net_device *dev); |
| 79 | static int ether3_open (struct net_device *dev); | 79 | static int ether3_open (struct net_device *dev); |
| 80 | static int ether3_sendpacket (struct sk_buff *skb, struct net_device *dev); | 80 | static netdev_tx_t ether3_sendpacket(struct sk_buff *skb, |
| 81 | struct net_device *dev); | ||
| 81 | static irqreturn_t ether3_interrupt (int irq, void *dev_id); | 82 | static irqreturn_t ether3_interrupt (int irq, void *dev_id); |
| 82 | static int ether3_close (struct net_device *dev); | 83 | static int ether3_close (struct net_device *dev); |
| 83 | static void ether3_setmulticastlist (struct net_device *dev); | 84 | static void ether3_setmulticastlist (struct net_device *dev); |
| @@ -481,7 +482,7 @@ static void ether3_timeout(struct net_device *dev) | |||
| 481 | /* | 482 | /* |
| 482 | * Transmit a packet | 483 | * Transmit a packet |
| 483 | */ | 484 | */ |
| 484 | static int | 485 | static netdev_tx_t |
| 485 | ether3_sendpacket(struct sk_buff *skb, struct net_device *dev) | 486 | ether3_sendpacket(struct sk_buff *skb, struct net_device *dev) |
| 486 | { | 487 | { |
| 487 | unsigned long flags; | 488 | unsigned long flags; |
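This hunk, and the sgiseeq, ioc3, meth, w5100 and w5300 hunks that follow, are the same mechanical conversion: the .ndo_start_xmit handler must return netdev_tx_t rather than int, so prototypes and definitions are adjusted to match struct net_device_ops. A generic sketch of the expected shape (the example_* names are placeholders, not functions from these drivers):

static netdev_tx_t example_start_xmit(struct sk_buff *skb,
				      struct net_device *dev)
{
	if (false /* e.g. TX ring full */) {
		netif_stop_queue(dev);
		return NETDEV_TX_BUSY;	/* core requeues and retries later */
	}

	/* ... hand the skb to the hardware ... */
	dev_consume_skb_any(skb);	/* placeholder for the real TX path */
	return NETDEV_TX_OK;
}

static const struct net_device_ops example_netdev_ops = {
	.ndo_start_xmit	= example_start_xmit,
};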
diff --git a/drivers/net/ethernet/seeq/sgiseeq.c b/drivers/net/ethernet/seeq/sgiseeq.c index 573691bc3b71..70cce63a6081 100644 --- a/drivers/net/ethernet/seeq/sgiseeq.c +++ b/drivers/net/ethernet/seeq/sgiseeq.c | |||
| @@ -578,7 +578,8 @@ static inline int sgiseeq_reset(struct net_device *dev) | |||
| 578 | return 0; | 578 | return 0; |
| 579 | } | 579 | } |
| 580 | 580 | ||
| 581 | static int sgiseeq_start_xmit(struct sk_buff *skb, struct net_device *dev) | 581 | static netdev_tx_t |
| 582 | sgiseeq_start_xmit(struct sk_buff *skb, struct net_device *dev) | ||
| 582 | { | 583 | { |
| 583 | struct sgiseeq_private *sp = netdev_priv(dev); | 584 | struct sgiseeq_private *sp = netdev_priv(dev); |
| 584 | struct hpc3_ethregs *hregs = sp->hregs; | 585 | struct hpc3_ethregs *hregs = sp->hregs; |
diff --git a/drivers/net/ethernet/sgi/ioc3-eth.c b/drivers/net/ethernet/sgi/ioc3-eth.c index 18d533fdf14c..3140999642ba 100644 --- a/drivers/net/ethernet/sgi/ioc3-eth.c +++ b/drivers/net/ethernet/sgi/ioc3-eth.c | |||
| @@ -99,7 +99,7 @@ struct ioc3_private { | |||
| 99 | 99 | ||
| 100 | static int ioc3_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); | 100 | static int ioc3_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); |
| 101 | static void ioc3_set_multicast_list(struct net_device *dev); | 101 | static void ioc3_set_multicast_list(struct net_device *dev); |
| 102 | static int ioc3_start_xmit(struct sk_buff *skb, struct net_device *dev); | 102 | static netdev_tx_t ioc3_start_xmit(struct sk_buff *skb, struct net_device *dev); |
| 103 | static void ioc3_timeout(struct net_device *dev); | 103 | static void ioc3_timeout(struct net_device *dev); |
| 104 | static inline unsigned int ioc3_hash(const unsigned char *addr); | 104 | static inline unsigned int ioc3_hash(const unsigned char *addr); |
| 105 | static inline void ioc3_stop(struct ioc3_private *ip); | 105 | static inline void ioc3_stop(struct ioc3_private *ip); |
| @@ -1390,7 +1390,7 @@ static struct pci_driver ioc3_driver = { | |||
| 1390 | .remove = ioc3_remove_one, | 1390 | .remove = ioc3_remove_one, |
| 1391 | }; | 1391 | }; |
| 1392 | 1392 | ||
| 1393 | static int ioc3_start_xmit(struct sk_buff *skb, struct net_device *dev) | 1393 | static netdev_tx_t ioc3_start_xmit(struct sk_buff *skb, struct net_device *dev) |
| 1394 | { | 1394 | { |
| 1395 | unsigned long data; | 1395 | unsigned long data; |
| 1396 | struct ioc3_private *ip = netdev_priv(dev); | 1396 | struct ioc3_private *ip = netdev_priv(dev); |
diff --git a/drivers/net/ethernet/sgi/meth.c b/drivers/net/ethernet/sgi/meth.c index ea55abd62ec7..703fbbefea44 100644 --- a/drivers/net/ethernet/sgi/meth.c +++ b/drivers/net/ethernet/sgi/meth.c | |||
| @@ -697,7 +697,7 @@ static void meth_add_to_tx_ring(struct meth_private *priv, struct sk_buff *skb) | |||
| 697 | /* | 697 | /* |
| 698 | * Transmit a packet (called by the kernel) | 698 | * Transmit a packet (called by the kernel) |
| 699 | */ | 699 | */ |
| 700 | static int meth_tx(struct sk_buff *skb, struct net_device *dev) | 700 | static netdev_tx_t meth_tx(struct sk_buff *skb, struct net_device *dev) |
| 701 | { | 701 | { |
| 702 | struct meth_private *priv = netdev_priv(dev); | 702 | struct meth_private *priv = netdev_priv(dev); |
| 703 | unsigned long flags; | 703 | unsigned long flags; |
diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h index 1854f270ad66..b1b305f8f414 100644 --- a/drivers/net/ethernet/stmicro/stmmac/common.h +++ b/drivers/net/ethernet/stmicro/stmmac/common.h | |||
| @@ -258,10 +258,10 @@ struct stmmac_safety_stats { | |||
| 258 | #define MAX_DMA_RIWT 0xff | 258 | #define MAX_DMA_RIWT 0xff |
| 259 | #define MIN_DMA_RIWT 0x20 | 259 | #define MIN_DMA_RIWT 0x20 |
| 260 | /* Tx coalesce parameters */ | 260 | /* Tx coalesce parameters */ |
| 261 | #define STMMAC_COAL_TX_TIMER 40000 | 261 | #define STMMAC_COAL_TX_TIMER 1000 |
| 262 | #define STMMAC_MAX_COAL_TX_TICK 100000 | 262 | #define STMMAC_MAX_COAL_TX_TICK 100000 |
| 263 | #define STMMAC_TX_MAX_FRAMES 256 | 263 | #define STMMAC_TX_MAX_FRAMES 256 |
| 264 | #define STMMAC_TX_FRAMES 64 | 264 | #define STMMAC_TX_FRAMES 25 |
| 265 | 265 | ||
| 266 | /* Packets types */ | 266 | /* Packets types */ |
| 267 | enum packets_types { | 267 | enum packets_types { |
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h index c0a855b7ab3b..63e1064b27a2 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h | |||
| @@ -48,6 +48,8 @@ struct stmmac_tx_info { | |||
| 48 | 48 | ||
| 49 | /* Frequently used values are kept adjacent for cache effect */ | 49 | /* Frequently used values are kept adjacent for cache effect */ |
| 50 | struct stmmac_tx_queue { | 50 | struct stmmac_tx_queue { |
| 51 | u32 tx_count_frames; | ||
| 52 | struct timer_list txtimer; | ||
| 51 | u32 queue_index; | 53 | u32 queue_index; |
| 52 | struct stmmac_priv *priv_data; | 54 | struct stmmac_priv *priv_data; |
| 53 | struct dma_extended_desc *dma_etx ____cacheline_aligned_in_smp; | 55 | struct dma_extended_desc *dma_etx ____cacheline_aligned_in_smp; |
| @@ -73,7 +75,14 @@ struct stmmac_rx_queue { | |||
| 73 | u32 rx_zeroc_thresh; | 75 | u32 rx_zeroc_thresh; |
| 74 | dma_addr_t dma_rx_phy; | 76 | dma_addr_t dma_rx_phy; |
| 75 | u32 rx_tail_addr; | 77 | u32 rx_tail_addr; |
| 78 | }; | ||
| 79 | |||
| 80 | struct stmmac_channel { | ||
| 76 | struct napi_struct napi ____cacheline_aligned_in_smp; | 81 | struct napi_struct napi ____cacheline_aligned_in_smp; |
| 82 | struct stmmac_priv *priv_data; | ||
| 83 | u32 index; | ||
| 84 | int has_rx; | ||
| 85 | int has_tx; | ||
| 77 | }; | 86 | }; |
| 78 | 87 | ||
| 79 | struct stmmac_tc_entry { | 88 | struct stmmac_tc_entry { |
| @@ -109,14 +118,12 @@ struct stmmac_pps_cfg { | |||
| 109 | 118 | ||
| 110 | struct stmmac_priv { | 119 | struct stmmac_priv { |
| 111 | /* Frequently used values are kept adjacent for cache effect */ | 120 | /* Frequently used values are kept adjacent for cache effect */ |
| 112 | u32 tx_count_frames; | ||
| 113 | u32 tx_coal_frames; | 121 | u32 tx_coal_frames; |
| 114 | u32 tx_coal_timer; | 122 | u32 tx_coal_timer; |
| 115 | 123 | ||
| 116 | int tx_coalesce; | 124 | int tx_coalesce; |
| 117 | int hwts_tx_en; | 125 | int hwts_tx_en; |
| 118 | bool tx_path_in_lpi_mode; | 126 | bool tx_path_in_lpi_mode; |
| 119 | struct timer_list txtimer; | ||
| 120 | bool tso; | 127 | bool tso; |
| 121 | 128 | ||
| 122 | unsigned int dma_buf_sz; | 129 | unsigned int dma_buf_sz; |
| @@ -137,6 +144,9 @@ struct stmmac_priv { | |||
| 137 | /* TX Queue */ | 144 | /* TX Queue */ |
| 138 | struct stmmac_tx_queue tx_queue[MTL_MAX_TX_QUEUES]; | 145 | struct stmmac_tx_queue tx_queue[MTL_MAX_TX_QUEUES]; |
| 139 | 146 | ||
| 147 | /* Generic channel for NAPI */ | ||
| 148 | struct stmmac_channel channel[STMMAC_CH_MAX]; | ||
| 149 | |||
| 140 | bool oldlink; | 150 | bool oldlink; |
| 141 | int speed; | 151 | int speed; |
| 142 | int oldduplex; | 152 | int oldduplex; |
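The structural change here is that the frame counter and the coalescing timer become per-TX-queue state, while the NAPI context moves out of the RX queue into a new stmmac_channel that may represent RX, TX or both for one DMA channel. A condensed, hedged sketch of how the relocated timer fields are used; it mirrors the stmmac_tx_timer() rework in stmmac_main.c below rather than adding new behaviour:

static void example_tx_timer(struct timer_list *t)
{
	/* The timer now lives inside the TX queue, so the callback recovers
	 * the queue first and then the channel/NAPI context from its index.
	 */
	struct stmmac_tx_queue *tx_q = from_timer(tx_q, t, txtimer);
	struct stmmac_priv *priv = tx_q->priv_data;
	struct stmmac_channel *ch = &priv->channel[tx_q->queue_index];

	if (napi_schedule_prep(&ch->napi))
		__napi_schedule(&ch->napi);
}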
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 9f458bb16f2a..75896d6ba6e2 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | |||
| @@ -148,12 +148,14 @@ static void stmmac_verify_args(void) | |||
| 148 | static void stmmac_disable_all_queues(struct stmmac_priv *priv) | 148 | static void stmmac_disable_all_queues(struct stmmac_priv *priv) |
| 149 | { | 149 | { |
| 150 | u32 rx_queues_cnt = priv->plat->rx_queues_to_use; | 150 | u32 rx_queues_cnt = priv->plat->rx_queues_to_use; |
| 151 | u32 tx_queues_cnt = priv->plat->tx_queues_to_use; | ||
| 152 | u32 maxq = max(rx_queues_cnt, tx_queues_cnt); | ||
| 151 | u32 queue; | 153 | u32 queue; |
| 152 | 154 | ||
| 153 | for (queue = 0; queue < rx_queues_cnt; queue++) { | 155 | for (queue = 0; queue < maxq; queue++) { |
| 154 | struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; | 156 | struct stmmac_channel *ch = &priv->channel[queue]; |
| 155 | 157 | ||
| 156 | napi_disable(&rx_q->napi); | 158 | napi_disable(&ch->napi); |
| 157 | } | 159 | } |
| 158 | } | 160 | } |
| 159 | 161 | ||
| @@ -164,12 +166,14 @@ static void stmmac_disable_all_queues(struct stmmac_priv *priv) | |||
| 164 | static void stmmac_enable_all_queues(struct stmmac_priv *priv) | 166 | static void stmmac_enable_all_queues(struct stmmac_priv *priv) |
| 165 | { | 167 | { |
| 166 | u32 rx_queues_cnt = priv->plat->rx_queues_to_use; | 168 | u32 rx_queues_cnt = priv->plat->rx_queues_to_use; |
| 169 | u32 tx_queues_cnt = priv->plat->tx_queues_to_use; | ||
| 170 | u32 maxq = max(rx_queues_cnt, tx_queues_cnt); | ||
| 167 | u32 queue; | 171 | u32 queue; |
| 168 | 172 | ||
| 169 | for (queue = 0; queue < rx_queues_cnt; queue++) { | 173 | for (queue = 0; queue < maxq; queue++) { |
| 170 | struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; | 174 | struct stmmac_channel *ch = &priv->channel[queue]; |
| 171 | 175 | ||
| 172 | napi_enable(&rx_q->napi); | 176 | napi_enable(&ch->napi); |
| 173 | } | 177 | } |
| 174 | } | 178 | } |
| 175 | 179 | ||
| @@ -1843,18 +1847,18 @@ static void stmmac_dma_operation_mode(struct stmmac_priv *priv) | |||
| 1843 | * @queue: TX queue index | 1847 | * @queue: TX queue index |
| 1844 | * Description: it reclaims the transmit resources after transmission completes. | 1848 | * Description: it reclaims the transmit resources after transmission completes. |
| 1845 | */ | 1849 | */ |
| 1846 | static void stmmac_tx_clean(struct stmmac_priv *priv, u32 queue) | 1850 | static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue) |
| 1847 | { | 1851 | { |
| 1848 | struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue]; | 1852 | struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue]; |
| 1849 | unsigned int bytes_compl = 0, pkts_compl = 0; | 1853 | unsigned int bytes_compl = 0, pkts_compl = 0; |
| 1850 | unsigned int entry; | 1854 | unsigned int entry, count = 0; |
| 1851 | 1855 | ||
| 1852 | netif_tx_lock(priv->dev); | 1856 | __netif_tx_lock_bh(netdev_get_tx_queue(priv->dev, queue)); |
| 1853 | 1857 | ||
| 1854 | priv->xstats.tx_clean++; | 1858 | priv->xstats.tx_clean++; |
| 1855 | 1859 | ||
| 1856 | entry = tx_q->dirty_tx; | 1860 | entry = tx_q->dirty_tx; |
| 1857 | while (entry != tx_q->cur_tx) { | 1861 | while ((entry != tx_q->cur_tx) && (count < budget)) { |
| 1858 | struct sk_buff *skb = tx_q->tx_skbuff[entry]; | 1862 | struct sk_buff *skb = tx_q->tx_skbuff[entry]; |
| 1859 | struct dma_desc *p; | 1863 | struct dma_desc *p; |
| 1860 | int status; | 1864 | int status; |
| @@ -1870,6 +1874,8 @@ static void stmmac_tx_clean(struct stmmac_priv *priv, u32 queue) | |||
| 1870 | if (unlikely(status & tx_dma_own)) | 1874 | if (unlikely(status & tx_dma_own)) |
| 1871 | break; | 1875 | break; |
| 1872 | 1876 | ||
| 1877 | count++; | ||
| 1878 | |||
| 1873 | /* Make sure descriptor fields are read after reading | 1879 | /* Make sure descriptor fields are read after reading |
| 1874 | * the own bit. | 1880 | * the own bit. |
| 1875 | */ | 1881 | */ |
| @@ -1937,7 +1943,10 @@ static void stmmac_tx_clean(struct stmmac_priv *priv, u32 queue) | |||
| 1937 | stmmac_enable_eee_mode(priv); | 1943 | stmmac_enable_eee_mode(priv); |
| 1938 | mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(eee_timer)); | 1944 | mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(eee_timer)); |
| 1939 | } | 1945 | } |
| 1940 | netif_tx_unlock(priv->dev); | 1946 | |
| 1947 | __netif_tx_unlock_bh(netdev_get_tx_queue(priv->dev, queue)); | ||
| 1948 | |||
| 1949 | return count; | ||
| 1941 | } | 1950 | } |
| 1942 | 1951 | ||
| 1943 | /** | 1952 | /** |
| @@ -2020,6 +2029,33 @@ static bool stmmac_safety_feat_interrupt(struct stmmac_priv *priv) | |||
| 2020 | return false; | 2029 | return false; |
| 2021 | } | 2030 | } |
| 2022 | 2031 | ||
| 2032 | static int stmmac_napi_check(struct stmmac_priv *priv, u32 chan) | ||
| 2033 | { | ||
| 2034 | int status = stmmac_dma_interrupt_status(priv, priv->ioaddr, | ||
| 2035 | &priv->xstats, chan); | ||
| 2036 | struct stmmac_channel *ch = &priv->channel[chan]; | ||
| 2037 | bool needs_work = false; | ||
| 2038 | |||
| 2039 | if ((status & handle_rx) && ch->has_rx) { | ||
| 2040 | needs_work = true; | ||
| 2041 | } else { | ||
| 2042 | status &= ~handle_rx; | ||
| 2043 | } | ||
| 2044 | |||
| 2045 | if ((status & handle_tx) && ch->has_tx) { | ||
| 2046 | needs_work = true; | ||
| 2047 | } else { | ||
| 2048 | status &= ~handle_tx; | ||
| 2049 | } | ||
| 2050 | |||
| 2051 | if (needs_work && napi_schedule_prep(&ch->napi)) { | ||
| 2052 | stmmac_disable_dma_irq(priv, priv->ioaddr, chan); | ||
| 2053 | __napi_schedule(&ch->napi); | ||
| 2054 | } | ||
| 2055 | |||
| 2056 | return status; | ||
| 2057 | } | ||
| 2058 | |||
| 2023 | /** | 2059 | /** |
| 2024 | * stmmac_dma_interrupt - DMA ISR | 2060 | * stmmac_dma_interrupt - DMA ISR |
| 2025 | * @priv: driver private structure | 2061 | * @priv: driver private structure |
| @@ -2034,57 +2070,14 @@ static void stmmac_dma_interrupt(struct stmmac_priv *priv) | |||
| 2034 | u32 channels_to_check = tx_channel_count > rx_channel_count ? | 2070 | u32 channels_to_check = tx_channel_count > rx_channel_count ? |
| 2035 | tx_channel_count : rx_channel_count; | 2071 | tx_channel_count : rx_channel_count; |
| 2036 | u32 chan; | 2072 | u32 chan; |
| 2037 | bool poll_scheduled = false; | ||
| 2038 | int status[max_t(u32, MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES)]; | 2073 | int status[max_t(u32, MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES)]; |
| 2039 | 2074 | ||
| 2040 | /* Make sure we never check beyond our status buffer. */ | 2075 | /* Make sure we never check beyond our status buffer. */ |
| 2041 | if (WARN_ON_ONCE(channels_to_check > ARRAY_SIZE(status))) | 2076 | if (WARN_ON_ONCE(channels_to_check > ARRAY_SIZE(status))) |
| 2042 | channels_to_check = ARRAY_SIZE(status); | 2077 | channels_to_check = ARRAY_SIZE(status); |
| 2043 | 2078 | ||
| 2044 | /* Each DMA channel can be used for rx and tx simultaneously, yet | ||
| 2045 | * napi_struct is embedded in struct stmmac_rx_queue rather than in a | ||
| 2046 | * stmmac_channel struct. | ||
| 2047 | * Because of this, stmmac_poll currently checks (and possibly wakes) | ||
| 2048 | * all tx queues rather than just a single tx queue. | ||
| 2049 | */ | ||
| 2050 | for (chan = 0; chan < channels_to_check; chan++) | 2079 | for (chan = 0; chan < channels_to_check; chan++) |
| 2051 | status[chan] = stmmac_dma_interrupt_status(priv, priv->ioaddr, | 2080 | status[chan] = stmmac_napi_check(priv, chan); |
| 2052 | &priv->xstats, chan); | ||
| 2053 | |||
| 2054 | for (chan = 0; chan < rx_channel_count; chan++) { | ||
| 2055 | if (likely(status[chan] & handle_rx)) { | ||
| 2056 | struct stmmac_rx_queue *rx_q = &priv->rx_queue[chan]; | ||
| 2057 | |||
| 2058 | if (likely(napi_schedule_prep(&rx_q->napi))) { | ||
| 2059 | stmmac_disable_dma_irq(priv, priv->ioaddr, chan); | ||
| 2060 | __napi_schedule(&rx_q->napi); | ||
| 2061 | poll_scheduled = true; | ||
| 2062 | } | ||
| 2063 | } | ||
| 2064 | } | ||
| 2065 | |||
| 2066 | /* If we scheduled poll, we already know that tx queues will be checked. | ||
| 2067 | * If we didn't schedule poll, see if any DMA channel (used by tx) has a | ||
| 2068 | * completed transmission, if so, call stmmac_poll (once). | ||
| 2069 | */ | ||
| 2070 | if (!poll_scheduled) { | ||
| 2071 | for (chan = 0; chan < tx_channel_count; chan++) { | ||
| 2072 | if (status[chan] & handle_tx) { | ||
| 2073 | /* It doesn't matter what rx queue we choose | ||
| 2074 | * here. We use 0 since it always exists. | ||
| 2075 | */ | ||
| 2076 | struct stmmac_rx_queue *rx_q = | ||
| 2077 | &priv->rx_queue[0]; | ||
| 2078 | |||
| 2079 | if (likely(napi_schedule_prep(&rx_q->napi))) { | ||
| 2080 | stmmac_disable_dma_irq(priv, | ||
| 2081 | priv->ioaddr, chan); | ||
| 2082 | __napi_schedule(&rx_q->napi); | ||
| 2083 | } | ||
| 2084 | break; | ||
| 2085 | } | ||
| 2086 | } | ||
| 2087 | } | ||
| 2088 | 2081 | ||
| 2089 | for (chan = 0; chan < tx_channel_count; chan++) { | 2082 | for (chan = 0; chan < tx_channel_count; chan++) { |
| 2090 | if (unlikely(status[chan] & tx_hard_error_bump_tc)) { | 2083 | if (unlikely(status[chan] & tx_hard_error_bump_tc)) { |
| @@ -2220,8 +2213,7 @@ static int stmmac_init_dma_engine(struct stmmac_priv *priv) | |||
| 2220 | stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg, | 2213 | stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg, |
| 2221 | tx_q->dma_tx_phy, chan); | 2214 | tx_q->dma_tx_phy, chan); |
| 2222 | 2215 | ||
| 2223 | tx_q->tx_tail_addr = tx_q->dma_tx_phy + | 2216 | tx_q->tx_tail_addr = tx_q->dma_tx_phy; |
| 2224 | (DMA_TX_SIZE * sizeof(struct dma_desc)); | ||
| 2225 | stmmac_set_tx_tail_ptr(priv, priv->ioaddr, | 2217 | stmmac_set_tx_tail_ptr(priv, priv->ioaddr, |
| 2226 | tx_q->tx_tail_addr, chan); | 2218 | tx_q->tx_tail_addr, chan); |
| 2227 | } | 2219 | } |
| @@ -2233,6 +2225,13 @@ static int stmmac_init_dma_engine(struct stmmac_priv *priv) | |||
| 2233 | return ret; | 2225 | return ret; |
| 2234 | } | 2226 | } |
| 2235 | 2227 | ||
| 2228 | static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue) | ||
| 2229 | { | ||
| 2230 | struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue]; | ||
| 2231 | |||
| 2232 | mod_timer(&tx_q->txtimer, STMMAC_COAL_TIMER(priv->tx_coal_timer)); | ||
| 2233 | } | ||
| 2234 | |||
| 2236 | /** | 2235 | /** |
| 2237 | * stmmac_tx_timer - mitigation sw timer for tx. | 2236 | * stmmac_tx_timer - mitigation sw timer for tx. |
| 2238 | * @data: data pointer | 2237 | * @data: data pointer |
| @@ -2241,13 +2240,14 @@ static int stmmac_init_dma_engine(struct stmmac_priv *priv) | |||
| 2241 | */ | 2240 | */ |
| 2242 | static void stmmac_tx_timer(struct timer_list *t) | 2241 | static void stmmac_tx_timer(struct timer_list *t) |
| 2243 | { | 2242 | { |
| 2244 | struct stmmac_priv *priv = from_timer(priv, t, txtimer); | 2243 | struct stmmac_tx_queue *tx_q = from_timer(tx_q, t, txtimer); |
| 2245 | u32 tx_queues_count = priv->plat->tx_queues_to_use; | 2244 | struct stmmac_priv *priv = tx_q->priv_data; |
| 2246 | u32 queue; | 2245 | struct stmmac_channel *ch; |
| 2246 | |||
| 2247 | ch = &priv->channel[tx_q->queue_index]; | ||
| 2247 | 2248 | ||
| 2248 | /* let's scan all the tx queues */ | 2249 | if (likely(napi_schedule_prep(&ch->napi))) |
| 2249 | for (queue = 0; queue < tx_queues_count; queue++) | 2250 | __napi_schedule(&ch->napi); |
| 2250 | stmmac_tx_clean(priv, queue); | ||
| 2251 | } | 2251 | } |
| 2252 | 2252 | ||
| 2253 | /** | 2253 | /** |
| @@ -2260,11 +2260,17 @@ static void stmmac_tx_timer(struct timer_list *t) | |||
| 2260 | */ | 2260 | */ |
| 2261 | static void stmmac_init_tx_coalesce(struct stmmac_priv *priv) | 2261 | static void stmmac_init_tx_coalesce(struct stmmac_priv *priv) |
| 2262 | { | 2262 | { |
| 2263 | u32 tx_channel_count = priv->plat->tx_queues_to_use; | ||
| 2264 | u32 chan; | ||
| 2265 | |||
| 2263 | priv->tx_coal_frames = STMMAC_TX_FRAMES; | 2266 | priv->tx_coal_frames = STMMAC_TX_FRAMES; |
| 2264 | priv->tx_coal_timer = STMMAC_COAL_TX_TIMER; | 2267 | priv->tx_coal_timer = STMMAC_COAL_TX_TIMER; |
| 2265 | timer_setup(&priv->txtimer, stmmac_tx_timer, 0); | 2268 | |
| 2266 | priv->txtimer.expires = STMMAC_COAL_TIMER(priv->tx_coal_timer); | 2269 | for (chan = 0; chan < tx_channel_count; chan++) { |
| 2267 | add_timer(&priv->txtimer); | 2270 | struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan]; |
| 2271 | |||
| 2272 | timer_setup(&tx_q->txtimer, stmmac_tx_timer, 0); | ||
| 2273 | } | ||
| 2268 | } | 2274 | } |
| 2269 | 2275 | ||
| 2270 | static void stmmac_set_rings_length(struct stmmac_priv *priv) | 2276 | static void stmmac_set_rings_length(struct stmmac_priv *priv) |
| @@ -2592,6 +2598,7 @@ static void stmmac_hw_teardown(struct net_device *dev) | |||
| 2592 | static int stmmac_open(struct net_device *dev) | 2598 | static int stmmac_open(struct net_device *dev) |
| 2593 | { | 2599 | { |
| 2594 | struct stmmac_priv *priv = netdev_priv(dev); | 2600 | struct stmmac_priv *priv = netdev_priv(dev); |
| 2601 | u32 chan; | ||
| 2595 | int ret; | 2602 | int ret; |
| 2596 | 2603 | ||
| 2597 | stmmac_check_ether_addr(priv); | 2604 | stmmac_check_ether_addr(priv); |
| @@ -2688,7 +2695,9 @@ irq_error: | |||
| 2688 | if (dev->phydev) | 2695 | if (dev->phydev) |
| 2689 | phy_stop(dev->phydev); | 2696 | phy_stop(dev->phydev); |
| 2690 | 2697 | ||
| 2691 | del_timer_sync(&priv->txtimer); | 2698 | for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) |
| 2699 | del_timer_sync(&priv->tx_queue[chan].txtimer); | ||
| 2700 | |||
| 2692 | stmmac_hw_teardown(dev); | 2701 | stmmac_hw_teardown(dev); |
| 2693 | init_error: | 2702 | init_error: |
| 2694 | free_dma_desc_resources(priv); | 2703 | free_dma_desc_resources(priv); |
| @@ -2708,6 +2717,7 @@ dma_desc_error: | |||
| 2708 | static int stmmac_release(struct net_device *dev) | 2717 | static int stmmac_release(struct net_device *dev) |
| 2709 | { | 2718 | { |
| 2710 | struct stmmac_priv *priv = netdev_priv(dev); | 2719 | struct stmmac_priv *priv = netdev_priv(dev); |
| 2720 | u32 chan; | ||
| 2711 | 2721 | ||
| 2712 | if (priv->eee_enabled) | 2722 | if (priv->eee_enabled) |
| 2713 | del_timer_sync(&priv->eee_ctrl_timer); | 2723 | del_timer_sync(&priv->eee_ctrl_timer); |
| @@ -2722,7 +2732,8 @@ static int stmmac_release(struct net_device *dev) | |||
| 2722 | 2732 | ||
| 2723 | stmmac_disable_all_queues(priv); | 2733 | stmmac_disable_all_queues(priv); |
| 2724 | 2734 | ||
| 2725 | del_timer_sync(&priv->txtimer); | 2735 | for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) |
| 2736 | del_timer_sync(&priv->tx_queue[chan].txtimer); | ||
| 2726 | 2737 | ||
| 2727 | /* Free the IRQ lines */ | 2738 | /* Free the IRQ lines */ |
| 2728 | free_irq(dev->irq, dev); | 2739 | free_irq(dev->irq, dev); |
| @@ -2936,14 +2947,13 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev) | |||
| 2936 | priv->xstats.tx_tso_nfrags += nfrags; | 2947 | priv->xstats.tx_tso_nfrags += nfrags; |
| 2937 | 2948 | ||
| 2938 | /* Manage tx mitigation */ | 2949 | /* Manage tx mitigation */ |
| 2939 | priv->tx_count_frames += nfrags + 1; | 2950 | tx_q->tx_count_frames += nfrags + 1; |
| 2940 | if (likely(priv->tx_coal_frames > priv->tx_count_frames)) { | 2951 | if (priv->tx_coal_frames <= tx_q->tx_count_frames) { |
| 2941 | mod_timer(&priv->txtimer, | ||
| 2942 | STMMAC_COAL_TIMER(priv->tx_coal_timer)); | ||
| 2943 | } else { | ||
| 2944 | priv->tx_count_frames = 0; | ||
| 2945 | stmmac_set_tx_ic(priv, desc); | 2952 | stmmac_set_tx_ic(priv, desc); |
| 2946 | priv->xstats.tx_set_ic_bit++; | 2953 | priv->xstats.tx_set_ic_bit++; |
| 2954 | tx_q->tx_count_frames = 0; | ||
| 2955 | } else { | ||
| 2956 | stmmac_tx_timer_arm(priv, queue); | ||
| 2947 | } | 2957 | } |
| 2948 | 2958 | ||
| 2949 | skb_tx_timestamp(skb); | 2959 | skb_tx_timestamp(skb); |
| @@ -2992,6 +3002,7 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev) | |||
| 2992 | 3002 | ||
| 2993 | netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len); | 3003 | netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len); |
| 2994 | 3004 | ||
| 3005 | tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * sizeof(*desc)); | ||
| 2995 | stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue); | 3006 | stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue); |
| 2996 | 3007 | ||
| 2997 | return NETDEV_TX_OK; | 3008 | return NETDEV_TX_OK; |
| @@ -3146,14 +3157,13 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) | |||
| 3146 | * This approach takes care about the fragments: desc is the first | 3157 | * This approach takes care about the fragments: desc is the first |
| 3147 | * element in case of no SG. | 3158 | * element in case of no SG. |
| 3148 | */ | 3159 | */ |
| 3149 | priv->tx_count_frames += nfrags + 1; | 3160 | tx_q->tx_count_frames += nfrags + 1; |
| 3150 | if (likely(priv->tx_coal_frames > priv->tx_count_frames)) { | 3161 | if (priv->tx_coal_frames <= tx_q->tx_count_frames) { |
| 3151 | mod_timer(&priv->txtimer, | ||
| 3152 | STMMAC_COAL_TIMER(priv->tx_coal_timer)); | ||
| 3153 | } else { | ||
| 3154 | priv->tx_count_frames = 0; | ||
| 3155 | stmmac_set_tx_ic(priv, desc); | 3162 | stmmac_set_tx_ic(priv, desc); |
| 3156 | priv->xstats.tx_set_ic_bit++; | 3163 | priv->xstats.tx_set_ic_bit++; |
| 3164 | tx_q->tx_count_frames = 0; | ||
| 3165 | } else { | ||
| 3166 | stmmac_tx_timer_arm(priv, queue); | ||
| 3157 | } | 3167 | } |
| 3158 | 3168 | ||
| 3159 | skb_tx_timestamp(skb); | 3169 | skb_tx_timestamp(skb); |
| @@ -3199,6 +3209,8 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) | |||
| 3199 | netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len); | 3209 | netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len); |
| 3200 | 3210 | ||
| 3201 | stmmac_enable_dma_transmission(priv, priv->ioaddr); | 3211 | stmmac_enable_dma_transmission(priv, priv->ioaddr); |
| 3212 | |||
| 3213 | tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * sizeof(*desc)); | ||
| 3202 | stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue); | 3214 | stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue); |
| 3203 | 3215 | ||
| 3204 | return NETDEV_TX_OK; | 3216 | return NETDEV_TX_OK; |
| @@ -3319,6 +3331,7 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue) | |||
| 3319 | static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue) | 3331 | static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue) |
| 3320 | { | 3332 | { |
| 3321 | struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; | 3333 | struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; |
| 3334 | struct stmmac_channel *ch = &priv->channel[queue]; | ||
| 3322 | unsigned int entry = rx_q->cur_rx; | 3335 | unsigned int entry = rx_q->cur_rx; |
| 3323 | int coe = priv->hw->rx_csum; | 3336 | int coe = priv->hw->rx_csum; |
| 3324 | unsigned int next_entry; | 3337 | unsigned int next_entry; |
| @@ -3491,7 +3504,7 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue) | |||
| 3491 | else | 3504 | else |
| 3492 | skb->ip_summed = CHECKSUM_UNNECESSARY; | 3505 | skb->ip_summed = CHECKSUM_UNNECESSARY; |
| 3493 | 3506 | ||
| 3494 | napi_gro_receive(&rx_q->napi, skb); | 3507 | napi_gro_receive(&ch->napi, skb); |
| 3495 | 3508 | ||
| 3496 | priv->dev->stats.rx_packets++; | 3509 | priv->dev->stats.rx_packets++; |
| 3497 | priv->dev->stats.rx_bytes += frame_len; | 3510 | priv->dev->stats.rx_bytes += frame_len; |
| @@ -3514,27 +3527,33 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue) | |||
| 3514 | * Description : | 3527 | * Description : |
| 3515 | * To look at the incoming frames and clear the tx resources. | 3528 | * To look at the incoming frames and clear the tx resources. |
| 3516 | */ | 3529 | */ |
| 3517 | static int stmmac_poll(struct napi_struct *napi, int budget) | 3530 | static int stmmac_napi_poll(struct napi_struct *napi, int budget) |
| 3518 | { | 3531 | { |
| 3519 | struct stmmac_rx_queue *rx_q = | 3532 | struct stmmac_channel *ch = |
| 3520 | container_of(napi, struct stmmac_rx_queue, napi); | 3533 | container_of(napi, struct stmmac_channel, napi); |
| 3521 | struct stmmac_priv *priv = rx_q->priv_data; | 3534 | struct stmmac_priv *priv = ch->priv_data; |
| 3522 | u32 tx_count = priv->plat->tx_queues_to_use; | 3535 | int work_done = 0, work_rem = budget; |
| 3523 | u32 chan = rx_q->queue_index; | 3536 | u32 chan = ch->index; |
| 3524 | int work_done = 0; | ||
| 3525 | u32 queue; | ||
| 3526 | 3537 | ||
| 3527 | priv->xstats.napi_poll++; | 3538 | priv->xstats.napi_poll++; |
| 3528 | 3539 | ||
| 3529 | /* check all the queues */ | 3540 | if (ch->has_tx) { |
| 3530 | for (queue = 0; queue < tx_count; queue++) | 3541 | int done = stmmac_tx_clean(priv, work_rem, chan); |
| 3531 | stmmac_tx_clean(priv, queue); | ||
| 3532 | 3542 | ||
| 3533 | work_done = stmmac_rx(priv, budget, rx_q->queue_index); | 3543 | work_done += done; |
| 3534 | if (work_done < budget) { | 3544 | work_rem -= done; |
| 3535 | napi_complete_done(napi, work_done); | 3545 | } |
| 3536 | stmmac_enable_dma_irq(priv, priv->ioaddr, chan); | 3546 | |
| 3547 | if (ch->has_rx) { | ||
| 3548 | int done = stmmac_rx(priv, work_rem, chan); | ||
| 3549 | |||
| 3550 | work_done += done; | ||
| 3551 | work_rem -= done; | ||
| 3537 | } | 3552 | } |
| 3553 | |||
| 3554 | if (work_done < budget && napi_complete_done(napi, work_done)) | ||
| 3555 | stmmac_enable_dma_irq(priv, priv->ioaddr, chan); | ||
| 3556 | |||
| 3538 | return work_done; | 3557 | return work_done; |
| 3539 | } | 3558 | } |
| 3540 | 3559 | ||
| @@ -4198,8 +4217,8 @@ int stmmac_dvr_probe(struct device *device, | |||
| 4198 | { | 4217 | { |
| 4199 | struct net_device *ndev = NULL; | 4218 | struct net_device *ndev = NULL; |
| 4200 | struct stmmac_priv *priv; | 4219 | struct stmmac_priv *priv; |
| 4220 | u32 queue, maxq; | ||
| 4201 | int ret = 0; | 4221 | int ret = 0; |
| 4202 | u32 queue; | ||
| 4203 | 4222 | ||
| 4204 | ndev = alloc_etherdev_mqs(sizeof(struct stmmac_priv), | 4223 | ndev = alloc_etherdev_mqs(sizeof(struct stmmac_priv), |
| 4205 | MTL_MAX_TX_QUEUES, | 4224 | MTL_MAX_TX_QUEUES, |
| @@ -4322,11 +4341,22 @@ int stmmac_dvr_probe(struct device *device, | |||
| 4322 | "Enable RX Mitigation via HW Watchdog Timer\n"); | 4341 | "Enable RX Mitigation via HW Watchdog Timer\n"); |
| 4323 | } | 4342 | } |
| 4324 | 4343 | ||
| 4325 | for (queue = 0; queue < priv->plat->rx_queues_to_use; queue++) { | 4344 | /* Setup channels NAPI */ |
| 4326 | struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; | 4345 | maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use); |
| 4327 | 4346 | ||
| 4328 | netif_napi_add(ndev, &rx_q->napi, stmmac_poll, | 4347 | for (queue = 0; queue < maxq; queue++) { |
| 4329 | (8 * priv->plat->rx_queues_to_use)); | 4348 | struct stmmac_channel *ch = &priv->channel[queue]; |
| 4349 | |||
| 4350 | ch->priv_data = priv; | ||
| 4351 | ch->index = queue; | ||
| 4352 | |||
| 4353 | if (queue < priv->plat->rx_queues_to_use) | ||
| 4354 | ch->has_rx = true; | ||
| 4355 | if (queue < priv->plat->tx_queues_to_use) | ||
| 4356 | ch->has_tx = true; | ||
| 4357 | |||
| 4358 | netif_napi_add(ndev, &ch->napi, stmmac_napi_poll, | ||
| 4359 | NAPI_POLL_WEIGHT); | ||
| 4330 | } | 4360 | } |
| 4331 | 4361 | ||
| 4332 | mutex_init(&priv->lock); | 4362 | mutex_init(&priv->lock); |
| @@ -4372,10 +4402,10 @@ error_netdev_register: | |||
| 4372 | priv->hw->pcs != STMMAC_PCS_RTBI) | 4402 | priv->hw->pcs != STMMAC_PCS_RTBI) |
| 4373 | stmmac_mdio_unregister(ndev); | 4403 | stmmac_mdio_unregister(ndev); |
| 4374 | error_mdio_register: | 4404 | error_mdio_register: |
| 4375 | for (queue = 0; queue < priv->plat->rx_queues_to_use; queue++) { | 4405 | for (queue = 0; queue < maxq; queue++) { |
| 4376 | struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; | 4406 | struct stmmac_channel *ch = &priv->channel[queue]; |
| 4377 | 4407 | ||
| 4378 | netif_napi_del(&rx_q->napi); | 4408 | netif_napi_del(&ch->napi); |
| 4379 | } | 4409 | } |
| 4380 | error_hw_init: | 4410 | error_hw_init: |
| 4381 | destroy_workqueue(priv->wq); | 4411 | destroy_workqueue(priv->wq); |
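Taken together, the hunks above make TX completion budget-bound and serviced from the same per-channel NAPI context as RX, arm the coalescing timer per queue only while the frame threshold has not been reached, and program the TX tail pointer from the position of the last queued descriptor instead of the end of the ring. A condensed sketch of the xmit-side portion, with all names taken from the hunks (this is not the complete stmmac_xmit()):

static void example_finish_xmit(struct stmmac_priv *priv,
				struct stmmac_tx_queue *tx_q,
				struct dma_desc *desc, u32 queue, int nfrags)
{
	/* Frame-count based mitigation, now tracked per TX queue */
	tx_q->tx_count_frames += nfrags + 1;
	if (priv->tx_coal_frames <= tx_q->tx_count_frames) {
		stmmac_set_tx_ic(priv, desc);		/* interrupt on completion */
		priv->xstats.tx_set_ic_bit++;
		tx_q->tx_count_frames = 0;
	} else {
		stmmac_tx_timer_arm(priv, queue);	/* defer to this queue's timer */
	}

	/* Tail pointer follows the last queued descriptor, not the ring end */
	tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * sizeof(*desc));
	stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);
}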
diff --git a/drivers/net/ethernet/wiznet/w5100.c b/drivers/net/ethernet/wiznet/w5100.c index 2bdfb39215e9..d8ba512f166a 100644 --- a/drivers/net/ethernet/wiznet/w5100.c +++ b/drivers/net/ethernet/wiznet/w5100.c | |||
| @@ -835,7 +835,7 @@ static void w5100_tx_work(struct work_struct *work) | |||
| 835 | w5100_tx_skb(priv->ndev, skb); | 835 | w5100_tx_skb(priv->ndev, skb); |
| 836 | } | 836 | } |
| 837 | 837 | ||
| 838 | static int w5100_start_tx(struct sk_buff *skb, struct net_device *ndev) | 838 | static netdev_tx_t w5100_start_tx(struct sk_buff *skb, struct net_device *ndev) |
| 839 | { | 839 | { |
| 840 | struct w5100_priv *priv = netdev_priv(ndev); | 840 | struct w5100_priv *priv = netdev_priv(ndev); |
| 841 | 841 | ||
diff --git a/drivers/net/ethernet/wiznet/w5300.c b/drivers/net/ethernet/wiznet/w5300.c index 56ae573001e8..80fdbff67d82 100644 --- a/drivers/net/ethernet/wiznet/w5300.c +++ b/drivers/net/ethernet/wiznet/w5300.c | |||
| @@ -365,7 +365,7 @@ static void w5300_tx_timeout(struct net_device *ndev) | |||
| 365 | netif_wake_queue(ndev); | 365 | netif_wake_queue(ndev); |
| 366 | } | 366 | } |
| 367 | 367 | ||
| 368 | static int w5300_start_tx(struct sk_buff *skb, struct net_device *ndev) | 368 | static netdev_tx_t w5300_start_tx(struct sk_buff *skb, struct net_device *ndev) |
| 369 | { | 369 | { |
| 370 | struct w5300_priv *priv = netdev_priv(ndev); | 370 | struct w5300_priv *priv = netdev_priv(ndev); |
| 371 | 371 | ||
diff --git a/drivers/net/phy/sfp-bus.c b/drivers/net/phy/sfp-bus.c index 740655261e5b..83060fb349f4 100644 --- a/drivers/net/phy/sfp-bus.c +++ b/drivers/net/phy/sfp-bus.c | |||
| @@ -349,6 +349,7 @@ static int sfp_register_bus(struct sfp_bus *bus) | |||
| 349 | } | 349 | } |
| 350 | if (bus->started) | 350 | if (bus->started) |
| 351 | bus->socket_ops->start(bus->sfp); | 351 | bus->socket_ops->start(bus->sfp); |
| 352 | bus->netdev->sfp_bus = bus; | ||
| 352 | bus->registered = true; | 353 | bus->registered = true; |
| 353 | return 0; | 354 | return 0; |
| 354 | } | 355 | } |
| @@ -357,6 +358,7 @@ static void sfp_unregister_bus(struct sfp_bus *bus) | |||
| 357 | { | 358 | { |
| 358 | const struct sfp_upstream_ops *ops = bus->upstream_ops; | 359 | const struct sfp_upstream_ops *ops = bus->upstream_ops; |
| 359 | 360 | ||
| 361 | bus->netdev->sfp_bus = NULL; | ||
| 360 | if (bus->registered) { | 362 | if (bus->registered) { |
| 361 | if (bus->started) | 363 | if (bus->started) |
| 362 | bus->socket_ops->stop(bus->sfp); | 364 | bus->socket_ops->stop(bus->sfp); |
| @@ -438,7 +440,6 @@ static void sfp_upstream_clear(struct sfp_bus *bus) | |||
| 438 | { | 440 | { |
| 439 | bus->upstream_ops = NULL; | 441 | bus->upstream_ops = NULL; |
| 440 | bus->upstream = NULL; | 442 | bus->upstream = NULL; |
| 441 | bus->netdev->sfp_bus = NULL; | ||
| 442 | bus->netdev = NULL; | 443 | bus->netdev = NULL; |
| 443 | } | 444 | } |
| 444 | 445 | ||
| @@ -467,7 +468,6 @@ struct sfp_bus *sfp_register_upstream(struct fwnode_handle *fwnode, | |||
| 467 | bus->upstream_ops = ops; | 468 | bus->upstream_ops = ops; |
| 468 | bus->upstream = upstream; | 469 | bus->upstream = upstream; |
| 469 | bus->netdev = ndev; | 470 | bus->netdev = ndev; |
| 470 | ndev->sfp_bus = bus; | ||
| 471 | 471 | ||
| 472 | if (bus->sfp) { | 472 | if (bus->sfp) { |
| 473 | ret = sfp_register_bus(bus); | 473 | ret = sfp_register_bus(bus); |
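The effect of moving these assignments is an ordering guarantee: ndev->sfp_bus is published only once sfp_register_bus() has fully succeeded and is cleared before sfp_unregister_bus() tears anything down, so code that finds a non-NULL pointer is looking at a usable bus. A hedged sketch of that publish/unpublish pattern (illustrative only, not the full functions):

static int example_register(struct sfp_bus *bus)
{
	/* ... attach upstream and start the socket, as in sfp_register_bus() ... */
	bus->netdev->sfp_bus = bus;	/* publish last */
	bus->registered = true;
	return 0;
}

static void example_unregister(struct sfp_bus *bus)
{
	bus->netdev->sfp_bus = NULL;	/* unpublish first */
	/* ... stop the socket and detach upstream, as in sfp_unregister_bus() ... */
}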
diff --git a/drivers/net/tun.c b/drivers/net/tun.c index ebd07ad82431..e2648b5a3861 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c | |||
| @@ -1153,43 +1153,6 @@ static netdev_features_t tun_net_fix_features(struct net_device *dev, | |||
| 1153 | 1153 | ||
| 1154 | return (features & tun->set_features) | (features & ~TUN_USER_FEATURES); | 1154 | return (features & tun->set_features) | (features & ~TUN_USER_FEATURES); |
| 1155 | } | 1155 | } |
| 1156 | #ifdef CONFIG_NET_POLL_CONTROLLER | ||
| 1157 | static void tun_poll_controller(struct net_device *dev) | ||
| 1158 | { | ||
| 1159 | /* | ||
| 1160 | * Tun only receives frames when: | ||
| 1161 | * 1) the char device endpoint gets data from user space | ||
| 1162 | * 2) the tun socket gets a sendmsg call from user space | ||
| 1163 | * If NAPI is not enabled, since both of those are synchronous | ||
| 1164 | * operations, we are guaranteed never to have pending data when we poll | ||
| 1165 | * for it so there is nothing to do here but return. | ||
| 1166 | * We need this though so netpoll recognizes us as an interface that | ||
| 1167 | * supports polling, which enables bridge devices in virt setups to | ||
| 1168 | * still use netconsole | ||
| 1169 | * If NAPI is enabled, however, we need to schedule polling for all | ||
| 1170 | * queues unless we are using napi_gro_frags(), which we call in | ||
| 1171 | * process context and not in NAPI context. | ||
| 1172 | */ | ||
| 1173 | struct tun_struct *tun = netdev_priv(dev); | ||
| 1174 | |||
| 1175 | if (tun->flags & IFF_NAPI) { | ||
| 1176 | struct tun_file *tfile; | ||
| 1177 | int i; | ||
| 1178 | |||
| 1179 | if (tun_napi_frags_enabled(tun)) | ||
| 1180 | return; | ||
| 1181 | |||
| 1182 | rcu_read_lock(); | ||
| 1183 | for (i = 0; i < tun->numqueues; i++) { | ||
| 1184 | tfile = rcu_dereference(tun->tfiles[i]); | ||
| 1185 | if (tfile->napi_enabled) | ||
| 1186 | napi_schedule(&tfile->napi); | ||
| 1187 | } | ||
| 1188 | rcu_read_unlock(); | ||
| 1189 | } | ||
| 1190 | return; | ||
| 1191 | } | ||
| 1192 | #endif | ||
| 1193 | 1156 | ||
| 1194 | static void tun_set_headroom(struct net_device *dev, int new_hr) | 1157 | static void tun_set_headroom(struct net_device *dev, int new_hr) |
| 1195 | { | 1158 | { |
| @@ -1283,9 +1246,6 @@ static const struct net_device_ops tun_netdev_ops = { | |||
| 1283 | .ndo_start_xmit = tun_net_xmit, | 1246 | .ndo_start_xmit = tun_net_xmit, |
| 1284 | .ndo_fix_features = tun_net_fix_features, | 1247 | .ndo_fix_features = tun_net_fix_features, |
| 1285 | .ndo_select_queue = tun_select_queue, | 1248 | .ndo_select_queue = tun_select_queue, |
| 1286 | #ifdef CONFIG_NET_POLL_CONTROLLER | ||
| 1287 | .ndo_poll_controller = tun_poll_controller, | ||
| 1288 | #endif | ||
| 1289 | .ndo_set_rx_headroom = tun_set_headroom, | 1249 | .ndo_set_rx_headroom = tun_set_headroom, |
| 1290 | .ndo_get_stats64 = tun_net_get_stats64, | 1250 | .ndo_get_stats64 = tun_net_get_stats64, |
| 1291 | }; | 1251 | }; |
| @@ -1365,9 +1325,6 @@ static const struct net_device_ops tap_netdev_ops = { | |||
| 1365 | .ndo_set_mac_address = eth_mac_addr, | 1325 | .ndo_set_mac_address = eth_mac_addr, |
| 1366 | .ndo_validate_addr = eth_validate_addr, | 1326 | .ndo_validate_addr = eth_validate_addr, |
| 1367 | .ndo_select_queue = tun_select_queue, | 1327 | .ndo_select_queue = tun_select_queue, |
| 1368 | #ifdef CONFIG_NET_POLL_CONTROLLER | ||
| 1369 | .ndo_poll_controller = tun_poll_controller, | ||
| 1370 | #endif | ||
| 1371 | .ndo_features_check = passthru_features_check, | 1328 | .ndo_features_check = passthru_features_check, |
| 1372 | .ndo_set_rx_headroom = tun_set_headroom, | 1329 | .ndo_set_rx_headroom = tun_set_headroom, |
| 1373 | .ndo_get_stats64 = tun_net_get_stats64, | 1330 | .ndo_get_stats64 = tun_net_get_stats64, |
diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c index 5a9562881d4e..9fe3fff818b8 100644 --- a/drivers/nvme/host/multipath.c +++ b/drivers/nvme/host/multipath.c | |||
| @@ -537,8 +537,10 @@ int nvme_mpath_init(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id) | |||
| 537 | 537 | ||
| 538 | INIT_WORK(&ctrl->ana_work, nvme_ana_work); | 538 | INIT_WORK(&ctrl->ana_work, nvme_ana_work); |
| 539 | ctrl->ana_log_buf = kmalloc(ctrl->ana_log_size, GFP_KERNEL); | 539 | ctrl->ana_log_buf = kmalloc(ctrl->ana_log_size, GFP_KERNEL); |
| 540 | if (!ctrl->ana_log_buf) | 540 | if (!ctrl->ana_log_buf) { |
| 541 | error = -ENOMEM; | ||
| 541 | goto out; | 542 | goto out; |
| 543 | } | ||
| 542 | 544 | ||
| 543 | error = nvme_read_ana_log(ctrl, true); | 545 | error = nvme_read_ana_log(ctrl, true); |
| 544 | if (error) | 546 | if (error) |
| @@ -547,7 +549,7 @@ int nvme_mpath_init(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id) | |||
| 547 | out_free_ana_log_buf: | 549 | out_free_ana_log_buf: |
| 548 | kfree(ctrl->ana_log_buf); | 550 | kfree(ctrl->ana_log_buf); |
| 549 | out: | 551 | out: |
| 550 | return -ENOMEM; | 552 | return error; |
| 551 | } | 553 | } |
| 552 | 554 | ||
| 553 | void nvme_mpath_uninit(struct nvme_ctrl *ctrl) | 555 | void nvme_mpath_uninit(struct nvme_ctrl *ctrl) |
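Before this fix the shared "out" label in nvme_mpath_init() always returned -ENOMEM, masking the real error when nvme_read_ana_log() failed; now every failure path sets "error" before jumping there. A generic, hedged sketch of the pattern with placeholder names (not nvme code):

static int example_init(struct example_ctrl *ctrl)
{
	int error;

	ctrl->log_buf = kmalloc(ctrl->log_size, GFP_KERNEL);
	if (!ctrl->log_buf) {
		error = -ENOMEM;	/* set explicitly, do not rely on the label */
		goto out;
	}

	error = example_read_log(ctrl);	/* placeholder for the real log reader */
	if (error)
		goto out_free;

	return 0;

out_free:
	kfree(ctrl->log_buf);
out:
	return error;		/* propagates whichever error actually occurred */
}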
diff --git a/drivers/pci/controller/dwc/pcie-designware.c b/drivers/pci/controller/dwc/pcie-designware.c index 778c4f76a884..2153956a0b20 100644 --- a/drivers/pci/controller/dwc/pcie-designware.c +++ b/drivers/pci/controller/dwc/pcie-designware.c | |||
| @@ -135,7 +135,7 @@ static void dw_pcie_prog_outbound_atu_unroll(struct dw_pcie *pci, int index, | |||
| 135 | if (val & PCIE_ATU_ENABLE) | 135 | if (val & PCIE_ATU_ENABLE) |
| 136 | return; | 136 | return; |
| 137 | 137 | ||
| 138 | usleep_range(LINK_WAIT_IATU_MIN, LINK_WAIT_IATU_MAX); | 138 | mdelay(LINK_WAIT_IATU); |
| 139 | } | 139 | } |
| 140 | dev_err(pci->dev, "Outbound iATU is not being enabled\n"); | 140 | dev_err(pci->dev, "Outbound iATU is not being enabled\n"); |
| 141 | } | 141 | } |
| @@ -178,7 +178,7 @@ void dw_pcie_prog_outbound_atu(struct dw_pcie *pci, int index, int type, | |||
| 178 | if (val & PCIE_ATU_ENABLE) | 178 | if (val & PCIE_ATU_ENABLE) |
| 179 | return; | 179 | return; |
| 180 | 180 | ||
| 181 | usleep_range(LINK_WAIT_IATU_MIN, LINK_WAIT_IATU_MAX); | 181 | mdelay(LINK_WAIT_IATU); |
| 182 | } | 182 | } |
| 183 | dev_err(pci->dev, "Outbound iATU is not being enabled\n"); | 183 | dev_err(pci->dev, "Outbound iATU is not being enabled\n"); |
| 184 | } | 184 | } |
| @@ -236,7 +236,7 @@ static int dw_pcie_prog_inbound_atu_unroll(struct dw_pcie *pci, int index, | |||
| 236 | if (val & PCIE_ATU_ENABLE) | 236 | if (val & PCIE_ATU_ENABLE) |
| 237 | return 0; | 237 | return 0; |
| 238 | 238 | ||
| 239 | usleep_range(LINK_WAIT_IATU_MIN, LINK_WAIT_IATU_MAX); | 239 | mdelay(LINK_WAIT_IATU); |
| 240 | } | 240 | } |
| 241 | dev_err(pci->dev, "Inbound iATU is not being enabled\n"); | 241 | dev_err(pci->dev, "Inbound iATU is not being enabled\n"); |
| 242 | 242 | ||
| @@ -282,7 +282,7 @@ int dw_pcie_prog_inbound_atu(struct dw_pcie *pci, int index, int bar, | |||
| 282 | if (val & PCIE_ATU_ENABLE) | 282 | if (val & PCIE_ATU_ENABLE) |
| 283 | return 0; | 283 | return 0; |
| 284 | 284 | ||
| 285 | usleep_range(LINK_WAIT_IATU_MIN, LINK_WAIT_IATU_MAX); | 285 | mdelay(LINK_WAIT_IATU); |
| 286 | } | 286 | } |
| 287 | dev_err(pci->dev, "Inbound iATU is not being enabled\n"); | 287 | dev_err(pci->dev, "Inbound iATU is not being enabled\n"); |
| 288 | 288 | ||
diff --git a/drivers/pci/controller/dwc/pcie-designware.h b/drivers/pci/controller/dwc/pcie-designware.h index 96126fd8403c..9f1a5e399b70 100644 --- a/drivers/pci/controller/dwc/pcie-designware.h +++ b/drivers/pci/controller/dwc/pcie-designware.h | |||
| @@ -26,8 +26,7 @@ | |||
| 26 | 26 | ||
| 27 | /* Parameters for the waiting for iATU enabled routine */ | 27 | /* Parameters for the waiting for iATU enabled routine */ |
| 28 | #define LINK_WAIT_MAX_IATU_RETRIES 5 | 28 | #define LINK_WAIT_MAX_IATU_RETRIES 5 |
| 29 | #define LINK_WAIT_IATU_MIN 9000 | 29 | #define LINK_WAIT_IATU 9 |
| 30 | #define LINK_WAIT_IATU_MAX 10000 | ||
| 31 | 30 | ||
| 32 | /* Synopsys-specific PCIe configuration registers */ | 31 | /* Synopsys-specific PCIe configuration registers */ |
| 33 | #define PCIE_PORT_LINK_CONTROL 0x710 | 32 | #define PCIE_PORT_LINK_CONTROL 0x710 |
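The usleep_range() to mdelay() swap is needed because these iATU polls can be reached in atomic context (for example from config accesses made under a spinlock), where sleeping is not allowed; the retry interval stays in the same 9-10 ms range, now expressed in milliseconds. A hedged sketch of the resulting poll loop, condensed from the hunks rather than new driver code:

static void example_wait_iatu_enabled(struct dw_pcie *pci, u32 ctrl2_reg)
{
	u32 retries, val;

	for (retries = 0; retries < LINK_WAIT_MAX_IATU_RETRIES; retries++) {
		val = dw_pcie_readl_dbi(pci, ctrl2_reg);
		if (val & PCIE_ATU_ENABLE)
			return;

		mdelay(LINK_WAIT_IATU);	/* busy-wait: sleeping is not allowed here */
	}

	dev_err(pci->dev, "iATU is not being enabled\n");
}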
diff --git a/drivers/pci/controller/pci-hyperv.c b/drivers/pci/controller/pci-hyperv.c index ee80e79db21a..9ba4d12c179c 100644 --- a/drivers/pci/controller/pci-hyperv.c +++ b/drivers/pci/controller/pci-hyperv.c | |||
| @@ -1484,8 +1484,10 @@ static void hv_pci_assign_slots(struct hv_pcibus_device *hbus) | |||
| 1484 | snprintf(name, SLOT_NAME_SIZE, "%u", hpdev->desc.ser); | 1484 | snprintf(name, SLOT_NAME_SIZE, "%u", hpdev->desc.ser); |
| 1485 | hpdev->pci_slot = pci_create_slot(hbus->pci_bus, slot_nr, | 1485 | hpdev->pci_slot = pci_create_slot(hbus->pci_bus, slot_nr, |
| 1486 | name, NULL); | 1486 | name, NULL); |
| 1487 | if (!hpdev->pci_slot) | 1487 | if (IS_ERR(hpdev->pci_slot)) { |
| 1488 | pr_warn("pci_create slot %s failed\n", name); | 1488 | pr_warn("pci_create slot %s failed\n", name); |
| 1489 | hpdev->pci_slot = NULL; | ||
| 1490 | } | ||
| 1489 | } | 1491 | } |
| 1490 | } | 1492 | } |
| 1491 | 1493 | ||
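pci_create_slot() reports failure with ERR_PTR(-errno) rather than NULL, so the old NULL test could never trigger and later code checking hpdev->pci_slot would mistake the encoded error for a valid slot; the fix tests with IS_ERR() and resets the field. A generic, hedged sketch of that convention (placeholder wrapper, not Hyper-V code):

#include <linux/err.h>
#include <linux/pci.h>

static struct pci_slot *example_add_slot(struct pci_bus *bus, int nr,
					 const char *name)
{
	struct pci_slot *slot = pci_create_slot(bus, nr, name, NULL);

	if (IS_ERR(slot)) {
		pr_warn("pci_create_slot(%s) failed: %ld\n", name, PTR_ERR(slot));
		return NULL;	/* callers treat NULL as "no slot", never ERR_PTR */
	}

	return slot;
}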
diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c index ef0b1b6ba86f..12afa7fdf77e 100644 --- a/drivers/pci/hotplug/acpiphp_glue.c +++ b/drivers/pci/hotplug/acpiphp_glue.c | |||
| @@ -457,17 +457,18 @@ static void acpiphp_native_scan_bridge(struct pci_dev *bridge) | |||
| 457 | /** | 457 | /** |
| 458 | * enable_slot - enable, configure a slot | 458 | * enable_slot - enable, configure a slot |
| 459 | * @slot: slot to be enabled | 459 | * @slot: slot to be enabled |
| 460 | * @bridge: true if enable is for the whole bridge (not a single slot) | ||
| 460 | * | 461 | * |
| 461 | * This function should be called per *physical slot*, | 462 | * This function should be called per *physical slot*, |
| 462 | * not per each slot object in ACPI namespace. | 463 | * not per each slot object in ACPI namespace. |
| 463 | */ | 464 | */ |
| 464 | static void enable_slot(struct acpiphp_slot *slot) | 465 | static void enable_slot(struct acpiphp_slot *slot, bool bridge) |
| 465 | { | 466 | { |
| 466 | struct pci_dev *dev; | 467 | struct pci_dev *dev; |
| 467 | struct pci_bus *bus = slot->bus; | 468 | struct pci_bus *bus = slot->bus; |
| 468 | struct acpiphp_func *func; | 469 | struct acpiphp_func *func; |
| 469 | 470 | ||
| 470 | if (bus->self && hotplug_is_native(bus->self)) { | 471 | if (bridge && bus->self && hotplug_is_native(bus->self)) { |
| 471 | /* | 472 | /* |
| 472 | * If native hotplug is used, it will take care of hotplug | 473 | * If native hotplug is used, it will take care of hotplug |
| 473 | * slot management and resource allocation for hotplug | 474 | * slot management and resource allocation for hotplug |
| @@ -701,7 +702,7 @@ static void acpiphp_check_bridge(struct acpiphp_bridge *bridge) | |||
| 701 | trim_stale_devices(dev); | 702 | trim_stale_devices(dev); |
| 702 | 703 | ||
| 703 | /* configure all functions */ | 704 | /* configure all functions */ |
| 704 | enable_slot(slot); | 705 | enable_slot(slot, true); |
| 705 | } else { | 706 | } else { |
| 706 | disable_slot(slot); | 707 | disable_slot(slot); |
| 707 | } | 708 | } |
| @@ -785,7 +786,7 @@ static void hotplug_event(u32 type, struct acpiphp_context *context) | |||
| 785 | if (bridge) | 786 | if (bridge) |
| 786 | acpiphp_check_bridge(bridge); | 787 | acpiphp_check_bridge(bridge); |
| 787 | else if (!(slot->flags & SLOT_IS_GOING_AWAY)) | 788 | else if (!(slot->flags & SLOT_IS_GOING_AWAY)) |
| 788 | enable_slot(slot); | 789 | enable_slot(slot, false); |
| 789 | 790 | ||
| 790 | break; | 791 | break; |
| 791 | 792 | ||
| @@ -973,7 +974,7 @@ int acpiphp_enable_slot(struct acpiphp_slot *slot) | |||
| 973 | 974 | ||
| 974 | /* configure all functions */ | 975 | /* configure all functions */ |
| 975 | if (!(slot->flags & SLOT_ENABLED)) | 976 | if (!(slot->flags & SLOT_ENABLED)) |
| 976 | enable_slot(slot); | 977 | enable_slot(slot, false); |
| 977 | 978 | ||
| 978 | pci_unlock_rescan_remove(); | 979 | pci_unlock_rescan_remove(); |
| 979 | return 0; | 980 | return 0; |
diff --git a/drivers/pinctrl/intel/pinctrl-cannonlake.c b/drivers/pinctrl/intel/pinctrl-cannonlake.c index 8d48371caaa2..e7f45d96b0cb 100644 --- a/drivers/pinctrl/intel/pinctrl-cannonlake.c +++ b/drivers/pinctrl/intel/pinctrl-cannonlake.c | |||
| @@ -15,10 +15,11 @@ | |||
| 15 | 15 | ||
| 16 | #include "pinctrl-intel.h" | 16 | #include "pinctrl-intel.h" |
| 17 | 17 | ||
| 18 | #define CNL_PAD_OWN 0x020 | 18 | #define CNL_PAD_OWN 0x020 |
| 19 | #define CNL_PADCFGLOCK 0x080 | 19 | #define CNL_PADCFGLOCK 0x080 |
| 20 | #define CNL_HOSTSW_OWN 0x0b0 | 20 | #define CNL_LP_HOSTSW_OWN 0x0b0 |
| 21 | #define CNL_GPI_IE 0x120 | 21 | #define CNL_H_HOSTSW_OWN 0x0c0 |
| 22 | #define CNL_GPI_IE 0x120 | ||
| 22 | 23 | ||
| 23 | #define CNL_GPP(r, s, e, g) \ | 24 | #define CNL_GPP(r, s, e, g) \ |
| 24 | { \ | 25 | { \ |
| @@ -30,12 +31,12 @@ | |||
| 30 | 31 | ||
| 31 | #define CNL_NO_GPIO -1 | 32 | #define CNL_NO_GPIO -1 |
| 32 | 33 | ||
| 33 | #define CNL_COMMUNITY(b, s, e, g) \ | 34 | #define CNL_COMMUNITY(b, s, e, o, g) \ |
| 34 | { \ | 35 | { \ |
| 35 | .barno = (b), \ | 36 | .barno = (b), \ |
| 36 | .padown_offset = CNL_PAD_OWN, \ | 37 | .padown_offset = CNL_PAD_OWN, \ |
| 37 | .padcfglock_offset = CNL_PADCFGLOCK, \ | 38 | .padcfglock_offset = CNL_PADCFGLOCK, \ |
| 38 | .hostown_offset = CNL_HOSTSW_OWN, \ | 39 | .hostown_offset = (o), \ |
| 39 | .ie_offset = CNL_GPI_IE, \ | 40 | .ie_offset = CNL_GPI_IE, \ |
| 40 | .pin_base = (s), \ | 41 | .pin_base = (s), \ |
| 41 | .npins = ((e) - (s) + 1), \ | 42 | .npins = ((e) - (s) + 1), \ |
| @@ -43,6 +44,12 @@ | |||
| 43 | .ngpps = ARRAY_SIZE(g), \ | 44 | .ngpps = ARRAY_SIZE(g), \ |
| 44 | } | 45 | } |
| 45 | 46 | ||
| 47 | #define CNLLP_COMMUNITY(b, s, e, g) \ | ||
| 48 | CNL_COMMUNITY(b, s, e, CNL_LP_HOSTSW_OWN, g) | ||
| 49 | |||
| 50 | #define CNLH_COMMUNITY(b, s, e, g) \ | ||
| 51 | CNL_COMMUNITY(b, s, e, CNL_H_HOSTSW_OWN, g) | ||
| 52 | |||
| 46 | /* Cannon Lake-H */ | 53 | /* Cannon Lake-H */ |
| 47 | static const struct pinctrl_pin_desc cnlh_pins[] = { | 54 | static const struct pinctrl_pin_desc cnlh_pins[] = { |
| 48 | /* GPP_A */ | 55 | /* GPP_A */ |
| @@ -442,10 +449,10 @@ static const struct intel_function cnlh_functions[] = { | |||
| 442 | }; | 449 | }; |
| 443 | 450 | ||
| 444 | static const struct intel_community cnlh_communities[] = { | 451 | static const struct intel_community cnlh_communities[] = { |
| 445 | CNL_COMMUNITY(0, 0, 50, cnlh_community0_gpps), | 452 | CNLH_COMMUNITY(0, 0, 50, cnlh_community0_gpps), |
| 446 | CNL_COMMUNITY(1, 51, 154, cnlh_community1_gpps), | 453 | CNLH_COMMUNITY(1, 51, 154, cnlh_community1_gpps), |
| 447 | CNL_COMMUNITY(2, 155, 248, cnlh_community3_gpps), | 454 | CNLH_COMMUNITY(2, 155, 248, cnlh_community3_gpps), |
| 448 | CNL_COMMUNITY(3, 249, 298, cnlh_community4_gpps), | 455 | CNLH_COMMUNITY(3, 249, 298, cnlh_community4_gpps), |
| 449 | }; | 456 | }; |
| 450 | 457 | ||
| 451 | static const struct intel_pinctrl_soc_data cnlh_soc_data = { | 458 | static const struct intel_pinctrl_soc_data cnlh_soc_data = { |
| @@ -803,9 +810,9 @@ static const struct intel_padgroup cnllp_community4_gpps[] = { | |||
| 803 | }; | 810 | }; |
| 804 | 811 | ||
| 805 | static const struct intel_community cnllp_communities[] = { | 812 | static const struct intel_community cnllp_communities[] = { |
| 806 | CNL_COMMUNITY(0, 0, 67, cnllp_community0_gpps), | 813 | CNLLP_COMMUNITY(0, 0, 67, cnllp_community0_gpps), |
| 807 | CNL_COMMUNITY(1, 68, 180, cnllp_community1_gpps), | 814 | CNLLP_COMMUNITY(1, 68, 180, cnllp_community1_gpps), |
| 808 | CNL_COMMUNITY(2, 181, 243, cnllp_community4_gpps), | 815 | CNLLP_COMMUNITY(2, 181, 243, cnllp_community4_gpps), |
| 809 | }; | 816 | }; |
| 810 | 817 | ||
| 811 | static const struct intel_pinctrl_soc_data cnllp_soc_data = { | 818 | static const struct intel_pinctrl_soc_data cnllp_soc_data = { |
diff --git a/drivers/pinctrl/intel/pinctrl-intel.c b/drivers/pinctrl/intel/pinctrl-intel.c index ec8dafc94694..1ea3438ea67e 100644 --- a/drivers/pinctrl/intel/pinctrl-intel.c +++ b/drivers/pinctrl/intel/pinctrl-intel.c | |||
| @@ -887,36 +887,6 @@ static const struct gpio_chip intel_gpio_chip = { | |||
| 887 | .set_config = gpiochip_generic_config, | 887 | .set_config = gpiochip_generic_config, |
| 888 | }; | 888 | }; |
| 889 | 889 | ||
| 890 | static int intel_gpio_irq_reqres(struct irq_data *d) | ||
| 891 | { | ||
| 892 | struct gpio_chip *gc = irq_data_get_irq_chip_data(d); | ||
| 893 | struct intel_pinctrl *pctrl = gpiochip_get_data(gc); | ||
| 894 | int pin; | ||
| 895 | int ret; | ||
| 896 | |||
| 897 | pin = intel_gpio_to_pin(pctrl, irqd_to_hwirq(d), NULL, NULL); | ||
| 898 | if (pin >= 0) { | ||
| 899 | ret = gpiochip_lock_as_irq(gc, pin); | ||
| 900 | if (ret) { | ||
| 901 | dev_err(pctrl->dev, "unable to lock HW IRQ %d for IRQ\n", | ||
| 902 | pin); | ||
| 903 | return ret; | ||
| 904 | } | ||
| 905 | } | ||
| 906 | return 0; | ||
| 907 | } | ||
| 908 | |||
| 909 | static void intel_gpio_irq_relres(struct irq_data *d) | ||
| 910 | { | ||
| 911 | struct gpio_chip *gc = irq_data_get_irq_chip_data(d); | ||
| 912 | struct intel_pinctrl *pctrl = gpiochip_get_data(gc); | ||
| 913 | int pin; | ||
| 914 | |||
| 915 | pin = intel_gpio_to_pin(pctrl, irqd_to_hwirq(d), NULL, NULL); | ||
| 916 | if (pin >= 0) | ||
| 917 | gpiochip_unlock_as_irq(gc, pin); | ||
| 918 | } | ||
| 919 | |||
| 920 | static void intel_gpio_irq_ack(struct irq_data *d) | 890 | static void intel_gpio_irq_ack(struct irq_data *d) |
| 921 | { | 891 | { |
| 922 | struct gpio_chip *gc = irq_data_get_irq_chip_data(d); | 892 | struct gpio_chip *gc = irq_data_get_irq_chip_data(d); |
| @@ -1132,8 +1102,6 @@ static irqreturn_t intel_gpio_irq(int irq, void *data) | |||
| 1132 | 1102 | ||
| 1133 | static struct irq_chip intel_gpio_irqchip = { | 1103 | static struct irq_chip intel_gpio_irqchip = { |
| 1134 | .name = "intel-gpio", | 1104 | .name = "intel-gpio", |
| 1135 | .irq_request_resources = intel_gpio_irq_reqres, | ||
| 1136 | .irq_release_resources = intel_gpio_irq_relres, | ||
| 1137 | .irq_enable = intel_gpio_irq_enable, | 1105 | .irq_enable = intel_gpio_irq_enable, |
| 1138 | .irq_ack = intel_gpio_irq_ack, | 1106 | .irq_ack = intel_gpio_irq_ack, |
| 1139 | .irq_mask = intel_gpio_irq_mask, | 1107 | .irq_mask = intel_gpio_irq_mask, |
diff --git a/drivers/pinctrl/pinctrl-amd.c b/drivers/pinctrl/pinctrl-amd.c index 41ccc759b8b8..1425c2874d40 100644 --- a/drivers/pinctrl/pinctrl-amd.c +++ b/drivers/pinctrl/pinctrl-amd.c | |||
| @@ -348,21 +348,12 @@ static void amd_gpio_irq_enable(struct irq_data *d) | |||
| 348 | unsigned long flags; | 348 | unsigned long flags; |
| 349 | struct gpio_chip *gc = irq_data_get_irq_chip_data(d); | 349 | struct gpio_chip *gc = irq_data_get_irq_chip_data(d); |
| 350 | struct amd_gpio *gpio_dev = gpiochip_get_data(gc); | 350 | struct amd_gpio *gpio_dev = gpiochip_get_data(gc); |
| 351 | u32 mask = BIT(INTERRUPT_ENABLE_OFF) | BIT(INTERRUPT_MASK_OFF); | ||
| 352 | 351 | ||
| 353 | raw_spin_lock_irqsave(&gpio_dev->lock, flags); | 352 | raw_spin_lock_irqsave(&gpio_dev->lock, flags); |
| 354 | pin_reg = readl(gpio_dev->base + (d->hwirq)*4); | 353 | pin_reg = readl(gpio_dev->base + (d->hwirq)*4); |
| 355 | pin_reg |= BIT(INTERRUPT_ENABLE_OFF); | 354 | pin_reg |= BIT(INTERRUPT_ENABLE_OFF); |
| 356 | pin_reg |= BIT(INTERRUPT_MASK_OFF); | 355 | pin_reg |= BIT(INTERRUPT_MASK_OFF); |
| 357 | writel(pin_reg, gpio_dev->base + (d->hwirq)*4); | 356 | writel(pin_reg, gpio_dev->base + (d->hwirq)*4); |
| 358 | /* | ||
| 359 | * When debounce logic is enabled it takes ~900 us before interrupts | ||
| 360 | * can be enabled. During this "debounce warm up" period the | ||
| 361 | * "INTERRUPT_ENABLE" bit will read as 0. Poll the bit here until it | ||
| 362 | * reads back as 1, signaling that interrupts are now enabled. | ||
| 363 | */ | ||
| 364 | while ((readl(gpio_dev->base + (d->hwirq)*4) & mask) != mask) | ||
| 365 | continue; | ||
| 366 | raw_spin_unlock_irqrestore(&gpio_dev->lock, flags); | 357 | raw_spin_unlock_irqrestore(&gpio_dev->lock, flags); |
| 367 | } | 358 | } |
| 368 | 359 | ||
| @@ -426,7 +417,7 @@ static void amd_gpio_irq_eoi(struct irq_data *d) | |||
| 426 | static int amd_gpio_irq_set_type(struct irq_data *d, unsigned int type) | 417 | static int amd_gpio_irq_set_type(struct irq_data *d, unsigned int type) |
| 427 | { | 418 | { |
| 428 | int ret = 0; | 419 | int ret = 0; |
| 429 | u32 pin_reg; | 420 | u32 pin_reg, pin_reg_irq_en, mask; |
| 430 | unsigned long flags, irq_flags; | 421 | unsigned long flags, irq_flags; |
| 431 | struct gpio_chip *gc = irq_data_get_irq_chip_data(d); | 422 | struct gpio_chip *gc = irq_data_get_irq_chip_data(d); |
| 432 | struct amd_gpio *gpio_dev = gpiochip_get_data(gc); | 423 | struct amd_gpio *gpio_dev = gpiochip_get_data(gc); |
| @@ -495,6 +486,28 @@ static int amd_gpio_irq_set_type(struct irq_data *d, unsigned int type) | |||
| 495 | } | 486 | } |
| 496 | 487 | ||
| 497 | pin_reg |= CLR_INTR_STAT << INTERRUPT_STS_OFF; | 488 | pin_reg |= CLR_INTR_STAT << INTERRUPT_STS_OFF; |
| 489 | /* | ||
| 490 | * If WAKE_INT_MASTER_REG.MaskStsEn is set, a software write to the | ||
| 491 | * debounce registers of any GPIO will block wake/interrupt status | ||
| 492 | * generation for *all* GPIOs for a length of time that depends on | ||
| 493 | * WAKE_INT_MASTER_REG.MaskStsLength[11:0]. During this period the | ||
| 494 | * INTERRUPT_ENABLE bit will read as 0. | ||
| 495 | * | ||
| 496 | * We temporarily enable irq for the GPIO whose configuration is | ||
| 497 | * changing, and then wait for it to read back as 1 to know when | ||
| 498 | * debounce has settled and then disable the irq again. | ||
| 499 | * We do this polling with the spinlock held to ensure other GPIO | ||
| 500 | * access routines do not read an incorrect value for the irq enable | ||
| 501 | * bit of other GPIOs. We keep the GPIO masked while polling to avoid | ||
| 502 | * spurious irqs, and disable the irq again after polling. | ||
| 503 | */ | ||
| 504 | mask = BIT(INTERRUPT_ENABLE_OFF); | ||
| 505 | pin_reg_irq_en = pin_reg; | ||
| 506 | pin_reg_irq_en |= mask; | ||
| 507 | pin_reg_irq_en &= ~BIT(INTERRUPT_MASK_OFF); | ||
| 508 | writel(pin_reg_irq_en, gpio_dev->base + (d->hwirq)*4); | ||
| 509 | while ((readl(gpio_dev->base + (d->hwirq)*4) & mask) != mask) | ||
| 510 | continue; | ||
| 498 | writel(pin_reg, gpio_dev->base + (d->hwirq)*4); | 511 | writel(pin_reg, gpio_dev->base + (d->hwirq)*4); |
| 499 | raw_spin_unlock_irqrestore(&gpio_dev->lock, flags); | 512 | raw_spin_unlock_irqrestore(&gpio_dev->lock, flags); |
| 500 | 513 | ||
diff --git a/drivers/regulator/bd71837-regulator.c b/drivers/regulator/bd71837-regulator.c index 0f8ac8dec3e1..a1bd8aaf4d98 100644 --- a/drivers/regulator/bd71837-regulator.c +++ b/drivers/regulator/bd71837-regulator.c | |||
| @@ -569,6 +569,25 @@ static int bd71837_probe(struct platform_device *pdev) | |||
| 569 | BD71837_REG_REGLOCK); | 569 | BD71837_REG_REGLOCK); |
| 570 | } | 570 | } |
| 571 | 571 | ||
| 572 | /* | ||
| 573 | * There is a HW quirk in BD71837. The shutdown sequence timings for | ||
| 574 | * bucks/LDOs which are controlled via register interface are changed. | ||
| 575 | * At PMIC poweroff the voltage for BUCK6/7 is cut immediately at the | ||
| 576 | * beginning of shut-down sequence. As bucks 6 and 7 are parent | ||
| 577 | * supplies for LDO5 and LDO6 - this causes LDO5/6 voltage | ||
| 578 | * monitoring to erroneously detect under voltage and force PMIC to | ||
| 579 | * emergency state instead of poweroff. In order to avoid this we | ||
| 580 | * disable voltage monitoring for LDO5 and LDO6 | ||
| 581 | */ | ||
| 582 | err = regmap_update_bits(pmic->mfd->regmap, BD718XX_REG_MVRFLTMASK2, | ||
| 583 | BD718XX_LDO5_VRMON80 | BD718XX_LDO6_VRMON80, | ||
| 584 | BD718XX_LDO5_VRMON80 | BD718XX_LDO6_VRMON80); | ||
| 585 | if (err) { | ||
| 586 | dev_err(&pmic->pdev->dev, | ||
| 587 | "Failed to disable voltage monitoring\n"); | ||
| 588 | goto err; | ||
| 589 | } | ||
| 590 | |||
| 572 | for (i = 0; i < ARRAY_SIZE(pmic_regulator_inits); i++) { | 591 | for (i = 0; i < ARRAY_SIZE(pmic_regulator_inits); i++) { |
| 573 | 592 | ||
| 574 | struct regulator_desc *desc; | 593 | struct regulator_desc *desc; |
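The fix itself is a single regmap mask write. As a quick illustration of the call, here it is in isolation: the register and bit names come from the diff, the wrapper function is hypothetical, and per the patch comment a set *_VRMON80 bit disables the corresponding 80% undervoltage monitor.

```c
#include <linux/regmap.h>

/* Keep the LDO5/LDO6 undervoltage monitors masked so the BUCK6/7 cut at
 * poweroff cannot be misread as an LDO fault. */
static int bd71837_mask_ldo56_vrmon(struct regmap *regmap)
{
	unsigned int mask = BD718XX_LDO5_VRMON80 | BD718XX_LDO6_VRMON80;

	return regmap_update_bits(regmap, BD718XX_REG_MVRFLTMASK2, mask, mask);
}
```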
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c index bb1324f93143..9577d8941846 100644 --- a/drivers/regulator/core.c +++ b/drivers/regulator/core.c | |||
| @@ -3161,7 +3161,7 @@ static inline int regulator_suspend_toggle(struct regulator_dev *rdev, | |||
| 3161 | if (!rstate->changeable) | 3161 | if (!rstate->changeable) |
| 3162 | return -EPERM; | 3162 | return -EPERM; |
| 3163 | 3163 | ||
| 3164 | rstate->enabled = en; | 3164 | rstate->enabled = (en) ? ENABLE_IN_SUSPEND : DISABLE_IN_SUSPEND; |
| 3165 | 3165 | ||
| 3166 | return 0; | 3166 | return 0; |
| 3167 | } | 3167 | } |
| @@ -4395,13 +4395,13 @@ regulator_register(const struct regulator_desc *regulator_desc, | |||
| 4395 | !rdev->desc->fixed_uV) | 4395 | !rdev->desc->fixed_uV) |
| 4396 | rdev->is_switch = true; | 4396 | rdev->is_switch = true; |
| 4397 | 4397 | ||
| 4398 | dev_set_drvdata(&rdev->dev, rdev); | ||
| 4398 | ret = device_register(&rdev->dev); | 4399 | ret = device_register(&rdev->dev); |
| 4399 | if (ret != 0) { | 4400 | if (ret != 0) { |
| 4400 | put_device(&rdev->dev); | 4401 | put_device(&rdev->dev); |
| 4401 | goto unset_supplies; | 4402 | goto unset_supplies; |
| 4402 | } | 4403 | } |
| 4403 | 4404 | ||
| 4404 | dev_set_drvdata(&rdev->dev, rdev); | ||
| 4405 | rdev_init_debugfs(rdev); | 4405 | rdev_init_debugfs(rdev); |
| 4406 | 4406 | ||
| 4407 | /* try to resolve regulators supply since a new one was registered */ | 4407 | /* try to resolve regulators supply since a new one was registered */ |
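The regulator core change is purely an ordering fix. The pattern generalizes to any device whose sysfs attributes dereference drvdata; a minimal sketch with illustrative names:

```c
#include <linux/device.h>

/* Sketch only: drvdata must be visible before device_register(), because
 * attribute callbacks can run as soon as the device is added. */
static int example_register(struct device *dev, void *priv)
{
	int ret;

	dev_set_drvdata(dev, priv);	/* must precede registration */

	ret = device_register(dev);
	if (ret)
		put_device(dev);	/* device_register() failure still needs put_device() */

	return ret;
}
```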
diff --git a/drivers/regulator/of_regulator.c b/drivers/regulator/of_regulator.c index 638f17d4c848..210fc20f7de7 100644 --- a/drivers/regulator/of_regulator.c +++ b/drivers/regulator/of_regulator.c | |||
| @@ -213,8 +213,6 @@ static void of_get_regulation_constraints(struct device_node *np, | |||
| 213 | else if (of_property_read_bool(suspend_np, | 213 | else if (of_property_read_bool(suspend_np, |
| 214 | "regulator-off-in-suspend")) | 214 | "regulator-off-in-suspend")) |
| 215 | suspend_state->enabled = DISABLE_IN_SUSPEND; | 215 | suspend_state->enabled = DISABLE_IN_SUSPEND; |
| 216 | else | ||
| 217 | suspend_state->enabled = DO_NOTHING_IN_SUSPEND; | ||
| 218 | 216 | ||
| 219 | if (!of_property_read_u32(np, "regulator-suspend-min-microvolt", | 217 | if (!of_property_read_u32(np, "regulator-suspend-min-microvolt", |
| 220 | &pval)) | 218 | &pval)) |
diff --git a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c index fac377320158..f42a619198c4 100644 --- a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c +++ b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c | |||
| @@ -3474,11 +3474,10 @@ static int ibmvscsis_probe(struct vio_dev *vdev, | |||
| 3474 | vscsi->dds.window[LOCAL].liobn, | 3474 | vscsi->dds.window[LOCAL].liobn, |
| 3475 | vscsi->dds.window[REMOTE].liobn); | 3475 | vscsi->dds.window[REMOTE].liobn); |
| 3476 | 3476 | ||
| 3477 | strcpy(vscsi->eye, "VSCSI "); | 3477 | snprintf(vscsi->eye, sizeof(vscsi->eye), "VSCSI %s", vdev->name); |
| 3478 | strncat(vscsi->eye, vdev->name, MAX_EYE); | ||
| 3479 | 3478 | ||
| 3480 | vscsi->dds.unit_id = vdev->unit_address; | 3479 | vscsi->dds.unit_id = vdev->unit_address; |
| 3481 | strncpy(vscsi->dds.partition_name, partition_name, | 3480 | strscpy(vscsi->dds.partition_name, partition_name, |
| 3482 | sizeof(vscsi->dds.partition_name)); | 3481 | sizeof(vscsi->dds.partition_name)); |
| 3483 | vscsi->dds.partition_num = partition_number; | 3482 | vscsi->dds.partition_num = partition_number; |
| 3484 | 3483 | ||
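A brief sketch of the string-handling idioms the driver moves to; buffer names and sizes here are placeholders, not the driver's fields:

```c
#include <linux/kernel.h>
#include <linux/string.h>

/* Illustrative only: bounded, always-terminated copies replacing the old
 * strcpy()/strncat() and strncpy() calls. */
static void demo_fill_ids(char *eye, size_t eye_len, const char *dev_name,
			  char *part, size_t part_len, const char *partition)
{
	/* snprintf() truncates safely and always NUL-terminates. */
	snprintf(eye, eye_len, "VSCSI %s", dev_name);

	/* strscpy() guarantees termination; strncpy() does not. */
	strscpy(part, partition, part_len);
}
```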
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c index f2ec80b0ffc0..271990bc065b 100644 --- a/drivers/scsi/ipr.c +++ b/drivers/scsi/ipr.c | |||
| @@ -3335,6 +3335,65 @@ static void ipr_release_dump(struct kref *kref) | |||
| 3335 | LEAVE; | 3335 | LEAVE; |
| 3336 | } | 3336 | } |
| 3337 | 3337 | ||
| 3338 | static void ipr_add_remove_thread(struct work_struct *work) | ||
| 3339 | { | ||
| 3340 | unsigned long lock_flags; | ||
| 3341 | struct ipr_resource_entry *res; | ||
| 3342 | struct scsi_device *sdev; | ||
| 3343 | struct ipr_ioa_cfg *ioa_cfg = | ||
| 3344 | container_of(work, struct ipr_ioa_cfg, scsi_add_work_q); | ||
| 3345 | u8 bus, target, lun; | ||
| 3346 | int did_work; | ||
| 3347 | |||
| 3348 | ENTER; | ||
| 3349 | spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); | ||
| 3350 | |||
| 3351 | restart: | ||
| 3352 | do { | ||
| 3353 | did_work = 0; | ||
| 3354 | if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) { | ||
| 3355 | spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); | ||
| 3356 | return; | ||
| 3357 | } | ||
| 3358 | |||
| 3359 | list_for_each_entry(res, &ioa_cfg->used_res_q, queue) { | ||
| 3360 | if (res->del_from_ml && res->sdev) { | ||
| 3361 | did_work = 1; | ||
| 3362 | sdev = res->sdev; | ||
| 3363 | if (!scsi_device_get(sdev)) { | ||
| 3364 | if (!res->add_to_ml) | ||
| 3365 | list_move_tail(&res->queue, &ioa_cfg->free_res_q); | ||
| 3366 | else | ||
| 3367 | res->del_from_ml = 0; | ||
| 3368 | spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); | ||
| 3369 | scsi_remove_device(sdev); | ||
| 3370 | scsi_device_put(sdev); | ||
| 3371 | spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); | ||
| 3372 | } | ||
| 3373 | break; | ||
| 3374 | } | ||
| 3375 | } | ||
| 3376 | } while (did_work); | ||
| 3377 | |||
| 3378 | list_for_each_entry(res, &ioa_cfg->used_res_q, queue) { | ||
| 3379 | if (res->add_to_ml) { | ||
| 3380 | bus = res->bus; | ||
| 3381 | target = res->target; | ||
| 3382 | lun = res->lun; | ||
| 3383 | res->add_to_ml = 0; | ||
| 3384 | spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); | ||
| 3385 | scsi_add_device(ioa_cfg->host, bus, target, lun); | ||
| 3386 | spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); | ||
| 3387 | goto restart; | ||
| 3388 | } | ||
| 3389 | } | ||
| 3390 | |||
| 3391 | ioa_cfg->scan_done = 1; | ||
| 3392 | spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); | ||
| 3393 | kobject_uevent(&ioa_cfg->host->shost_dev.kobj, KOBJ_CHANGE); | ||
| 3394 | LEAVE; | ||
| 3395 | } | ||
| 3396 | |||
| 3338 | /** | 3397 | /** |
| 3339 | * ipr_worker_thread - Worker thread | 3398 | * ipr_worker_thread - Worker thread |
| 3340 | * @work: ioa config struct | 3399 | * @work: ioa config struct |
| @@ -3349,13 +3408,9 @@ static void ipr_release_dump(struct kref *kref) | |||
| 3349 | static void ipr_worker_thread(struct work_struct *work) | 3408 | static void ipr_worker_thread(struct work_struct *work) |
| 3350 | { | 3409 | { |
| 3351 | unsigned long lock_flags; | 3410 | unsigned long lock_flags; |
| 3352 | struct ipr_resource_entry *res; | ||
| 3353 | struct scsi_device *sdev; | ||
| 3354 | struct ipr_dump *dump; | 3411 | struct ipr_dump *dump; |
| 3355 | struct ipr_ioa_cfg *ioa_cfg = | 3412 | struct ipr_ioa_cfg *ioa_cfg = |
| 3356 | container_of(work, struct ipr_ioa_cfg, work_q); | 3413 | container_of(work, struct ipr_ioa_cfg, work_q); |
| 3357 | u8 bus, target, lun; | ||
| 3358 | int did_work; | ||
| 3359 | 3414 | ||
| 3360 | ENTER; | 3415 | ENTER; |
| 3361 | spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); | 3416 | spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); |
| @@ -3393,49 +3448,9 @@ static void ipr_worker_thread(struct work_struct *work) | |||
| 3393 | return; | 3448 | return; |
| 3394 | } | 3449 | } |
| 3395 | 3450 | ||
| 3396 | restart: | 3451 | schedule_work(&ioa_cfg->scsi_add_work_q); |
| 3397 | do { | ||
| 3398 | did_work = 0; | ||
| 3399 | if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) { | ||
| 3400 | spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); | ||
| 3401 | return; | ||
| 3402 | } | ||
| 3403 | 3452 | ||
| 3404 | list_for_each_entry(res, &ioa_cfg->used_res_q, queue) { | ||
| 3405 | if (res->del_from_ml && res->sdev) { | ||
| 3406 | did_work = 1; | ||
| 3407 | sdev = res->sdev; | ||
| 3408 | if (!scsi_device_get(sdev)) { | ||
| 3409 | if (!res->add_to_ml) | ||
| 3410 | list_move_tail(&res->queue, &ioa_cfg->free_res_q); | ||
| 3411 | else | ||
| 3412 | res->del_from_ml = 0; | ||
| 3413 | spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); | ||
| 3414 | scsi_remove_device(sdev); | ||
| 3415 | scsi_device_put(sdev); | ||
| 3416 | spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); | ||
| 3417 | } | ||
| 3418 | break; | ||
| 3419 | } | ||
| 3420 | } | ||
| 3421 | } while (did_work); | ||
| 3422 | |||
| 3423 | list_for_each_entry(res, &ioa_cfg->used_res_q, queue) { | ||
| 3424 | if (res->add_to_ml) { | ||
| 3425 | bus = res->bus; | ||
| 3426 | target = res->target; | ||
| 3427 | lun = res->lun; | ||
| 3428 | res->add_to_ml = 0; | ||
| 3429 | spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); | ||
| 3430 | scsi_add_device(ioa_cfg->host, bus, target, lun); | ||
| 3431 | spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); | ||
| 3432 | goto restart; | ||
| 3433 | } | ||
| 3434 | } | ||
| 3435 | |||
| 3436 | ioa_cfg->scan_done = 1; | ||
| 3437 | spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); | 3453 | spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); |
| 3438 | kobject_uevent(&ioa_cfg->host->shost_dev.kobj, KOBJ_CHANGE); | ||
| 3439 | LEAVE; | 3454 | LEAVE; |
| 3440 | } | 3455 | } |
| 3441 | 3456 | ||
| @@ -9933,6 +9948,7 @@ static void ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg, | |||
| 9933 | INIT_LIST_HEAD(&ioa_cfg->free_res_q); | 9948 | INIT_LIST_HEAD(&ioa_cfg->free_res_q); |
| 9934 | INIT_LIST_HEAD(&ioa_cfg->used_res_q); | 9949 | INIT_LIST_HEAD(&ioa_cfg->used_res_q); |
| 9935 | INIT_WORK(&ioa_cfg->work_q, ipr_worker_thread); | 9950 | INIT_WORK(&ioa_cfg->work_q, ipr_worker_thread); |
| 9951 | INIT_WORK(&ioa_cfg->scsi_add_work_q, ipr_add_remove_thread); | ||
| 9936 | init_waitqueue_head(&ioa_cfg->reset_wait_q); | 9952 | init_waitqueue_head(&ioa_cfg->reset_wait_q); |
| 9937 | init_waitqueue_head(&ioa_cfg->msi_wait_q); | 9953 | init_waitqueue_head(&ioa_cfg->msi_wait_q); |
| 9938 | init_waitqueue_head(&ioa_cfg->eeh_wait_q); | 9954 | init_waitqueue_head(&ioa_cfg->eeh_wait_q); |
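The ipr change moves the slow scsi_add_device()/scsi_remove_device() scan out of the main worker into its own work item. A stripped-down sketch of that split, with hypothetical names (ipr itself uses work_q and scsi_add_work_q):

```c
#include <linux/kernel.h>
#include <linux/printk.h>
#include <linux/workqueue.h>

struct demo_ioa {
	struct work_struct fast_work;
	struct work_struct scan_work;
};

static void demo_scan_worker(struct work_struct *work)
{
	struct demo_ioa *ioa = container_of(work, struct demo_ioa, scan_work);

	/* scsi_add_device()/scsi_remove_device() style work would run here */
	pr_debug("scanning devices for %p\n", ioa);
}

static void demo_fast_worker(struct work_struct *work)
{
	struct demo_ioa *ioa = container_of(work, struct demo_ioa, fast_work);

	/* ...dumps, resets and other quick state changes handled here... */
	schedule_work(&ioa->scan_work);	/* defer the slow, sleep-heavy part */
}

static void demo_init(struct demo_ioa *ioa)
{
	INIT_WORK(&ioa->fast_work, demo_fast_worker);
	INIT_WORK(&ioa->scan_work, demo_scan_worker);
}
```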
diff --git a/drivers/scsi/ipr.h b/drivers/scsi/ipr.h index 68afbbde54d3..f6baa2351313 100644 --- a/drivers/scsi/ipr.h +++ b/drivers/scsi/ipr.h | |||
| @@ -1575,6 +1575,7 @@ struct ipr_ioa_cfg { | |||
| 1575 | u8 saved_mode_page_len; | 1575 | u8 saved_mode_page_len; |
| 1576 | 1576 | ||
| 1577 | struct work_struct work_q; | 1577 | struct work_struct work_q; |
| 1578 | struct work_struct scsi_add_work_q; | ||
| 1578 | struct workqueue_struct *reset_work_q; | 1579 | struct workqueue_struct *reset_work_q; |
| 1579 | 1580 | ||
| 1580 | wait_queue_head_t reset_wait_q; | 1581 | wait_queue_head_t reset_wait_q; |
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c index 057a60abe664..1a6ed9b0a249 100644 --- a/drivers/scsi/lpfc/lpfc_attr.c +++ b/drivers/scsi/lpfc/lpfc_attr.c | |||
| @@ -360,12 +360,12 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr, | |||
| 360 | goto buffer_done; | 360 | goto buffer_done; |
| 361 | 361 | ||
| 362 | list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) { | 362 | list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) { |
| 363 | nrport = NULL; | ||
| 364 | spin_lock(&vport->phba->hbalock); | ||
| 363 | rport = lpfc_ndlp_get_nrport(ndlp); | 365 | rport = lpfc_ndlp_get_nrport(ndlp); |
| 364 | if (!rport) | 366 | if (rport) |
| 365 | continue; | 367 | nrport = rport->remoteport; |
| 366 | 368 | spin_unlock(&vport->phba->hbalock); | |
| 367 | /* local short-hand pointer. */ | ||
| 368 | nrport = rport->remoteport; | ||
| 369 | if (!nrport) | 369 | if (!nrport) |
| 370 | continue; | 370 | continue; |
| 371 | 371 | ||
| @@ -3386,6 +3386,7 @@ lpfc_update_rport_devloss_tmo(struct lpfc_vport *vport) | |||
| 3386 | struct lpfc_nodelist *ndlp; | 3386 | struct lpfc_nodelist *ndlp; |
| 3387 | #if (IS_ENABLED(CONFIG_NVME_FC)) | 3387 | #if (IS_ENABLED(CONFIG_NVME_FC)) |
| 3388 | struct lpfc_nvme_rport *rport; | 3388 | struct lpfc_nvme_rport *rport; |
| 3389 | struct nvme_fc_remote_port *remoteport = NULL; | ||
| 3389 | #endif | 3390 | #endif |
| 3390 | 3391 | ||
| 3391 | shost = lpfc_shost_from_vport(vport); | 3392 | shost = lpfc_shost_from_vport(vport); |
| @@ -3396,8 +3397,12 @@ lpfc_update_rport_devloss_tmo(struct lpfc_vport *vport) | |||
| 3396 | if (ndlp->rport) | 3397 | if (ndlp->rport) |
| 3397 | ndlp->rport->dev_loss_tmo = vport->cfg_devloss_tmo; | 3398 | ndlp->rport->dev_loss_tmo = vport->cfg_devloss_tmo; |
| 3398 | #if (IS_ENABLED(CONFIG_NVME_FC)) | 3399 | #if (IS_ENABLED(CONFIG_NVME_FC)) |
| 3400 | spin_lock(&vport->phba->hbalock); | ||
| 3399 | rport = lpfc_ndlp_get_nrport(ndlp); | 3401 | rport = lpfc_ndlp_get_nrport(ndlp); |
| 3400 | if (rport) | 3402 | if (rport) |
| 3403 | remoteport = rport->remoteport; | ||
| 3404 | spin_unlock(&vport->phba->hbalock); | ||
| 3405 | if (remoteport) | ||
| 3401 | nvme_fc_set_remoteport_devloss(rport->remoteport, | 3406 | nvme_fc_set_remoteport_devloss(rport->remoteport, |
| 3402 | vport->cfg_devloss_tmo); | 3407 | vport->cfg_devloss_tmo); |
| 3403 | #endif | 3408 | #endif |
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c index 9df0c051349f..aec5b10a8c85 100644 --- a/drivers/scsi/lpfc/lpfc_debugfs.c +++ b/drivers/scsi/lpfc/lpfc_debugfs.c | |||
| @@ -551,7 +551,7 @@ lpfc_debugfs_nodelist_data(struct lpfc_vport *vport, char *buf, int size) | |||
| 551 | unsigned char *statep; | 551 | unsigned char *statep; |
| 552 | struct nvme_fc_local_port *localport; | 552 | struct nvme_fc_local_port *localport; |
| 553 | struct lpfc_nvmet_tgtport *tgtp; | 553 | struct lpfc_nvmet_tgtport *tgtp; |
| 554 | struct nvme_fc_remote_port *nrport; | 554 | struct nvme_fc_remote_port *nrport = NULL; |
| 555 | struct lpfc_nvme_rport *rport; | 555 | struct lpfc_nvme_rport *rport; |
| 556 | 556 | ||
| 557 | cnt = (LPFC_NODELIST_SIZE / LPFC_NODELIST_ENTRY_SIZE); | 557 | cnt = (LPFC_NODELIST_SIZE / LPFC_NODELIST_ENTRY_SIZE); |
| @@ -696,11 +696,11 @@ lpfc_debugfs_nodelist_data(struct lpfc_vport *vport, char *buf, int size) | |||
| 696 | len += snprintf(buf + len, size - len, "\tRport List:\n"); | 696 | len += snprintf(buf + len, size - len, "\tRport List:\n"); |
| 697 | list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) { | 697 | list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) { |
| 698 | /* local short-hand pointer. */ | 698 | /* local short-hand pointer. */ |
| 699 | spin_lock(&phba->hbalock); | ||
| 699 | rport = lpfc_ndlp_get_nrport(ndlp); | 700 | rport = lpfc_ndlp_get_nrport(ndlp); |
| 700 | if (!rport) | 701 | if (rport) |
| 701 | continue; | 702 | nrport = rport->remoteport; |
| 702 | 703 | spin_unlock(&phba->hbalock); | |
| 703 | nrport = rport->remoteport; | ||
| 704 | if (!nrport) | 704 | if (!nrport) |
| 705 | continue; | 705 | continue; |
| 706 | 706 | ||
diff --git a/drivers/scsi/lpfc/lpfc_nvme.c b/drivers/scsi/lpfc/lpfc_nvme.c index 028462e5994d..918ae18ef8a8 100644 --- a/drivers/scsi/lpfc/lpfc_nvme.c +++ b/drivers/scsi/lpfc/lpfc_nvme.c | |||
| @@ -2725,7 +2725,9 @@ lpfc_nvme_register_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) | |||
| 2725 | rpinfo.port_name = wwn_to_u64(ndlp->nlp_portname.u.wwn); | 2725 | rpinfo.port_name = wwn_to_u64(ndlp->nlp_portname.u.wwn); |
| 2726 | rpinfo.node_name = wwn_to_u64(ndlp->nlp_nodename.u.wwn); | 2726 | rpinfo.node_name = wwn_to_u64(ndlp->nlp_nodename.u.wwn); |
| 2727 | 2727 | ||
| 2728 | spin_lock_irq(&vport->phba->hbalock); | ||
| 2728 | oldrport = lpfc_ndlp_get_nrport(ndlp); | 2729 | oldrport = lpfc_ndlp_get_nrport(ndlp); |
| 2730 | spin_unlock_irq(&vport->phba->hbalock); | ||
| 2729 | if (!oldrport) | 2731 | if (!oldrport) |
| 2730 | lpfc_nlp_get(ndlp); | 2732 | lpfc_nlp_get(ndlp); |
| 2731 | 2733 | ||
| @@ -2840,7 +2842,7 @@ lpfc_nvme_unregister_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) | |||
| 2840 | struct nvme_fc_local_port *localport; | 2842 | struct nvme_fc_local_port *localport; |
| 2841 | struct lpfc_nvme_lport *lport; | 2843 | struct lpfc_nvme_lport *lport; |
| 2842 | struct lpfc_nvme_rport *rport; | 2844 | struct lpfc_nvme_rport *rport; |
| 2843 | struct nvme_fc_remote_port *remoteport; | 2845 | struct nvme_fc_remote_port *remoteport = NULL; |
| 2844 | 2846 | ||
| 2845 | localport = vport->localport; | 2847 | localport = vport->localport; |
| 2846 | 2848 | ||
| @@ -2854,11 +2856,14 @@ lpfc_nvme_unregister_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) | |||
| 2854 | if (!lport) | 2856 | if (!lport) |
| 2855 | goto input_err; | 2857 | goto input_err; |
| 2856 | 2858 | ||
| 2859 | spin_lock_irq(&vport->phba->hbalock); | ||
| 2857 | rport = lpfc_ndlp_get_nrport(ndlp); | 2860 | rport = lpfc_ndlp_get_nrport(ndlp); |
| 2858 | if (!rport) | 2861 | if (rport) |
| 2862 | remoteport = rport->remoteport; | ||
| 2863 | spin_unlock_irq(&vport->phba->hbalock); | ||
| 2864 | if (!remoteport) | ||
| 2859 | goto input_err; | 2865 | goto input_err; |
| 2860 | 2866 | ||
| 2861 | remoteport = rport->remoteport; | ||
| 2862 | lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC, | 2867 | lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC, |
| 2863 | "6033 Unreg nvme remoteport %p, portname x%llx, " | 2868 | "6033 Unreg nvme remoteport %p, portname x%llx, " |
| 2864 | "port_id x%06x, portstate x%x port type x%x\n", | 2869 | "port_id x%06x, portstate x%x port type x%x\n", |
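All of the lpfc hunks apply the same idea: take phba->hbalock around lpfc_ndlp_get_nrport(), snapshot rport->remoteport, and only use the snapshot after the lock is dropped. As a hypothetical helper (the patch open-codes this at each call site; the attribute and debugfs paths use the plain spin_lock variant rather than spin_lock_irq):

```c
#include <linux/spinlock.h>

/* Hypothetical helper: snapshot the nvme remoteport under hbalock. */
static struct nvme_fc_remote_port *
lpfc_snapshot_remoteport(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
	struct nvme_fc_remote_port *remoteport = NULL;
	struct lpfc_nvme_rport *rport;

	spin_lock_irq(&vport->phba->hbalock);
	rport = lpfc_ndlp_get_nrport(ndlp);	/* may be NULL */
	if (rport)
		remoteport = rport->remoteport;
	spin_unlock_irq(&vport->phba->hbalock);

	return remoteport;	/* NULL if the node has no nvme rport */
}
```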
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c index b79b366a94f7..4a57ffecc7e6 100644 --- a/drivers/scsi/sd.c +++ b/drivers/scsi/sd.c | |||
| @@ -1276,7 +1276,8 @@ static int sd_init_command(struct scsi_cmnd *cmd) | |||
| 1276 | case REQ_OP_ZONE_RESET: | 1276 | case REQ_OP_ZONE_RESET: |
| 1277 | return sd_zbc_setup_reset_cmnd(cmd); | 1277 | return sd_zbc_setup_reset_cmnd(cmd); |
| 1278 | default: | 1278 | default: |
| 1279 | BUG(); | 1279 | WARN_ON_ONCE(1); |
| 1280 | return BLKPREP_KILL; | ||
| 1280 | } | 1281 | } |
| 1281 | } | 1282 | } |
| 1282 | 1283 | ||
| @@ -2959,6 +2960,9 @@ static void sd_read_block_characteristics(struct scsi_disk *sdkp) | |||
| 2959 | if (rot == 1) { | 2960 | if (rot == 1) { |
| 2960 | blk_queue_flag_set(QUEUE_FLAG_NONROT, q); | 2961 | blk_queue_flag_set(QUEUE_FLAG_NONROT, q); |
| 2961 | blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, q); | 2962 | blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, q); |
| 2963 | } else { | ||
| 2964 | blk_queue_flag_clear(QUEUE_FLAG_NONROT, q); | ||
| 2965 | blk_queue_flag_set(QUEUE_FLAG_ADD_RANDOM, q); | ||
| 2962 | } | 2966 | } |
| 2963 | 2967 | ||
| 2964 | if (sdkp->device->type == TYPE_ZBC) { | 2968 | if (sdkp->device->type == TYPE_ZBC) { |
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c index 9d5d2ca7fc4f..c55f38ec391c 100644 --- a/drivers/scsi/ufs/ufshcd.c +++ b/drivers/scsi/ufs/ufshcd.c | |||
| @@ -7940,6 +7940,13 @@ int ufshcd_alloc_host(struct device *dev, struct ufs_hba **hba_handle) | |||
| 7940 | err = -ENOMEM; | 7940 | err = -ENOMEM; |
| 7941 | goto out_error; | 7941 | goto out_error; |
| 7942 | } | 7942 | } |
| 7943 | |||
| 7944 | /* | ||
| 7945 | * Do not use blk-mq at this time because blk-mq does not support | ||
| 7946 | * runtime pm. | ||
| 7947 | */ | ||
| 7948 | host->use_blk_mq = false; | ||
| 7949 | |||
| 7943 | hba = shost_priv(host); | 7950 | hba = shost_priv(host); |
| 7944 | hba->host = host; | 7951 | hba->host = host; |
| 7945 | hba->dev = dev; | 7952 | hba->dev = dev; |
diff --git a/drivers/soundwire/stream.c b/drivers/soundwire/stream.c index 4b5e250e8615..e5c7e1ef6318 100644 --- a/drivers/soundwire/stream.c +++ b/drivers/soundwire/stream.c | |||
| @@ -899,9 +899,10 @@ static void sdw_release_master_stream(struct sdw_stream_runtime *stream) | |||
| 899 | struct sdw_master_runtime *m_rt = stream->m_rt; | 899 | struct sdw_master_runtime *m_rt = stream->m_rt; |
| 900 | struct sdw_slave_runtime *s_rt, *_s_rt; | 900 | struct sdw_slave_runtime *s_rt, *_s_rt; |
| 901 | 901 | ||
| 902 | list_for_each_entry_safe(s_rt, _s_rt, | 902 | list_for_each_entry_safe(s_rt, _s_rt, &m_rt->slave_rt_list, m_rt_node) { |
| 903 | &m_rt->slave_rt_list, m_rt_node) | 903 | sdw_slave_port_release(s_rt->slave->bus, s_rt->slave, stream); |
| 904 | sdw_stream_remove_slave(s_rt->slave, stream); | 904 | sdw_release_slave_stream(s_rt->slave, stream); |
| 905 | } | ||
| 905 | 906 | ||
| 906 | list_del(&m_rt->bus_node); | 907 | list_del(&m_rt->bus_node); |
| 907 | } | 908 | } |
| @@ -1112,7 +1113,7 @@ int sdw_stream_add_master(struct sdw_bus *bus, | |||
| 1112 | "Master runtime config failed for stream:%s", | 1113 | "Master runtime config failed for stream:%s", |
| 1113 | stream->name); | 1114 | stream->name); |
| 1114 | ret = -ENOMEM; | 1115 | ret = -ENOMEM; |
| 1115 | goto error; | 1116 | goto unlock; |
| 1116 | } | 1117 | } |
| 1117 | 1118 | ||
| 1118 | ret = sdw_config_stream(bus->dev, stream, stream_config, false); | 1119 | ret = sdw_config_stream(bus->dev, stream, stream_config, false); |
| @@ -1123,11 +1124,11 @@ int sdw_stream_add_master(struct sdw_bus *bus, | |||
| 1123 | if (ret) | 1124 | if (ret) |
| 1124 | goto stream_error; | 1125 | goto stream_error; |
| 1125 | 1126 | ||
| 1126 | stream->state = SDW_STREAM_CONFIGURED; | 1127 | goto unlock; |
| 1127 | 1128 | ||
| 1128 | stream_error: | 1129 | stream_error: |
| 1129 | sdw_release_master_stream(stream); | 1130 | sdw_release_master_stream(stream); |
| 1130 | error: | 1131 | unlock: |
| 1131 | mutex_unlock(&bus->bus_lock); | 1132 | mutex_unlock(&bus->bus_lock); |
| 1132 | return ret; | 1133 | return ret; |
| 1133 | } | 1134 | } |
| @@ -1141,6 +1142,10 @@ EXPORT_SYMBOL(sdw_stream_add_master); | |||
| 1141 | * @stream: SoundWire stream | 1142 | * @stream: SoundWire stream |
| 1142 | * @port_config: Port configuration for audio stream | 1143 | * @port_config: Port configuration for audio stream |
| 1143 | * @num_ports: Number of ports | 1144 | * @num_ports: Number of ports |
| 1145 | * | ||
| 1146 | * It is expected that Slave is added before adding Master | ||
| 1147 | * to the Stream. | ||
| 1148 | * | ||
| 1144 | */ | 1149 | */ |
| 1145 | int sdw_stream_add_slave(struct sdw_slave *slave, | 1150 | int sdw_stream_add_slave(struct sdw_slave *slave, |
| 1146 | struct sdw_stream_config *stream_config, | 1151 | struct sdw_stream_config *stream_config, |
| @@ -1186,6 +1191,12 @@ int sdw_stream_add_slave(struct sdw_slave *slave, | |||
| 1186 | if (ret) | 1191 | if (ret) |
| 1187 | goto stream_error; | 1192 | goto stream_error; |
| 1188 | 1193 | ||
| 1194 | /* | ||
| 1195 | * Change stream state to CONFIGURED on first Slave add. | ||
| 1196 | * Bus is not aware of number of Slave(s) in a stream at this | ||
| 1197 | * point so cannot depend on all Slave(s) to be added in order to | ||
| 1198 | * change stream state to CONFIGURED. | ||
| 1199 | */ | ||
| 1189 | stream->state = SDW_STREAM_CONFIGURED; | 1200 | stream->state = SDW_STREAM_CONFIGURED; |
| 1190 | goto error; | 1201 | goto error; |
| 1191 | 1202 | ||
diff --git a/drivers/spi/spi-gpio.c b/drivers/spi/spi-gpio.c index 0626e6e3ea0c..421bfc7dda67 100644 --- a/drivers/spi/spi-gpio.c +++ b/drivers/spi/spi-gpio.c | |||
| @@ -300,8 +300,8 @@ static int spi_gpio_request(struct device *dev, | |||
| 300 | *mflags |= SPI_MASTER_NO_RX; | 300 | *mflags |= SPI_MASTER_NO_RX; |
| 301 | 301 | ||
| 302 | spi_gpio->sck = devm_gpiod_get(dev, "sck", GPIOD_OUT_LOW); | 302 | spi_gpio->sck = devm_gpiod_get(dev, "sck", GPIOD_OUT_LOW); |
| 303 | if (IS_ERR(spi_gpio->mosi)) | 303 | if (IS_ERR(spi_gpio->sck)) |
| 304 | return PTR_ERR(spi_gpio->mosi); | 304 | return PTR_ERR(spi_gpio->sck); |
| 305 | 305 | ||
| 306 | for (i = 0; i < num_chipselects; i++) { | 306 | for (i = 0; i < num_chipselects; i++) { |
| 307 | spi_gpio->cs_gpios[i] = devm_gpiod_get_index(dev, "cs", | 307 | spi_gpio->cs_gpios[i] = devm_gpiod_get_index(dev, "cs", |
diff --git a/drivers/spi/spi-rspi.c b/drivers/spi/spi-rspi.c index 95dc4d78618d..b37de1d991d6 100644 --- a/drivers/spi/spi-rspi.c +++ b/drivers/spi/spi-rspi.c | |||
| @@ -598,11 +598,13 @@ static int rspi_dma_transfer(struct rspi_data *rspi, struct sg_table *tx, | |||
| 598 | 598 | ||
| 599 | ret = wait_event_interruptible_timeout(rspi->wait, | 599 | ret = wait_event_interruptible_timeout(rspi->wait, |
| 600 | rspi->dma_callbacked, HZ); | 600 | rspi->dma_callbacked, HZ); |
| 601 | if (ret > 0 && rspi->dma_callbacked) | 601 | if (ret > 0 && rspi->dma_callbacked) { |
| 602 | ret = 0; | 602 | ret = 0; |
| 603 | else if (!ret) { | 603 | } else { |
| 604 | dev_err(&rspi->master->dev, "DMA timeout\n"); | 604 | if (!ret) { |
| 605 | ret = -ETIMEDOUT; | 605 | dev_err(&rspi->master->dev, "DMA timeout\n"); |
| 606 | ret = -ETIMEDOUT; | ||
| 607 | } | ||
| 606 | if (tx) | 608 | if (tx) |
| 607 | dmaengine_terminate_all(rspi->master->dma_tx); | 609 | dmaengine_terminate_all(rspi->master->dma_tx); |
| 608 | if (rx) | 610 | if (rx) |
| @@ -1350,12 +1352,36 @@ static const struct platform_device_id spi_driver_ids[] = { | |||
| 1350 | 1352 | ||
| 1351 | MODULE_DEVICE_TABLE(platform, spi_driver_ids); | 1353 | MODULE_DEVICE_TABLE(platform, spi_driver_ids); |
| 1352 | 1354 | ||
| 1355 | #ifdef CONFIG_PM_SLEEP | ||
| 1356 | static int rspi_suspend(struct device *dev) | ||
| 1357 | { | ||
| 1358 | struct platform_device *pdev = to_platform_device(dev); | ||
| 1359 | struct rspi_data *rspi = platform_get_drvdata(pdev); | ||
| 1360 | |||
| 1361 | return spi_master_suspend(rspi->master); | ||
| 1362 | } | ||
| 1363 | |||
| 1364 | static int rspi_resume(struct device *dev) | ||
| 1365 | { | ||
| 1366 | struct platform_device *pdev = to_platform_device(dev); | ||
| 1367 | struct rspi_data *rspi = platform_get_drvdata(pdev); | ||
| 1368 | |||
| 1369 | return spi_master_resume(rspi->master); | ||
| 1370 | } | ||
| 1371 | |||
| 1372 | static SIMPLE_DEV_PM_OPS(rspi_pm_ops, rspi_suspend, rspi_resume); | ||
| 1373 | #define DEV_PM_OPS &rspi_pm_ops | ||
| 1374 | #else | ||
| 1375 | #define DEV_PM_OPS NULL | ||
| 1376 | #endif /* CONFIG_PM_SLEEP */ | ||
| 1377 | |||
| 1353 | static struct platform_driver rspi_driver = { | 1378 | static struct platform_driver rspi_driver = { |
| 1354 | .probe = rspi_probe, | 1379 | .probe = rspi_probe, |
| 1355 | .remove = rspi_remove, | 1380 | .remove = rspi_remove, |
| 1356 | .id_table = spi_driver_ids, | 1381 | .id_table = spi_driver_ids, |
| 1357 | .driver = { | 1382 | .driver = { |
| 1358 | .name = "renesas_spi", | 1383 | .name = "renesas_spi", |
| 1384 | .pm = DEV_PM_OPS, | ||
| 1359 | .of_match_table = of_match_ptr(rspi_of_match), | 1385 | .of_match_table = of_match_ptr(rspi_of_match), |
| 1360 | }, | 1386 | }, |
| 1361 | }; | 1387 | }; |
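Both this patch and the sh-msiof one below add the same system-sleep hooks. A generic sketch, assuming probe stored the spi_master pointer as driver data (the real drivers store their private struct and reach the master through it); __maybe_unused stands in for the #ifdef CONFIG_PM_SLEEP guard used in the patches:

```c
#include <linux/pm.h>
#include <linux/spi/spi.h>

static int __maybe_unused demo_spi_suspend(struct device *dev)
{
	struct spi_master *master = dev_get_drvdata(dev);

	return spi_master_suspend(master);	/* quiesce the message queue */
}

static int __maybe_unused demo_spi_resume(struct device *dev)
{
	struct spi_master *master = dev_get_drvdata(dev);

	return spi_master_resume(master);	/* restart queued transfers */
}

static SIMPLE_DEV_PM_OPS(demo_spi_pm_ops, demo_spi_suspend, demo_spi_resume);
/* wired up via .driver.pm = &demo_spi_pm_ops */
```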
diff --git a/drivers/spi/spi-sh-msiof.c b/drivers/spi/spi-sh-msiof.c index 539d6d1a277a..101cd6aae2ea 100644 --- a/drivers/spi/spi-sh-msiof.c +++ b/drivers/spi/spi-sh-msiof.c | |||
| @@ -397,7 +397,8 @@ static void sh_msiof_spi_set_mode_regs(struct sh_msiof_spi_priv *p, | |||
| 397 | 397 | ||
| 398 | static void sh_msiof_reset_str(struct sh_msiof_spi_priv *p) | 398 | static void sh_msiof_reset_str(struct sh_msiof_spi_priv *p) |
| 399 | { | 399 | { |
| 400 | sh_msiof_write(p, STR, sh_msiof_read(p, STR)); | 400 | sh_msiof_write(p, STR, |
| 401 | sh_msiof_read(p, STR) & ~(STR_TDREQ | STR_RDREQ)); | ||
| 401 | } | 402 | } |
| 402 | 403 | ||
| 403 | static void sh_msiof_spi_write_fifo_8(struct sh_msiof_spi_priv *p, | 404 | static void sh_msiof_spi_write_fifo_8(struct sh_msiof_spi_priv *p, |
| @@ -1426,12 +1427,37 @@ static const struct platform_device_id spi_driver_ids[] = { | |||
| 1426 | }; | 1427 | }; |
| 1427 | MODULE_DEVICE_TABLE(platform, spi_driver_ids); | 1428 | MODULE_DEVICE_TABLE(platform, spi_driver_ids); |
| 1428 | 1429 | ||
| 1430 | #ifdef CONFIG_PM_SLEEP | ||
| 1431 | static int sh_msiof_spi_suspend(struct device *dev) | ||
| 1432 | { | ||
| 1433 | struct platform_device *pdev = to_platform_device(dev); | ||
| 1434 | struct sh_msiof_spi_priv *p = platform_get_drvdata(pdev); | ||
| 1435 | |||
| 1436 | return spi_master_suspend(p->master); | ||
| 1437 | } | ||
| 1438 | |||
| 1439 | static int sh_msiof_spi_resume(struct device *dev) | ||
| 1440 | { | ||
| 1441 | struct platform_device *pdev = to_platform_device(dev); | ||
| 1442 | struct sh_msiof_spi_priv *p = platform_get_drvdata(pdev); | ||
| 1443 | |||
| 1444 | return spi_master_resume(p->master); | ||
| 1445 | } | ||
| 1446 | |||
| 1447 | static SIMPLE_DEV_PM_OPS(sh_msiof_spi_pm_ops, sh_msiof_spi_suspend, | ||
| 1448 | sh_msiof_spi_resume); | ||
| 1449 | #define DEV_PM_OPS &sh_msiof_spi_pm_ops | ||
| 1450 | #else | ||
| 1451 | #define DEV_PM_OPS NULL | ||
| 1452 | #endif /* CONFIG_PM_SLEEP */ | ||
| 1453 | |||
| 1429 | static struct platform_driver sh_msiof_spi_drv = { | 1454 | static struct platform_driver sh_msiof_spi_drv = { |
| 1430 | .probe = sh_msiof_spi_probe, | 1455 | .probe = sh_msiof_spi_probe, |
| 1431 | .remove = sh_msiof_spi_remove, | 1456 | .remove = sh_msiof_spi_remove, |
| 1432 | .id_table = spi_driver_ids, | 1457 | .id_table = spi_driver_ids, |
| 1433 | .driver = { | 1458 | .driver = { |
| 1434 | .name = "spi_sh_msiof", | 1459 | .name = "spi_sh_msiof", |
| 1460 | .pm = DEV_PM_OPS, | ||
| 1435 | .of_match_table = of_match_ptr(sh_msiof_match), | 1461 | .of_match_table = of_match_ptr(sh_msiof_match), |
| 1436 | }, | 1462 | }, |
| 1437 | }; | 1463 | }; |
diff --git a/drivers/spi/spi-tegra20-slink.c b/drivers/spi/spi-tegra20-slink.c index 6f7b946b5ced..1427f343b39a 100644 --- a/drivers/spi/spi-tegra20-slink.c +++ b/drivers/spi/spi-tegra20-slink.c | |||
| @@ -1063,6 +1063,24 @@ static int tegra_slink_probe(struct platform_device *pdev) | |||
| 1063 | goto exit_free_master; | 1063 | goto exit_free_master; |
| 1064 | } | 1064 | } |
| 1065 | 1065 | ||
| 1066 | /* disabled clock may cause interrupt storm upon request */ | ||
| 1067 | tspi->clk = devm_clk_get(&pdev->dev, NULL); | ||
| 1068 | if (IS_ERR(tspi->clk)) { | ||
| 1069 | ret = PTR_ERR(tspi->clk); | ||
| 1070 | dev_err(&pdev->dev, "Can not get clock %d\n", ret); | ||
| 1071 | goto exit_free_master; | ||
| 1072 | } | ||
| 1073 | ret = clk_prepare(tspi->clk); | ||
| 1074 | if (ret < 0) { | ||
| 1075 | dev_err(&pdev->dev, "Clock prepare failed %d\n", ret); | ||
| 1076 | goto exit_free_master; | ||
| 1077 | } | ||
| 1078 | ret = clk_enable(tspi->clk); | ||
| 1079 | if (ret < 0) { | ||
| 1080 | dev_err(&pdev->dev, "Clock enable failed %d\n", ret); | ||
| 1081 | goto exit_free_master; | ||
| 1082 | } | ||
| 1083 | |||
| 1066 | spi_irq = platform_get_irq(pdev, 0); | 1084 | spi_irq = platform_get_irq(pdev, 0); |
| 1067 | tspi->irq = spi_irq; | 1085 | tspi->irq = spi_irq; |
| 1068 | ret = request_threaded_irq(tspi->irq, tegra_slink_isr, | 1086 | ret = request_threaded_irq(tspi->irq, tegra_slink_isr, |
| @@ -1071,14 +1089,7 @@ static int tegra_slink_probe(struct platform_device *pdev) | |||
| 1071 | if (ret < 0) { | 1089 | if (ret < 0) { |
| 1072 | dev_err(&pdev->dev, "Failed to register ISR for IRQ %d\n", | 1090 | dev_err(&pdev->dev, "Failed to register ISR for IRQ %d\n", |
| 1073 | tspi->irq); | 1091 | tspi->irq); |
| 1074 | goto exit_free_master; | 1092 | goto exit_clk_disable; |
| 1075 | } | ||
| 1076 | |||
| 1077 | tspi->clk = devm_clk_get(&pdev->dev, NULL); | ||
| 1078 | if (IS_ERR(tspi->clk)) { | ||
| 1079 | dev_err(&pdev->dev, "can not get clock\n"); | ||
| 1080 | ret = PTR_ERR(tspi->clk); | ||
| 1081 | goto exit_free_irq; | ||
| 1082 | } | 1093 | } |
| 1083 | 1094 | ||
| 1084 | tspi->rst = devm_reset_control_get_exclusive(&pdev->dev, "spi"); | 1095 | tspi->rst = devm_reset_control_get_exclusive(&pdev->dev, "spi"); |
| @@ -1138,6 +1149,8 @@ exit_rx_dma_free: | |||
| 1138 | tegra_slink_deinit_dma_param(tspi, true); | 1149 | tegra_slink_deinit_dma_param(tspi, true); |
| 1139 | exit_free_irq: | 1150 | exit_free_irq: |
| 1140 | free_irq(spi_irq, tspi); | 1151 | free_irq(spi_irq, tspi); |
| 1152 | exit_clk_disable: | ||
| 1153 | clk_disable(tspi->clk); | ||
| 1141 | exit_free_master: | 1154 | exit_free_master: |
| 1142 | spi_master_put(master); | 1155 | spi_master_put(master); |
| 1143 | return ret; | 1156 | return ret; |
| @@ -1150,6 +1163,8 @@ static int tegra_slink_remove(struct platform_device *pdev) | |||
| 1150 | 1163 | ||
| 1151 | free_irq(tspi->irq, tspi); | 1164 | free_irq(tspi->irq, tspi); |
| 1152 | 1165 | ||
| 1166 | clk_disable(tspi->clk); | ||
| 1167 | |||
| 1153 | if (tspi->tx_dma_chan) | 1168 | if (tspi->tx_dma_chan) |
| 1154 | tegra_slink_deinit_dma_param(tspi, false); | 1169 | tegra_slink_deinit_dma_param(tspi, false); |
| 1155 | 1170 | ||
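The reordering matters because request_threaded_irq() can fire immediately; with the controller clock still off, the handler's register reads storm. A hypothetical mini-probe showing the ordering and the matching unwind (the combined clk_prepare_enable() stands in for the separate prepare/enable calls in the driver):

```c
#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

struct demo_ctrl {
	struct clk *clk;
	int irq;
};

static irqreturn_t demo_isr(int irq, void *data)
{
	return IRQ_HANDLED;	/* a real handler would read/clear status here */
}

static int demo_probe(struct platform_device *pdev)
{
	struct demo_ctrl *ctrl;
	int ret;

	ctrl = devm_kzalloc(&pdev->dev, sizeof(*ctrl), GFP_KERNEL);
	if (!ctrl)
		return -ENOMEM;

	ctrl->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(ctrl->clk))
		return PTR_ERR(ctrl->clk);

	ret = clk_prepare_enable(ctrl->clk);	/* clock running before the IRQ */
	if (ret)
		return ret;

	ctrl->irq = platform_get_irq(pdev, 0);
	if (ctrl->irq < 0) {
		ret = ctrl->irq;
		goto err_clk;
	}

	ret = devm_request_irq(&pdev->dev, ctrl->irq, demo_isr, 0,
			       dev_name(&pdev->dev), ctrl);
	if (ret)
		goto err_clk;

	return 0;

err_clk:
	clk_disable_unprepare(ctrl->clk);	/* reverse-order unwind */
	return ret;
}
```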
diff --git a/drivers/staging/media/mt9t031/Kconfig b/drivers/staging/media/mt9t031/Kconfig index f48e06a03cdb..9a58aaf72edd 100644 --- a/drivers/staging/media/mt9t031/Kconfig +++ b/drivers/staging/media/mt9t031/Kconfig | |||
| @@ -1,9 +1,3 @@ | |||
| 1 | config SOC_CAMERA_IMX074 | ||
| 2 | tristate "imx074 support (DEPRECATED)" | ||
| 3 | depends on SOC_CAMERA && I2C | ||
| 4 | help | ||
| 5 | This driver supports IMX074 cameras from Sony | ||
| 6 | |||
| 7 | config SOC_CAMERA_MT9T031 | 1 | config SOC_CAMERA_MT9T031 |
| 8 | tristate "mt9t031 support (DEPRECATED)" | 2 | tristate "mt9t031 support (DEPRECATED)" |
| 9 | depends on SOC_CAMERA && I2C | 3 | depends on SOC_CAMERA && I2C |
diff --git a/drivers/target/iscsi/iscsi_target_auth.c b/drivers/target/iscsi/iscsi_target_auth.c index 9518ffd8b8ba..4e680d753941 100644 --- a/drivers/target/iscsi/iscsi_target_auth.c +++ b/drivers/target/iscsi/iscsi_target_auth.c | |||
| @@ -26,27 +26,6 @@ | |||
| 26 | #include "iscsi_target_nego.h" | 26 | #include "iscsi_target_nego.h" |
| 27 | #include "iscsi_target_auth.h" | 27 | #include "iscsi_target_auth.h" |
| 28 | 28 | ||
| 29 | static int chap_string_to_hex(unsigned char *dst, unsigned char *src, int len) | ||
| 30 | { | ||
| 31 | int j = DIV_ROUND_UP(len, 2), rc; | ||
| 32 | |||
| 33 | rc = hex2bin(dst, src, j); | ||
| 34 | if (rc < 0) | ||
| 35 | pr_debug("CHAP string contains non hex digit symbols\n"); | ||
| 36 | |||
| 37 | dst[j] = '\0'; | ||
| 38 | return j; | ||
| 39 | } | ||
| 40 | |||
| 41 | static void chap_binaryhex_to_asciihex(char *dst, char *src, int src_len) | ||
| 42 | { | ||
| 43 | int i; | ||
| 44 | |||
| 45 | for (i = 0; i < src_len; i++) { | ||
| 46 | sprintf(&dst[i*2], "%02x", (int) src[i] & 0xff); | ||
| 47 | } | ||
| 48 | } | ||
| 49 | |||
| 50 | static int chap_gen_challenge( | 29 | static int chap_gen_challenge( |
| 51 | struct iscsi_conn *conn, | 30 | struct iscsi_conn *conn, |
| 52 | int caller, | 31 | int caller, |
| @@ -62,7 +41,7 @@ static int chap_gen_challenge( | |||
| 62 | ret = get_random_bytes_wait(chap->challenge, CHAP_CHALLENGE_LENGTH); | 41 | ret = get_random_bytes_wait(chap->challenge, CHAP_CHALLENGE_LENGTH); |
| 63 | if (unlikely(ret)) | 42 | if (unlikely(ret)) |
| 64 | return ret; | 43 | return ret; |
| 65 | chap_binaryhex_to_asciihex(challenge_asciihex, chap->challenge, | 44 | bin2hex(challenge_asciihex, chap->challenge, |
| 66 | CHAP_CHALLENGE_LENGTH); | 45 | CHAP_CHALLENGE_LENGTH); |
| 67 | /* | 46 | /* |
| 68 | * Set CHAP_C, and copy the generated challenge into c_str. | 47 | * Set CHAP_C, and copy the generated challenge into c_str. |
| @@ -248,9 +227,16 @@ static int chap_server_compute_md5( | |||
| 248 | pr_err("Could not find CHAP_R.\n"); | 227 | pr_err("Could not find CHAP_R.\n"); |
| 249 | goto out; | 228 | goto out; |
| 250 | } | 229 | } |
| 230 | if (strlen(chap_r) != MD5_SIGNATURE_SIZE * 2) { | ||
| 231 | pr_err("Malformed CHAP_R\n"); | ||
| 232 | goto out; | ||
| 233 | } | ||
| 234 | if (hex2bin(client_digest, chap_r, MD5_SIGNATURE_SIZE) < 0) { | ||
| 235 | pr_err("Malformed CHAP_R\n"); | ||
| 236 | goto out; | ||
| 237 | } | ||
| 251 | 238 | ||
| 252 | pr_debug("[server] Got CHAP_R=%s\n", chap_r); | 239 | pr_debug("[server] Got CHAP_R=%s\n", chap_r); |
| 253 | chap_string_to_hex(client_digest, chap_r, strlen(chap_r)); | ||
| 254 | 240 | ||
| 255 | tfm = crypto_alloc_shash("md5", 0, 0); | 241 | tfm = crypto_alloc_shash("md5", 0, 0); |
| 256 | if (IS_ERR(tfm)) { | 242 | if (IS_ERR(tfm)) { |
| @@ -294,7 +280,7 @@ static int chap_server_compute_md5( | |||
| 294 | goto out; | 280 | goto out; |
| 295 | } | 281 | } |
| 296 | 282 | ||
| 297 | chap_binaryhex_to_asciihex(response, server_digest, MD5_SIGNATURE_SIZE); | 283 | bin2hex(response, server_digest, MD5_SIGNATURE_SIZE); |
| 298 | pr_debug("[server] MD5 Server Digest: %s\n", response); | 284 | pr_debug("[server] MD5 Server Digest: %s\n", response); |
| 299 | 285 | ||
| 300 | if (memcmp(server_digest, client_digest, MD5_SIGNATURE_SIZE) != 0) { | 286 | if (memcmp(server_digest, client_digest, MD5_SIGNATURE_SIZE) != 0) { |
| @@ -349,9 +335,7 @@ static int chap_server_compute_md5( | |||
| 349 | pr_err("Could not find CHAP_C.\n"); | 335 | pr_err("Could not find CHAP_C.\n"); |
| 350 | goto out; | 336 | goto out; |
| 351 | } | 337 | } |
| 352 | pr_debug("[server] Got CHAP_C=%s\n", challenge); | 338 | challenge_len = DIV_ROUND_UP(strlen(challenge), 2); |
| 353 | challenge_len = chap_string_to_hex(challenge_binhex, challenge, | ||
| 354 | strlen(challenge)); | ||
| 355 | if (!challenge_len) { | 339 | if (!challenge_len) { |
| 356 | pr_err("Unable to convert incoming challenge\n"); | 340 | pr_err("Unable to convert incoming challenge\n"); |
| 357 | goto out; | 341 | goto out; |
| @@ -360,6 +344,11 @@ static int chap_server_compute_md5( | |||
| 360 | pr_err("CHAP_C exceeds maximum binary size of 1024 bytes\n"); | 344 | pr_err("CHAP_C exceeds maximum binary size of 1024 bytes\n"); |
| 361 | goto out; | 345 | goto out; |
| 362 | } | 346 | } |
| 347 | if (hex2bin(challenge_binhex, challenge, challenge_len) < 0) { | ||
| 348 | pr_err("Malformed CHAP_C\n"); | ||
| 349 | goto out; | ||
| 350 | } | ||
| 351 | pr_debug("[server] Got CHAP_C=%s\n", challenge); | ||
| 363 | /* | 352 | /* |
| 364 | * During mutual authentication, the CHAP_C generated by the | 353 | * During mutual authentication, the CHAP_C generated by the |
| 365 | * initiator must not match the original CHAP_C generated by | 354 | * initiator must not match the original CHAP_C generated by |
| @@ -413,7 +402,7 @@ static int chap_server_compute_md5( | |||
| 413 | /* | 402 | /* |
| 414 | * Convert response from binary hex to ascii hex. | 403 | * Convert response from binary hex to ascii hex. |
| 415 | */ | 404 | */ |
| 416 | chap_binaryhex_to_asciihex(response, digest, MD5_SIGNATURE_SIZE); | 405 | bin2hex(response, digest, MD5_SIGNATURE_SIZE); |
| 417 | *nr_out_len += sprintf(nr_out_ptr + *nr_out_len, "CHAP_R=0x%s", | 406 | *nr_out_len += sprintf(nr_out_ptr + *nr_out_len, "CHAP_R=0x%s", |
| 418 | response); | 407 | response); |
| 419 | *nr_out_len += 1; | 408 | *nr_out_len += 1; |
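The CHAP code drops its private hex helpers in favor of the kernel's hex2bin()/bin2hex(). A small sketch of the CHAP_R validation pattern; MD5_SIGNATURE_SIZE is the constant used in the diff and the helper itself is hypothetical:

```c
#include <linux/errno.h>
#include <linux/kernel.h>	/* hex2bin(), bin2hex() */
#include <linux/string.h>

/* Decode a hex-encoded MD5 digest, rejecting wrong lengths and non-hex
 * characters instead of silently accepting them. */
static int decode_chap_r(u8 *digest, const char *chap_r)
{
	if (strlen(chap_r) != MD5_SIGNATURE_SIZE * 2)
		return -EINVAL;		/* exactly two hex chars per byte */

	return hex2bin(digest, chap_r, MD5_SIGNATURE_SIZE);	/* < 0 on bad input */
}
```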
diff --git a/drivers/tty/serial/cpm_uart/cpm_uart_core.c b/drivers/tty/serial/cpm_uart/cpm_uart_core.c index 24a5f05e769b..e5389591bb4f 100644 --- a/drivers/tty/serial/cpm_uart/cpm_uart_core.c +++ b/drivers/tty/serial/cpm_uart/cpm_uart_core.c | |||
| @@ -1054,8 +1054,8 @@ static int poll_wait_key(char *obuf, struct uart_cpm_port *pinfo) | |||
| 1054 | /* Get the address of the host memory buffer. | 1054 | /* Get the address of the host memory buffer. |
| 1055 | */ | 1055 | */ |
| 1056 | bdp = pinfo->rx_cur; | 1056 | bdp = pinfo->rx_cur; |
| 1057 | while (bdp->cbd_sc & BD_SC_EMPTY) | 1057 | if (bdp->cbd_sc & BD_SC_EMPTY) |
| 1058 | ; | 1058 | return NO_POLL_CHAR; |
| 1059 | 1059 | ||
| 1060 | /* If the buffer address is in the CPM DPRAM, don't | 1060 | /* If the buffer address is in the CPM DPRAM, don't |
| 1061 | * convert it. | 1061 | * convert it. |
| @@ -1090,7 +1090,11 @@ static int cpm_get_poll_char(struct uart_port *port) | |||
| 1090 | poll_chars = 0; | 1090 | poll_chars = 0; |
| 1091 | } | 1091 | } |
| 1092 | if (poll_chars <= 0) { | 1092 | if (poll_chars <= 0) { |
| 1093 | poll_chars = poll_wait_key(poll_buf, pinfo); | 1093 | int ret = poll_wait_key(poll_buf, pinfo); |
| 1094 | |||
| 1095 | if (ret == NO_POLL_CHAR) | ||
| 1096 | return ret; | ||
| 1097 | poll_chars = ret; | ||
| 1094 | pollp = poll_buf; | 1098 | pollp = poll_buf; |
| 1095 | } | 1099 | } |
| 1096 | poll_chars--; | 1100 | poll_chars--; |
diff --git a/drivers/tty/serial/fsl_lpuart.c b/drivers/tty/serial/fsl_lpuart.c index 51e47a63d61a..3f8d1274fc85 100644 --- a/drivers/tty/serial/fsl_lpuart.c +++ b/drivers/tty/serial/fsl_lpuart.c | |||
| @@ -979,7 +979,8 @@ static inline int lpuart_start_rx_dma(struct lpuart_port *sport) | |||
| 979 | struct circ_buf *ring = &sport->rx_ring; | 979 | struct circ_buf *ring = &sport->rx_ring; |
| 980 | int ret, nent; | 980 | int ret, nent; |
| 981 | int bits, baud; | 981 | int bits, baud; |
| 982 | struct tty_struct *tty = tty_port_tty_get(&sport->port.state->port); | 982 | struct tty_port *port = &sport->port.state->port; |
| 983 | struct tty_struct *tty = port->tty; | ||
| 983 | struct ktermios *termios = &tty->termios; | 984 | struct ktermios *termios = &tty->termios; |
| 984 | 985 | ||
| 985 | baud = tty_get_baud_rate(tty); | 986 | baud = tty_get_baud_rate(tty); |
diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c index 239c0fa2e981..0f67197a3783 100644 --- a/drivers/tty/serial/imx.c +++ b/drivers/tty/serial/imx.c | |||
| @@ -2351,6 +2351,14 @@ static int imx_uart_probe(struct platform_device *pdev) | |||
| 2351 | ret); | 2351 | ret); |
| 2352 | return ret; | 2352 | return ret; |
| 2353 | } | 2353 | } |
| 2354 | |||
| 2355 | ret = devm_request_irq(&pdev->dev, rtsirq, imx_uart_rtsint, 0, | ||
| 2356 | dev_name(&pdev->dev), sport); | ||
| 2357 | if (ret) { | ||
| 2358 | dev_err(&pdev->dev, "failed to request rts irq: %d\n", | ||
| 2359 | ret); | ||
| 2360 | return ret; | ||
| 2361 | } | ||
| 2354 | } else { | 2362 | } else { |
| 2355 | ret = devm_request_irq(&pdev->dev, rxirq, imx_uart_int, 0, | 2363 | ret = devm_request_irq(&pdev->dev, rxirq, imx_uart_int, 0, |
| 2356 | dev_name(&pdev->dev), sport); | 2364 | dev_name(&pdev->dev), sport); |
diff --git a/drivers/tty/serial/mvebu-uart.c b/drivers/tty/serial/mvebu-uart.c index d04b5eeea3c6..170e446a2f62 100644 --- a/drivers/tty/serial/mvebu-uart.c +++ b/drivers/tty/serial/mvebu-uart.c | |||
| @@ -511,6 +511,7 @@ static void mvebu_uart_set_termios(struct uart_port *port, | |||
| 511 | termios->c_iflag |= old->c_iflag & ~(INPCK | IGNPAR); | 511 | termios->c_iflag |= old->c_iflag & ~(INPCK | IGNPAR); |
| 512 | termios->c_cflag &= CREAD | CBAUD; | 512 | termios->c_cflag &= CREAD | CBAUD; |
| 513 | termios->c_cflag |= old->c_cflag & ~(CREAD | CBAUD); | 513 | termios->c_cflag |= old->c_cflag & ~(CREAD | CBAUD); |
| 514 | termios->c_cflag |= CS8; | ||
| 514 | } | 515 | } |
| 515 | 516 | ||
| 516 | spin_unlock_irqrestore(&port->lock, flags); | 517 | spin_unlock_irqrestore(&port->lock, flags); |
diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c index 32bc3e3fe4d3..5e5da9acaf0a 100644 --- a/drivers/tty/tty_io.c +++ b/drivers/tty/tty_io.c | |||
| @@ -1255,6 +1255,7 @@ static void tty_driver_remove_tty(struct tty_driver *driver, struct tty_struct * | |||
| 1255 | static int tty_reopen(struct tty_struct *tty) | 1255 | static int tty_reopen(struct tty_struct *tty) |
| 1256 | { | 1256 | { |
| 1257 | struct tty_driver *driver = tty->driver; | 1257 | struct tty_driver *driver = tty->driver; |
| 1258 | int retval; | ||
| 1258 | 1259 | ||
| 1259 | if (driver->type == TTY_DRIVER_TYPE_PTY && | 1260 | if (driver->type == TTY_DRIVER_TYPE_PTY && |
| 1260 | driver->subtype == PTY_TYPE_MASTER) | 1261 | driver->subtype == PTY_TYPE_MASTER) |
| @@ -1268,10 +1269,14 @@ static int tty_reopen(struct tty_struct *tty) | |||
| 1268 | 1269 | ||
| 1269 | tty->count++; | 1270 | tty->count++; |
| 1270 | 1271 | ||
| 1271 | if (!tty->ldisc) | 1272 | if (tty->ldisc) |
| 1272 | return tty_ldisc_reinit(tty, tty->termios.c_line); | 1273 | return 0; |
| 1273 | 1274 | ||
| 1274 | return 0; | 1275 | retval = tty_ldisc_reinit(tty, tty->termios.c_line); |
| 1276 | if (retval) | ||
| 1277 | tty->count--; | ||
| 1278 | |||
| 1279 | return retval; | ||
| 1275 | } | 1280 | } |
| 1276 | 1281 | ||
| 1277 | /** | 1282 | /** |
diff --git a/drivers/tty/vt/vt_ioctl.c b/drivers/tty/vt/vt_ioctl.c index a78ad10a119b..73cdc0d633dd 100644 --- a/drivers/tty/vt/vt_ioctl.c +++ b/drivers/tty/vt/vt_ioctl.c | |||
| @@ -32,6 +32,8 @@ | |||
| 32 | #include <asm/io.h> | 32 | #include <asm/io.h> |
| 33 | #include <linux/uaccess.h> | 33 | #include <linux/uaccess.h> |
| 34 | 34 | ||
| 35 | #include <linux/nospec.h> | ||
| 36 | |||
| 35 | #include <linux/kbd_kern.h> | 37 | #include <linux/kbd_kern.h> |
| 36 | #include <linux/vt_kern.h> | 38 | #include <linux/vt_kern.h> |
| 37 | #include <linux/kbd_diacr.h> | 39 | #include <linux/kbd_diacr.h> |
| @@ -700,6 +702,8 @@ int vt_ioctl(struct tty_struct *tty, | |||
| 700 | if (vsa.console == 0 || vsa.console > MAX_NR_CONSOLES) | 702 | if (vsa.console == 0 || vsa.console > MAX_NR_CONSOLES) |
| 701 | ret = -ENXIO; | 703 | ret = -ENXIO; |
| 702 | else { | 704 | else { |
| 705 | vsa.console = array_index_nospec(vsa.console, | ||
| 706 | MAX_NR_CONSOLES + 1); | ||
| 703 | vsa.console--; | 707 | vsa.console--; |
| 704 | console_lock(); | 708 | console_lock(); |
| 705 | ret = vc_allocate(vsa.console); | 709 | ret = vc_allocate(vsa.console); |
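vt_ioctl gains the usual Spectre-v1 hardening: bounds-check first, then clamp the index with array_index_nospec() before it can be used to address an array. In isolation (hypothetical helper; the 1-based numbering follows the vt code):

```c
#include <linux/errno.h>
#include <linux/nospec.h>

/* Validate a 1-based console number and return a speculation-safe
 * 0-based index, or -ENXIO if out of range. */
static int pick_console(unsigned int console)
{
	if (console == 0 || console > MAX_NR_CONSOLES)
		return -ENXIO;

	console = array_index_nospec(console, MAX_NR_CONSOLES + 1);

	return console - 1;	/* 0-based index used internally */
}
```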
diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c index 656d247819c9..bec581fb7c63 100644 --- a/drivers/usb/class/cdc-wdm.c +++ b/drivers/usb/class/cdc-wdm.c | |||
| @@ -460,7 +460,7 @@ static int service_outstanding_interrupt(struct wdm_device *desc) | |||
| 460 | 460 | ||
| 461 | set_bit(WDM_RESPONDING, &desc->flags); | 461 | set_bit(WDM_RESPONDING, &desc->flags); |
| 462 | spin_unlock_irq(&desc->iuspin); | 462 | spin_unlock_irq(&desc->iuspin); |
| 463 | rv = usb_submit_urb(desc->response, GFP_ATOMIC); | 463 | rv = usb_submit_urb(desc->response, GFP_KERNEL); |
| 464 | spin_lock_irq(&desc->iuspin); | 464 | spin_lock_irq(&desc->iuspin); |
| 465 | if (rv) { | 465 | if (rv) { |
| 466 | dev_err(&desc->intf->dev, | 466 | dev_err(&desc->intf->dev, |
diff --git a/drivers/usb/common/roles.c b/drivers/usb/common/roles.c index 15cc76e22123..99116af07f1d 100644 --- a/drivers/usb/common/roles.c +++ b/drivers/usb/common/roles.c | |||
| @@ -109,8 +109,15 @@ static void *usb_role_switch_match(struct device_connection *con, int ep, | |||
| 109 | */ | 109 | */ |
| 110 | struct usb_role_switch *usb_role_switch_get(struct device *dev) | 110 | struct usb_role_switch *usb_role_switch_get(struct device *dev) |
| 111 | { | 111 | { |
| 112 | return device_connection_find_match(dev, "usb-role-switch", NULL, | 112 | struct usb_role_switch *sw; |
| 113 | usb_role_switch_match); | 113 | |
| 114 | sw = device_connection_find_match(dev, "usb-role-switch", NULL, | ||
| 115 | usb_role_switch_match); | ||
| 116 | |||
| 117 | if (!IS_ERR_OR_NULL(sw)) | ||
| 118 | WARN_ON(!try_module_get(sw->dev.parent->driver->owner)); | ||
| 119 | |||
| 120 | return sw; | ||
| 114 | } | 121 | } |
| 115 | EXPORT_SYMBOL_GPL(usb_role_switch_get); | 122 | EXPORT_SYMBOL_GPL(usb_role_switch_get); |
| 116 | 123 | ||
| @@ -122,8 +129,10 @@ EXPORT_SYMBOL_GPL(usb_role_switch_get); | |||
| 122 | */ | 129 | */ |
| 123 | void usb_role_switch_put(struct usb_role_switch *sw) | 130 | void usb_role_switch_put(struct usb_role_switch *sw) |
| 124 | { | 131 | { |
| 125 | if (!IS_ERR_OR_NULL(sw)) | 132 | if (!IS_ERR_OR_NULL(sw)) { |
| 126 | put_device(&sw->dev); | 133 | put_device(&sw->dev); |
| 134 | module_put(sw->dev.parent->driver->owner); | ||
| 135 | } | ||
| 127 | } | 136 | } |
| 128 | EXPORT_SYMBOL_GPL(usb_role_switch_put); | 137 | EXPORT_SYMBOL_GPL(usb_role_switch_put); |
| 129 | 138 | ||
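The usb_role_switch hunks pin the provider's module while a consumer holds the switch: try_module_get() in the get path is paired with module_put() in the put path, so the provider cannot be unloaded while still in use (the typec switch/mux code further down gets the same treatment). The general shape, sketched with a hypothetical object obj:

	/* get path: pin the providing module while a reference is handed out */
	if (!IS_ERR_OR_NULL(obj))
		WARN_ON(!try_module_get(obj->dev.parent->driver->owner));

	/* matching put path: drop the device reference and unpin the module */
	if (!IS_ERR_OR_NULL(obj)) {
		put_device(&obj->dev);
		module_put(obj->dev.parent->driver->owner);
	}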
diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c index 6ce77b33da61..244417d0dfd1 100644 --- a/drivers/usb/core/devio.c +++ b/drivers/usb/core/devio.c | |||
| @@ -1434,10 +1434,13 @@ static int proc_do_submiturb(struct usb_dev_state *ps, struct usbdevfs_urb *uurb | |||
| 1434 | struct async *as = NULL; | 1434 | struct async *as = NULL; |
| 1435 | struct usb_ctrlrequest *dr = NULL; | 1435 | struct usb_ctrlrequest *dr = NULL; |
| 1436 | unsigned int u, totlen, isofrmlen; | 1436 | unsigned int u, totlen, isofrmlen; |
| 1437 | int i, ret, is_in, num_sgs = 0, ifnum = -1; | 1437 | int i, ret, num_sgs = 0, ifnum = -1; |
| 1438 | int number_of_packets = 0; | 1438 | int number_of_packets = 0; |
| 1439 | unsigned int stream_id = 0; | 1439 | unsigned int stream_id = 0; |
| 1440 | void *buf; | 1440 | void *buf; |
| 1441 | bool is_in; | ||
| 1442 | bool allow_short = false; | ||
| 1443 | bool allow_zero = false; | ||
| 1441 | unsigned long mask = USBDEVFS_URB_SHORT_NOT_OK | | 1444 | unsigned long mask = USBDEVFS_URB_SHORT_NOT_OK | |
| 1442 | USBDEVFS_URB_BULK_CONTINUATION | | 1445 | USBDEVFS_URB_BULK_CONTINUATION | |
| 1443 | USBDEVFS_URB_NO_FSBR | | 1446 | USBDEVFS_URB_NO_FSBR | |
| @@ -1471,6 +1474,8 @@ static int proc_do_submiturb(struct usb_dev_state *ps, struct usbdevfs_urb *uurb | |||
| 1471 | u = 0; | 1474 | u = 0; |
| 1472 | switch (uurb->type) { | 1475 | switch (uurb->type) { |
| 1473 | case USBDEVFS_URB_TYPE_CONTROL: | 1476 | case USBDEVFS_URB_TYPE_CONTROL: |
| 1477 | if (is_in) | ||
| 1478 | allow_short = true; | ||
| 1474 | if (!usb_endpoint_xfer_control(&ep->desc)) | 1479 | if (!usb_endpoint_xfer_control(&ep->desc)) |
| 1475 | return -EINVAL; | 1480 | return -EINVAL; |
| 1476 | /* min 8 byte setup packet */ | 1481 | /* min 8 byte setup packet */ |
| @@ -1511,6 +1516,10 @@ static int proc_do_submiturb(struct usb_dev_state *ps, struct usbdevfs_urb *uurb | |||
| 1511 | break; | 1516 | break; |
| 1512 | 1517 | ||
| 1513 | case USBDEVFS_URB_TYPE_BULK: | 1518 | case USBDEVFS_URB_TYPE_BULK: |
| 1519 | if (!is_in) | ||
| 1520 | allow_zero = true; | ||
| 1521 | else | ||
| 1522 | allow_short = true; | ||
| 1514 | switch (usb_endpoint_type(&ep->desc)) { | 1523 | switch (usb_endpoint_type(&ep->desc)) { |
| 1515 | case USB_ENDPOINT_XFER_CONTROL: | 1524 | case USB_ENDPOINT_XFER_CONTROL: |
| 1516 | case USB_ENDPOINT_XFER_ISOC: | 1525 | case USB_ENDPOINT_XFER_ISOC: |
| @@ -1531,6 +1540,10 @@ static int proc_do_submiturb(struct usb_dev_state *ps, struct usbdevfs_urb *uurb | |||
| 1531 | if (!usb_endpoint_xfer_int(&ep->desc)) | 1540 | if (!usb_endpoint_xfer_int(&ep->desc)) |
| 1532 | return -EINVAL; | 1541 | return -EINVAL; |
| 1533 | interrupt_urb: | 1542 | interrupt_urb: |
| 1543 | if (!is_in) | ||
| 1544 | allow_zero = true; | ||
| 1545 | else | ||
| 1546 | allow_short = true; | ||
| 1534 | break; | 1547 | break; |
| 1535 | 1548 | ||
| 1536 | case USBDEVFS_URB_TYPE_ISO: | 1549 | case USBDEVFS_URB_TYPE_ISO: |
| @@ -1676,14 +1689,19 @@ static int proc_do_submiturb(struct usb_dev_state *ps, struct usbdevfs_urb *uurb | |||
| 1676 | u = (is_in ? URB_DIR_IN : URB_DIR_OUT); | 1689 | u = (is_in ? URB_DIR_IN : URB_DIR_OUT); |
| 1677 | if (uurb->flags & USBDEVFS_URB_ISO_ASAP) | 1690 | if (uurb->flags & USBDEVFS_URB_ISO_ASAP) |
| 1678 | u |= URB_ISO_ASAP; | 1691 | u |= URB_ISO_ASAP; |
| 1679 | if (uurb->flags & USBDEVFS_URB_SHORT_NOT_OK && is_in) | 1692 | if (allow_short && uurb->flags & USBDEVFS_URB_SHORT_NOT_OK) |
| 1680 | u |= URB_SHORT_NOT_OK; | 1693 | u |= URB_SHORT_NOT_OK; |
| 1681 | if (uurb->flags & USBDEVFS_URB_ZERO_PACKET) | 1694 | if (allow_zero && uurb->flags & USBDEVFS_URB_ZERO_PACKET) |
| 1682 | u |= URB_ZERO_PACKET; | 1695 | u |= URB_ZERO_PACKET; |
| 1683 | if (uurb->flags & USBDEVFS_URB_NO_INTERRUPT) | 1696 | if (uurb->flags & USBDEVFS_URB_NO_INTERRUPT) |
| 1684 | u |= URB_NO_INTERRUPT; | 1697 | u |= URB_NO_INTERRUPT; |
| 1685 | as->urb->transfer_flags = u; | 1698 | as->urb->transfer_flags = u; |
| 1686 | 1699 | ||
| 1700 | if (!allow_short && uurb->flags & USBDEVFS_URB_SHORT_NOT_OK) | ||
| 1701 | dev_warn(&ps->dev->dev, "Requested nonsensical USBDEVFS_URB_SHORT_NOT_OK.\n"); | ||
| 1702 | if (!allow_zero && uurb->flags & USBDEVFS_URB_ZERO_PACKET) | ||
| 1703 | dev_warn(&ps->dev->dev, "Requested nonsensical USBDEVFS_URB_ZERO_PACKET.\n"); | ||
| 1704 | |||
| 1687 | as->urb->transfer_buffer_length = uurb->buffer_length; | 1705 | as->urb->transfer_buffer_length = uurb->buffer_length; |
| 1688 | as->urb->setup_packet = (unsigned char *)dr; | 1706 | as->urb->setup_packet = (unsigned char *)dr; |
| 1689 | dr = NULL; | 1707 | dr = NULL; |
diff --git a/drivers/usb/core/driver.c b/drivers/usb/core/driver.c index e76e95f62f76..a1f225f077cd 100644 --- a/drivers/usb/core/driver.c +++ b/drivers/usb/core/driver.c | |||
| @@ -512,7 +512,6 @@ int usb_driver_claim_interface(struct usb_driver *driver, | |||
| 512 | struct device *dev; | 512 | struct device *dev; |
| 513 | struct usb_device *udev; | 513 | struct usb_device *udev; |
| 514 | int retval = 0; | 514 | int retval = 0; |
| 515 | int lpm_disable_error = -ENODEV; | ||
| 516 | 515 | ||
| 517 | if (!iface) | 516 | if (!iface) |
| 518 | return -ENODEV; | 517 | return -ENODEV; |
| @@ -533,16 +532,6 @@ int usb_driver_claim_interface(struct usb_driver *driver, | |||
| 533 | 532 | ||
| 534 | iface->condition = USB_INTERFACE_BOUND; | 533 | iface->condition = USB_INTERFACE_BOUND; |
| 535 | 534 | ||
| 536 | /* See the comment about disabling LPM in usb_probe_interface(). */ | ||
| 537 | if (driver->disable_hub_initiated_lpm) { | ||
| 538 | lpm_disable_error = usb_unlocked_disable_lpm(udev); | ||
| 539 | if (lpm_disable_error) { | ||
| 540 | dev_err(&iface->dev, "%s Failed to disable LPM for driver %s\n", | ||
| 541 | __func__, driver->name); | ||
| 542 | return -ENOMEM; | ||
| 543 | } | ||
| 544 | } | ||
| 545 | |||
| 546 | /* Claimed interfaces are initially inactive (suspended) and | 535 | /* Claimed interfaces are initially inactive (suspended) and |
| 547 | * runtime-PM-enabled, but only if the driver has autosuspend | 536 | * runtime-PM-enabled, but only if the driver has autosuspend |
| 548 | * support. Otherwise they are marked active, to prevent the | 537 | * support. Otherwise they are marked active, to prevent the |
| @@ -561,9 +550,20 @@ int usb_driver_claim_interface(struct usb_driver *driver, | |||
| 561 | if (device_is_registered(dev)) | 550 | if (device_is_registered(dev)) |
| 562 | retval = device_bind_driver(dev); | 551 | retval = device_bind_driver(dev); |
| 563 | 552 | ||
| 564 | /* Attempt to re-enable USB3 LPM, if the disable was successful. */ | 553 | if (retval) { |
| 565 | if (!lpm_disable_error) | 554 | dev->driver = NULL; |
| 566 | usb_unlocked_enable_lpm(udev); | 555 | usb_set_intfdata(iface, NULL); |
| 556 | iface->needs_remote_wakeup = 0; | ||
| 557 | iface->condition = USB_INTERFACE_UNBOUND; | ||
| 558 | |||
| 559 | /* | ||
| 560 | * Unbound interfaces are always runtime-PM-disabled | ||
| 561 | * and runtime-PM-suspended | ||
| 562 | */ | ||
| 563 | if (driver->supports_autosuspend) | ||
| 564 | pm_runtime_disable(dev); | ||
| 565 | pm_runtime_set_suspended(dev); | ||
| 566 | } | ||
| 567 | 567 | ||
| 568 | return retval; | 568 | return retval; |
| 569 | } | 569 | } |
diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c index e77dfe5ed5ec..178d6c6063c0 100644 --- a/drivers/usb/core/quirks.c +++ b/drivers/usb/core/quirks.c | |||
| @@ -58,6 +58,7 @@ static int quirks_param_set(const char *val, const struct kernel_param *kp) | |||
| 58 | quirk_list = kcalloc(quirk_count, sizeof(struct quirk_entry), | 58 | quirk_list = kcalloc(quirk_count, sizeof(struct quirk_entry), |
| 59 | GFP_KERNEL); | 59 | GFP_KERNEL); |
| 60 | if (!quirk_list) { | 60 | if (!quirk_list) { |
| 61 | quirk_count = 0; | ||
| 61 | mutex_unlock(&quirk_mutex); | 62 | mutex_unlock(&quirk_mutex); |
| 62 | return -ENOMEM; | 63 | return -ENOMEM; |
| 63 | } | 64 | } |
| @@ -154,7 +155,7 @@ static struct kparam_string quirks_param_string = { | |||
| 154 | .string = quirks_param, | 155 | .string = quirks_param, |
| 155 | }; | 156 | }; |
| 156 | 157 | ||
| 157 | module_param_cb(quirks, &quirks_param_ops, &quirks_param_string, 0644); | 158 | device_param_cb(quirks, &quirks_param_ops, &quirks_param_string, 0644); |
| 158 | MODULE_PARM_DESC(quirks, "Add/modify USB quirks by specifying quirks=vendorID:productID:quirks"); | 159 | MODULE_PARM_DESC(quirks, "Add/modify USB quirks by specifying quirks=vendorID:productID:quirks"); |
| 159 | 160 | ||
| 160 | /* Lists of quirky USB devices, split in device quirks and interface quirks. | 161 | /* Lists of quirky USB devices, split in device quirks and interface quirks. |
diff --git a/drivers/usb/core/usb.c b/drivers/usb/core/usb.c index 623be3174fb3..79d8bd7a612e 100644 --- a/drivers/usb/core/usb.c +++ b/drivers/usb/core/usb.c | |||
| @@ -228,6 +228,8 @@ struct usb_host_interface *usb_find_alt_setting( | |||
| 228 | struct usb_interface_cache *intf_cache = NULL; | 228 | struct usb_interface_cache *intf_cache = NULL; |
| 229 | int i; | 229 | int i; |
| 230 | 230 | ||
| 231 | if (!config) | ||
| 232 | return NULL; | ||
| 231 | for (i = 0; i < config->desc.bNumInterfaces; i++) { | 233 | for (i = 0; i < config->desc.bNumInterfaces; i++) { |
| 232 | if (config->intf_cache[i]->altsetting[0].desc.bInterfaceNumber | 234 | if (config->intf_cache[i]->altsetting[0].desc.bInterfaceNumber |
| 233 | == iface_num) { | 235 | == iface_num) { |
diff --git a/drivers/usb/musb/musb_dsps.c b/drivers/usb/musb/musb_dsps.c index df827ff57b0d..23a0df79ef21 100644 --- a/drivers/usb/musb/musb_dsps.c +++ b/drivers/usb/musb/musb_dsps.c | |||
| @@ -658,16 +658,6 @@ dsps_dma_controller_create(struct musb *musb, void __iomem *base) | |||
| 658 | return controller; | 658 | return controller; |
| 659 | } | 659 | } |
| 660 | 660 | ||
| 661 | static void dsps_dma_controller_destroy(struct dma_controller *c) | ||
| 662 | { | ||
| 663 | struct musb *musb = c->musb; | ||
| 664 | struct dsps_glue *glue = dev_get_drvdata(musb->controller->parent); | ||
| 665 | void __iomem *usbss_base = glue->usbss_base; | ||
| 666 | |||
| 667 | musb_writel(usbss_base, USBSS_IRQ_CLEARR, USBSS_IRQ_PD_COMP); | ||
| 668 | cppi41_dma_controller_destroy(c); | ||
| 669 | } | ||
| 670 | |||
| 671 | #ifdef CONFIG_PM_SLEEP | 661 | #ifdef CONFIG_PM_SLEEP |
| 672 | static void dsps_dma_controller_suspend(struct dsps_glue *glue) | 662 | static void dsps_dma_controller_suspend(struct dsps_glue *glue) |
| 673 | { | 663 | { |
| @@ -697,7 +687,7 @@ static struct musb_platform_ops dsps_ops = { | |||
| 697 | 687 | ||
| 698 | #ifdef CONFIG_USB_TI_CPPI41_DMA | 688 | #ifdef CONFIG_USB_TI_CPPI41_DMA |
| 699 | .dma_init = dsps_dma_controller_create, | 689 | .dma_init = dsps_dma_controller_create, |
| 700 | .dma_exit = dsps_dma_controller_destroy, | 690 | .dma_exit = cppi41_dma_controller_destroy, |
| 701 | #endif | 691 | #endif |
| 702 | .enable = dsps_musb_enable, | 692 | .enable = dsps_musb_enable, |
| 703 | .disable = dsps_musb_disable, | 693 | .disable = dsps_musb_disable, |
diff --git a/drivers/usb/typec/mux.c b/drivers/usb/typec/mux.c index ddaac63ecf12..d990aa510fab 100644 --- a/drivers/usb/typec/mux.c +++ b/drivers/usb/typec/mux.c | |||
| @@ -9,6 +9,7 @@ | |||
| 9 | 9 | ||
| 10 | #include <linux/device.h> | 10 | #include <linux/device.h> |
| 11 | #include <linux/list.h> | 11 | #include <linux/list.h> |
| 12 | #include <linux/module.h> | ||
| 12 | #include <linux/mutex.h> | 13 | #include <linux/mutex.h> |
| 13 | #include <linux/usb/typec_mux.h> | 14 | #include <linux/usb/typec_mux.h> |
| 14 | 15 | ||
| @@ -49,8 +50,10 @@ struct typec_switch *typec_switch_get(struct device *dev) | |||
| 49 | mutex_lock(&switch_lock); | 50 | mutex_lock(&switch_lock); |
| 50 | sw = device_connection_find_match(dev, "typec-switch", NULL, | 51 | sw = device_connection_find_match(dev, "typec-switch", NULL, |
| 51 | typec_switch_match); | 52 | typec_switch_match); |
| 52 | if (!IS_ERR_OR_NULL(sw)) | 53 | if (!IS_ERR_OR_NULL(sw)) { |
| 54 | WARN_ON(!try_module_get(sw->dev->driver->owner)); | ||
| 53 | get_device(sw->dev); | 55 | get_device(sw->dev); |
| 56 | } | ||
| 54 | mutex_unlock(&switch_lock); | 57 | mutex_unlock(&switch_lock); |
| 55 | 58 | ||
| 56 | return sw; | 59 | return sw; |
| @@ -65,8 +68,10 @@ EXPORT_SYMBOL_GPL(typec_switch_get); | |||
| 65 | */ | 68 | */ |
| 66 | void typec_switch_put(struct typec_switch *sw) | 69 | void typec_switch_put(struct typec_switch *sw) |
| 67 | { | 70 | { |
| 68 | if (!IS_ERR_OR_NULL(sw)) | 71 | if (!IS_ERR_OR_NULL(sw)) { |
| 72 | module_put(sw->dev->driver->owner); | ||
| 69 | put_device(sw->dev); | 73 | put_device(sw->dev); |
| 74 | } | ||
| 70 | } | 75 | } |
| 71 | EXPORT_SYMBOL_GPL(typec_switch_put); | 76 | EXPORT_SYMBOL_GPL(typec_switch_put); |
| 72 | 77 | ||
| @@ -136,8 +141,10 @@ struct typec_mux *typec_mux_get(struct device *dev, const char *name) | |||
| 136 | 141 | ||
| 137 | mutex_lock(&mux_lock); | 142 | mutex_lock(&mux_lock); |
| 138 | mux = device_connection_find_match(dev, name, NULL, typec_mux_match); | 143 | mux = device_connection_find_match(dev, name, NULL, typec_mux_match); |
| 139 | if (!IS_ERR_OR_NULL(mux)) | 144 | if (!IS_ERR_OR_NULL(mux)) { |
| 145 | WARN_ON(!try_module_get(mux->dev->driver->owner)); | ||
| 140 | get_device(mux->dev); | 146 | get_device(mux->dev); |
| 147 | } | ||
| 141 | mutex_unlock(&mux_lock); | 148 | mutex_unlock(&mux_lock); |
| 142 | 149 | ||
| 143 | return mux; | 150 | return mux; |
| @@ -152,8 +159,10 @@ EXPORT_SYMBOL_GPL(typec_mux_get); | |||
| 152 | */ | 159 | */ |
| 153 | void typec_mux_put(struct typec_mux *mux) | 160 | void typec_mux_put(struct typec_mux *mux) |
| 154 | { | 161 | { |
| 155 | if (!IS_ERR_OR_NULL(mux)) | 162 | if (!IS_ERR_OR_NULL(mux)) { |
| 163 | module_put(mux->dev->driver->owner); | ||
| 156 | put_device(mux->dev); | 164 | put_device(mux->dev); |
| 165 | } | ||
| 157 | } | 166 | } |
| 158 | EXPORT_SYMBOL_GPL(typec_mux_put); | 167 | EXPORT_SYMBOL_GPL(typec_mux_put); |
| 159 | 168 | ||
diff --git a/fs/dax.c b/fs/dax.c --- a/fs/dax.c +++ b/fs/dax.c | |||
| @@ -1120,21 +1120,12 @@ static vm_fault_t dax_load_hole(struct address_space *mapping, void *entry, | |||
| 1120 | { | 1120 | { |
| 1121 | struct inode *inode = mapping->host; | 1121 | struct inode *inode = mapping->host; |
| 1122 | unsigned long vaddr = vmf->address; | 1122 | unsigned long vaddr = vmf->address; |
| 1123 | vm_fault_t ret = VM_FAULT_NOPAGE; | 1123 | pfn_t pfn = pfn_to_pfn_t(my_zero_pfn(vaddr)); |
| 1124 | struct page *zero_page; | 1124 | vm_fault_t ret; |
| 1125 | pfn_t pfn; | ||
| 1126 | |||
| 1127 | zero_page = ZERO_PAGE(0); | ||
| 1128 | if (unlikely(!zero_page)) { | ||
| 1129 | ret = VM_FAULT_OOM; | ||
| 1130 | goto out; | ||
| 1131 | } | ||
| 1132 | 1125 | ||
| 1133 | pfn = page_to_pfn_t(zero_page); | ||
| 1134 | dax_insert_mapping_entry(mapping, vmf, entry, pfn, RADIX_DAX_ZERO_PAGE, | 1126 | dax_insert_mapping_entry(mapping, vmf, entry, pfn, RADIX_DAX_ZERO_PAGE, |
| 1135 | false); | 1127 | false); |
| 1136 | ret = vmf_insert_mixed(vmf->vma, vaddr, pfn); | 1128 | ret = vmf_insert_mixed(vmf->vma, vaddr, pfn); |
| 1137 | out: | ||
| 1138 | trace_dax_load_hole(inode, vmf, ret); | 1129 | trace_dax_load_hole(inode, vmf, ret); |
| 1139 | return ret; | 1130 | return ret; |
| 1140 | } | 1131 | } |
diff --git a/fs/ext2/inode.c b/fs/ext2/inode.c index 7f7ee18fe179..e4bb9386c045 100644 --- a/fs/ext2/inode.c +++ b/fs/ext2/inode.c | |||
| @@ -1448,6 +1448,7 @@ struct inode *ext2_iget (struct super_block *sb, unsigned long ino) | |||
| 1448 | } | 1448 | } |
| 1449 | inode->i_blocks = le32_to_cpu(raw_inode->i_blocks); | 1449 | inode->i_blocks = le32_to_cpu(raw_inode->i_blocks); |
| 1450 | ei->i_flags = le32_to_cpu(raw_inode->i_flags); | 1450 | ei->i_flags = le32_to_cpu(raw_inode->i_flags); |
| 1451 | ext2_set_inode_flags(inode); | ||
| 1451 | ei->i_faddr = le32_to_cpu(raw_inode->i_faddr); | 1452 | ei->i_faddr = le32_to_cpu(raw_inode->i_faddr); |
| 1452 | ei->i_frag_no = raw_inode->i_frag; | 1453 | ei->i_frag_no = raw_inode->i_frag; |
| 1453 | ei->i_frag_size = raw_inode->i_fsize; | 1454 | ei->i_frag_size = raw_inode->i_fsize; |
| @@ -1517,7 +1518,6 @@ struct inode *ext2_iget (struct super_block *sb, unsigned long ino) | |||
| 1517 | new_decode_dev(le32_to_cpu(raw_inode->i_block[1]))); | 1518 | new_decode_dev(le32_to_cpu(raw_inode->i_block[1]))); |
| 1518 | } | 1519 | } |
| 1519 | brelse (bh); | 1520 | brelse (bh); |
| 1520 | ext2_set_inode_flags(inode); | ||
| 1521 | unlock_new_inode(inode); | 1521 | unlock_new_inode(inode); |
| 1522 | return inode; | 1522 | return inode; |
| 1523 | 1523 | ||
diff --git a/include/drm/drm_panel.h b/include/drm/drm_panel.h index 582a0ec0aa70..777814755fa6 100644 --- a/include/drm/drm_panel.h +++ b/include/drm/drm_panel.h | |||
| @@ -89,7 +89,6 @@ struct drm_panel { | |||
| 89 | struct drm_device *drm; | 89 | struct drm_device *drm; |
| 90 | struct drm_connector *connector; | 90 | struct drm_connector *connector; |
| 91 | struct device *dev; | 91 | struct device *dev; |
| 92 | struct device_link *link; | ||
| 93 | 92 | ||
| 94 | const struct drm_panel_funcs *funcs; | 93 | const struct drm_panel_funcs *funcs; |
| 95 | 94 | ||
diff --git a/include/linux/mfd/rohm-bd718x7.h b/include/linux/mfd/rohm-bd718x7.h index a528747f8aed..e8338e5dc10b 100644 --- a/include/linux/mfd/rohm-bd718x7.h +++ b/include/linux/mfd/rohm-bd718x7.h | |||
| @@ -78,9 +78,9 @@ enum { | |||
| 78 | BD71837_REG_TRANS_COND0 = 0x1F, | 78 | BD71837_REG_TRANS_COND0 = 0x1F, |
| 79 | BD71837_REG_TRANS_COND1 = 0x20, | 79 | BD71837_REG_TRANS_COND1 = 0x20, |
| 80 | BD71837_REG_VRFAULTEN = 0x21, | 80 | BD71837_REG_VRFAULTEN = 0x21, |
| 81 | BD71837_REG_MVRFLTMASK0 = 0x22, | 81 | BD718XX_REG_MVRFLTMASK0 = 0x22, |
| 82 | BD71837_REG_MVRFLTMASK1 = 0x23, | 82 | BD718XX_REG_MVRFLTMASK1 = 0x23, |
| 83 | BD71837_REG_MVRFLTMASK2 = 0x24, | 83 | BD718XX_REG_MVRFLTMASK2 = 0x24, |
| 84 | BD71837_REG_RCVCFG = 0x25, | 84 | BD71837_REG_RCVCFG = 0x25, |
| 85 | BD71837_REG_RCVNUM = 0x26, | 85 | BD71837_REG_RCVNUM = 0x26, |
| 86 | BD71837_REG_PWRONCONFIG0 = 0x27, | 86 | BD71837_REG_PWRONCONFIG0 = 0x27, |
| @@ -159,6 +159,33 @@ enum { | |||
| 159 | #define BUCK8_MASK 0x3F | 159 | #define BUCK8_MASK 0x3F |
| 160 | #define BUCK8_DEFAULT 0x1E | 160 | #define BUCK8_DEFAULT 0x1E |
| 161 | 161 | ||
| 162 | /* BD718XX Voltage monitoring masks */ | ||
| 163 | #define BD718XX_BUCK1_VRMON80 0x1 | ||
| 164 | #define BD718XX_BUCK1_VRMON130 0x2 | ||
| 165 | #define BD718XX_BUCK2_VRMON80 0x4 | ||
| 166 | #define BD718XX_BUCK2_VRMON130 0x8 | ||
| 167 | #define BD718XX_1ST_NODVS_BUCK_VRMON80 0x1 | ||
| 168 | #define BD718XX_1ST_NODVS_BUCK_VRMON130 0x2 | ||
| 169 | #define BD718XX_2ND_NODVS_BUCK_VRMON80 0x4 | ||
| 170 | #define BD718XX_2ND_NODVS_BUCK_VRMON130 0x8 | ||
| 171 | #define BD718XX_3RD_NODVS_BUCK_VRMON80 0x10 | ||
| 172 | #define BD718XX_3RD_NODVS_BUCK_VRMON130 0x20 | ||
| 173 | #define BD718XX_4TH_NODVS_BUCK_VRMON80 0x40 | ||
| 174 | #define BD718XX_4TH_NODVS_BUCK_VRMON130 0x80 | ||
| 175 | #define BD718XX_LDO1_VRMON80 0x1 | ||
| 176 | #define BD718XX_LDO2_VRMON80 0x2 | ||
| 177 | #define BD718XX_LDO3_VRMON80 0x4 | ||
| 178 | #define BD718XX_LDO4_VRMON80 0x8 | ||
| 179 | #define BD718XX_LDO5_VRMON80 0x10 | ||
| 180 | #define BD718XX_LDO6_VRMON80 0x20 | ||
| 181 | |||
| 182 | /* BD71837 specific voltage monitoring masks */ | ||
| 183 | #define BD71837_BUCK3_VRMON80 0x10 | ||
| 184 | #define BD71837_BUCK3_VRMON130 0x20 | ||
| 185 | #define BD71837_BUCK4_VRMON80 0x40 | ||
| 186 | #define BD71837_BUCK4_VRMON130 0x80 | ||
| 187 | #define BD71837_LDO7_VRMON80 0x40 | ||
| 188 | |||
| 162 | /* BD71837_REG_IRQ bits */ | 189 | /* BD71837_REG_IRQ bits */ |
| 163 | #define IRQ_SWRST 0x40 | 190 | #define IRQ_SWRST 0x40 |
| 164 | #define IRQ_PWRON_S 0x20 | 191 | #define IRQ_PWRON_S 0x20 |
diff --git a/include/linux/netpoll.h b/include/linux/netpoll.h index 67662d01130a..3ef82d3a78db 100644 --- a/include/linux/netpoll.h +++ b/include/linux/netpoll.h | |||
| @@ -49,8 +49,9 @@ struct netpoll_info { | |||
| 49 | }; | 49 | }; |
| 50 | 50 | ||
| 51 | #ifdef CONFIG_NETPOLL | 51 | #ifdef CONFIG_NETPOLL |
| 52 | extern void netpoll_poll_disable(struct net_device *dev); | 52 | void netpoll_poll_dev(struct net_device *dev); |
| 53 | extern void netpoll_poll_enable(struct net_device *dev); | 53 | void netpoll_poll_disable(struct net_device *dev); |
| 54 | void netpoll_poll_enable(struct net_device *dev); | ||
| 54 | #else | 55 | #else |
| 55 | static inline void netpoll_poll_disable(struct net_device *dev) { return; } | 56 | static inline void netpoll_poll_disable(struct net_device *dev) { return; } |
| 56 | static inline void netpoll_poll_enable(struct net_device *dev) { return; } | 57 | static inline void netpoll_poll_enable(struct net_device *dev) { return; } |
diff --git a/include/linux/regulator/machine.h b/include/linux/regulator/machine.h index 3468703d663a..a459a5e973a7 100644 --- a/include/linux/regulator/machine.h +++ b/include/linux/regulator/machine.h | |||
| @@ -48,9 +48,9 @@ struct regulator; | |||
| 48 | * DISABLE_IN_SUSPEND - turn off regulator in suspend states | 48 | * DISABLE_IN_SUSPEND - turn off regulator in suspend states |
| 49 | * ENABLE_IN_SUSPEND - keep regulator on in suspend states | 49 | * ENABLE_IN_SUSPEND - keep regulator on in suspend states |
| 50 | */ | 50 | */ |
| 51 | #define DO_NOTHING_IN_SUSPEND (-1) | 51 | #define DO_NOTHING_IN_SUSPEND 0 |
| 52 | #define DISABLE_IN_SUSPEND 0 | 52 | #define DISABLE_IN_SUSPEND 1 |
| 53 | #define ENABLE_IN_SUSPEND 1 | 53 | #define ENABLE_IN_SUSPEND 2 |
| 54 | 54 | ||
| 55 | /* Regulator active discharge flags */ | 55 | /* Regulator active discharge flags */ |
| 56 | enum regulator_active_discharge { | 56 | enum regulator_active_discharge { |
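The regulator constants are renumbered so that 0 now means "do nothing": a suspend-state field that a board file never sets, and therefore stays zero-initialized, no longer reads as a request to disable the regulator in suspend. A tiny illustration, with a hypothetical variable:

	int suspend_action = 0;		/* never set explicitly */

	/* true with the new values, so the regulator is left untouched */
	if (suspend_action == DO_NOTHING_IN_SUSPEND)
		;	/* nothing to change on suspend entry */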
diff --git a/include/linux/spi/spi-mem.h b/include/linux/spi/spi-mem.h index b2bd4b4127c4..69ee30456864 100644 --- a/include/linux/spi/spi-mem.h +++ b/include/linux/spi/spi-mem.h | |||
| @@ -81,8 +81,10 @@ enum spi_mem_data_dir { | |||
| 81 | * @dummy.buswidth: number of IO lanes used to transmit the dummy bytes | 81 | * @dummy.buswidth: number of IO lanes used to transmit the dummy bytes |
| 82 | * @data.buswidth: number of IO lanes used to send/receive the data | 82 | * @data.buswidth: number of IO lanes used to send/receive the data |
| 83 | * @data.dir: direction of the transfer | 83 | * @data.dir: direction of the transfer |
| 84 | * @data.buf.in: input buffer | 84 | * @data.nbytes: number of data bytes to send/receive. Can be zero if the |
| 85 | * @data.buf.out: output buffer | 85 | * operation does not involve transferring data |
| 86 | * @data.buf.in: input buffer (must be DMA-able) | ||
| 87 | * @data.buf.out: output buffer (must be DMA-able) | ||
| 86 | */ | 88 | */ |
| 87 | struct spi_mem_op { | 89 | struct spi_mem_op { |
| 88 | struct { | 90 | struct { |
| @@ -105,7 +107,6 @@ struct spi_mem_op { | |||
| 105 | u8 buswidth; | 107 | u8 buswidth; |
| 106 | enum spi_mem_data_dir dir; | 108 | enum spi_mem_data_dir dir; |
| 107 | unsigned int nbytes; | 109 | unsigned int nbytes; |
| 108 | /* buf.{in,out} must be DMA-able. */ | ||
| 109 | union { | 110 | union { |
| 110 | void *in; | 111 | void *in; |
| 111 | const void *out; | 112 | const void *out; |
diff --git a/include/linux/stmmac.h b/include/linux/stmmac.h index c43e9a01b892..7ddfc65586b0 100644 --- a/include/linux/stmmac.h +++ b/include/linux/stmmac.h | |||
| @@ -30,6 +30,7 @@ | |||
| 30 | 30 | ||
| 31 | #define MTL_MAX_RX_QUEUES 8 | 31 | #define MTL_MAX_RX_QUEUES 8 |
| 32 | #define MTL_MAX_TX_QUEUES 8 | 32 | #define MTL_MAX_TX_QUEUES 8 |
| 33 | #define STMMAC_CH_MAX 8 | ||
| 33 | 34 | ||
| 34 | #define STMMAC_RX_COE_NONE 0 | 35 | #define STMMAC_RX_COE_NONE 0 |
| 35 | #define STMMAC_RX_COE_TYPE1 1 | 36 | #define STMMAC_RX_COE_TYPE1 1 |
diff --git a/include/linux/uio.h b/include/linux/uio.h index 409c845d4cd3..422b1c01ee0d 100644 --- a/include/linux/uio.h +++ b/include/linux/uio.h | |||
| @@ -172,7 +172,7 @@ size_t copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i) | |||
| 172 | static __always_inline __must_check | 172 | static __always_inline __must_check |
| 173 | size_t copy_to_iter_mcsafe(void *addr, size_t bytes, struct iov_iter *i) | 173 | size_t copy_to_iter_mcsafe(void *addr, size_t bytes, struct iov_iter *i) |
| 174 | { | 174 | { |
| 175 | if (unlikely(!check_copy_size(addr, bytes, false))) | 175 | if (unlikely(!check_copy_size(addr, bytes, true))) |
| 176 | return 0; | 176 | return 0; |
| 177 | else | 177 | else |
| 178 | return _copy_to_iter_mcsafe(addr, bytes, i); | 178 | return _copy_to_iter_mcsafe(addr, bytes, i); |
diff --git a/include/net/nfc/hci.h b/include/net/nfc/hci.h index 316694dafa5b..008f466d1da7 100644 --- a/include/net/nfc/hci.h +++ b/include/net/nfc/hci.h | |||
| @@ -87,7 +87,7 @@ struct nfc_hci_pipe { | |||
| 87 | * According to specification 102 622 chapter 4.4 Pipes, | 87 | * According to specification 102 622 chapter 4.4 Pipes, |
| 88 | * the pipe identifier is 7 bits long. | 88 | * the pipe identifier is 7 bits long. |
| 89 | */ | 89 | */ |
| 90 | #define NFC_HCI_MAX_PIPES 127 | 90 | #define NFC_HCI_MAX_PIPES 128 |
| 91 | struct nfc_hci_init_data { | 91 | struct nfc_hci_init_data { |
| 92 | u8 gate_count; | 92 | u8 gate_count; |
| 93 | struct nfc_hci_gate gates[NFC_HCI_MAX_CUSTOM_GATES]; | 93 | struct nfc_hci_gate gates[NFC_HCI_MAX_CUSTOM_GATES]; |
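The NFC change fixes an off-by-one: a 7-bit pipe identifier has 128 possible values (0..127), so a table indexed directly by pipe id needs 128 entries, which is what NFC_HCI_MAX_PIPES now provides. Sketch of such a table (the array and index names are illustrative):

	struct nfc_hci_pipe pipes[NFC_HCI_MAX_PIPES];	/* 128 slots, ids 0..127 */
	struct nfc_hci_pipe *p = &pipes[pipe_id];	/* valid for any 7-bit id */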
diff --git a/include/uapi/linux/keyctl.h b/include/uapi/linux/keyctl.h index 910cc4334b21..7b8c9e19bad1 100644 --- a/include/uapi/linux/keyctl.h +++ b/include/uapi/linux/keyctl.h | |||
| @@ -65,7 +65,7 @@ | |||
| 65 | 65 | ||
| 66 | /* keyctl structures */ | 66 | /* keyctl structures */ |
| 67 | struct keyctl_dh_params { | 67 | struct keyctl_dh_params { |
| 68 | __s32 dh_private; | 68 | __s32 private; |
| 69 | __s32 prime; | 69 | __s32 prime; |
| 70 | __s32 base; | 70 | __s32 base; |
| 71 | }; | 71 | }; |
diff --git a/kernel/bpf/sockmap.c b/kernel/bpf/sockmap.c index 488ef9663c01..0a0f2ec75370 100644 --- a/kernel/bpf/sockmap.c +++ b/kernel/bpf/sockmap.c | |||
| @@ -132,6 +132,7 @@ struct smap_psock { | |||
| 132 | struct work_struct gc_work; | 132 | struct work_struct gc_work; |
| 133 | 133 | ||
| 134 | struct proto *sk_proto; | 134 | struct proto *sk_proto; |
| 135 | void (*save_unhash)(struct sock *sk); | ||
| 135 | void (*save_close)(struct sock *sk, long timeout); | 136 | void (*save_close)(struct sock *sk, long timeout); |
| 136 | void (*save_data_ready)(struct sock *sk); | 137 | void (*save_data_ready)(struct sock *sk); |
| 137 | void (*save_write_space)(struct sock *sk); | 138 | void (*save_write_space)(struct sock *sk); |
| @@ -143,6 +144,7 @@ static int bpf_tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, | |||
| 143 | static int bpf_tcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size); | 144 | static int bpf_tcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size); |
| 144 | static int bpf_tcp_sendpage(struct sock *sk, struct page *page, | 145 | static int bpf_tcp_sendpage(struct sock *sk, struct page *page, |
| 145 | int offset, size_t size, int flags); | 146 | int offset, size_t size, int flags); |
| 147 | static void bpf_tcp_unhash(struct sock *sk); | ||
| 146 | static void bpf_tcp_close(struct sock *sk, long timeout); | 148 | static void bpf_tcp_close(struct sock *sk, long timeout); |
| 147 | 149 | ||
| 148 | static inline struct smap_psock *smap_psock_sk(const struct sock *sk) | 150 | static inline struct smap_psock *smap_psock_sk(const struct sock *sk) |
| @@ -184,6 +186,7 @@ static void build_protos(struct proto prot[SOCKMAP_NUM_CONFIGS], | |||
| 184 | struct proto *base) | 186 | struct proto *base) |
| 185 | { | 187 | { |
| 186 | prot[SOCKMAP_BASE] = *base; | 188 | prot[SOCKMAP_BASE] = *base; |
| 189 | prot[SOCKMAP_BASE].unhash = bpf_tcp_unhash; | ||
| 187 | prot[SOCKMAP_BASE].close = bpf_tcp_close; | 190 | prot[SOCKMAP_BASE].close = bpf_tcp_close; |
| 188 | prot[SOCKMAP_BASE].recvmsg = bpf_tcp_recvmsg; | 191 | prot[SOCKMAP_BASE].recvmsg = bpf_tcp_recvmsg; |
| 189 | prot[SOCKMAP_BASE].stream_memory_read = bpf_tcp_stream_read; | 192 | prot[SOCKMAP_BASE].stream_memory_read = bpf_tcp_stream_read; |
| @@ -217,6 +220,7 @@ static int bpf_tcp_init(struct sock *sk) | |||
| 217 | return -EBUSY; | 220 | return -EBUSY; |
| 218 | } | 221 | } |
| 219 | 222 | ||
| 223 | psock->save_unhash = sk->sk_prot->unhash; | ||
| 220 | psock->save_close = sk->sk_prot->close; | 224 | psock->save_close = sk->sk_prot->close; |
| 221 | psock->sk_proto = sk->sk_prot; | 225 | psock->sk_proto = sk->sk_prot; |
| 222 | 226 | ||
| @@ -305,30 +309,12 @@ static struct smap_psock_map_entry *psock_map_pop(struct sock *sk, | |||
| 305 | return e; | 309 | return e; |
| 306 | } | 310 | } |
| 307 | 311 | ||
| 308 | static void bpf_tcp_close(struct sock *sk, long timeout) | 312 | static void bpf_tcp_remove(struct sock *sk, struct smap_psock *psock) |
| 309 | { | 313 | { |
| 310 | void (*close_fun)(struct sock *sk, long timeout); | ||
| 311 | struct smap_psock_map_entry *e; | 314 | struct smap_psock_map_entry *e; |
| 312 | struct sk_msg_buff *md, *mtmp; | 315 | struct sk_msg_buff *md, *mtmp; |
| 313 | struct smap_psock *psock; | ||
| 314 | struct sock *osk; | 316 | struct sock *osk; |
| 315 | 317 | ||
| 316 | lock_sock(sk); | ||
| 317 | rcu_read_lock(); | ||
| 318 | psock = smap_psock_sk(sk); | ||
| 319 | if (unlikely(!psock)) { | ||
| 320 | rcu_read_unlock(); | ||
| 321 | release_sock(sk); | ||
| 322 | return sk->sk_prot->close(sk, timeout); | ||
| 323 | } | ||
| 324 | |||
| 325 | /* The psock may be destroyed anytime after exiting the RCU critical | ||
| 326 | * section so by the time we use close_fun the psock may no longer | ||
| 327 | * be valid. However, bpf_tcp_close is called with the sock lock | ||
| 328 | * held so the close hook and sk are still valid. | ||
| 329 | */ | ||
| 330 | close_fun = psock->save_close; | ||
| 331 | |||
| 332 | if (psock->cork) { | 318 | if (psock->cork) { |
| 333 | free_start_sg(psock->sock, psock->cork, true); | 319 | free_start_sg(psock->sock, psock->cork, true); |
| 334 | kfree(psock->cork); | 320 | kfree(psock->cork); |
| @@ -379,6 +365,42 @@ static void bpf_tcp_close(struct sock *sk, long timeout) | |||
| 379 | kfree(e); | 365 | kfree(e); |
| 380 | e = psock_map_pop(sk, psock); | 366 | e = psock_map_pop(sk, psock); |
| 381 | } | 367 | } |
| 368 | } | ||
| 369 | |||
| 370 | static void bpf_tcp_unhash(struct sock *sk) | ||
| 371 | { | ||
| 372 | void (*unhash_fun)(struct sock *sk); | ||
| 373 | struct smap_psock *psock; | ||
| 374 | |||
| 375 | rcu_read_lock(); | ||
| 376 | psock = smap_psock_sk(sk); | ||
| 377 | if (unlikely(!psock)) { | ||
| 378 | rcu_read_unlock(); | ||
| 379 | if (sk->sk_prot->unhash) | ||
| 380 | sk->sk_prot->unhash(sk); | ||
| 381 | return; | ||
| 382 | } | ||
| 383 | unhash_fun = psock->save_unhash; | ||
| 384 | bpf_tcp_remove(sk, psock); | ||
| 385 | rcu_read_unlock(); | ||
| 386 | unhash_fun(sk); | ||
| 387 | } | ||
| 388 | |||
| 389 | static void bpf_tcp_close(struct sock *sk, long timeout) | ||
| 390 | { | ||
| 391 | void (*close_fun)(struct sock *sk, long timeout); | ||
| 392 | struct smap_psock *psock; | ||
| 393 | |||
| 394 | lock_sock(sk); | ||
| 395 | rcu_read_lock(); | ||
| 396 | psock = smap_psock_sk(sk); | ||
| 397 | if (unlikely(!psock)) { | ||
| 398 | rcu_read_unlock(); | ||
| 399 | release_sock(sk); | ||
| 400 | return sk->sk_prot->close(sk, timeout); | ||
| 401 | } | ||
| 402 | close_fun = psock->save_close; | ||
| 403 | bpf_tcp_remove(sk, psock); | ||
| 382 | rcu_read_unlock(); | 404 | rcu_read_unlock(); |
| 383 | release_sock(sk); | 405 | release_sock(sk); |
| 384 | close_fun(sk, timeout); | 406 | close_fun(sk, timeout); |
| @@ -2097,8 +2119,12 @@ static int sock_map_update_elem(struct bpf_map *map, | |||
| 2097 | return -EINVAL; | 2119 | return -EINVAL; |
| 2098 | } | 2120 | } |
| 2099 | 2121 | ||
| 2122 | /* ULPs are currently supported only for TCP sockets in ESTABLISHED | ||
| 2123 | * state. | ||
| 2124 | */ | ||
| 2100 | if (skops.sk->sk_type != SOCK_STREAM || | 2125 | if (skops.sk->sk_type != SOCK_STREAM || |
| 2101 | skops.sk->sk_protocol != IPPROTO_TCP) { | 2126 | skops.sk->sk_protocol != IPPROTO_TCP || |
| 2127 | skops.sk->sk_state != TCP_ESTABLISHED) { | ||
| 2102 | fput(socket->file); | 2128 | fput(socket->file); |
| 2103 | return -EOPNOTSUPP; | 2129 | return -EOPNOTSUPP; |
| 2104 | } | 2130 | } |
| @@ -2453,6 +2479,16 @@ static int sock_hash_update_elem(struct bpf_map *map, | |||
| 2453 | return -EINVAL; | 2479 | return -EINVAL; |
| 2454 | } | 2480 | } |
| 2455 | 2481 | ||
| 2482 | /* ULPs are currently supported only for TCP sockets in ESTABLISHED | ||
| 2483 | * state. | ||
| 2484 | */ | ||
| 2485 | if (skops.sk->sk_type != SOCK_STREAM || | ||
| 2486 | skops.sk->sk_protocol != IPPROTO_TCP || | ||
| 2487 | skops.sk->sk_state != TCP_ESTABLISHED) { | ||
| 2488 | fput(socket->file); | ||
| 2489 | return -EOPNOTSUPP; | ||
| 2490 | } | ||
| 2491 | |||
| 2456 | lock_sock(skops.sk); | 2492 | lock_sock(skops.sk); |
| 2457 | preempt_disable(); | 2493 | preempt_disable(); |
| 2458 | rcu_read_lock(); | 2494 | rcu_read_lock(); |
| @@ -2543,10 +2579,22 @@ const struct bpf_map_ops sock_hash_ops = { | |||
| 2543 | .map_check_btf = map_check_no_btf, | 2579 | .map_check_btf = map_check_no_btf, |
| 2544 | }; | 2580 | }; |
| 2545 | 2581 | ||
| 2582 | static bool bpf_is_valid_sock_op(struct bpf_sock_ops_kern *ops) | ||
| 2583 | { | ||
| 2584 | return ops->op == BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB || | ||
| 2585 | ops->op == BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB; | ||
| 2586 | } | ||
| 2546 | BPF_CALL_4(bpf_sock_map_update, struct bpf_sock_ops_kern *, bpf_sock, | 2587 | BPF_CALL_4(bpf_sock_map_update, struct bpf_sock_ops_kern *, bpf_sock, |
| 2547 | struct bpf_map *, map, void *, key, u64, flags) | 2588 | struct bpf_map *, map, void *, key, u64, flags) |
| 2548 | { | 2589 | { |
| 2549 | WARN_ON_ONCE(!rcu_read_lock_held()); | 2590 | WARN_ON_ONCE(!rcu_read_lock_held()); |
| 2591 | |||
| 2592 | /* ULPs are currently supported only for TCP sockets in ESTABLISHED | ||
| 2593 | * state. This checks that the sock ops triggering the update is | ||
| 2594 | * one indicating we are (or will be soon) in an ESTABLISHED state. | ||
| 2595 | */ | ||
| 2596 | if (!bpf_is_valid_sock_op(bpf_sock)) | ||
| 2597 | return -EOPNOTSUPP; | ||
| 2550 | return sock_map_ctx_update_elem(bpf_sock, map, key, flags); | 2598 | return sock_map_ctx_update_elem(bpf_sock, map, key, flags); |
| 2551 | } | 2599 | } |
| 2552 | 2600 | ||
| @@ -2565,6 +2613,9 @@ BPF_CALL_4(bpf_sock_hash_update, struct bpf_sock_ops_kern *, bpf_sock, | |||
| 2565 | struct bpf_map *, map, void *, key, u64, flags) | 2613 | struct bpf_map *, map, void *, key, u64, flags) |
| 2566 | { | 2614 | { |
| 2567 | WARN_ON_ONCE(!rcu_read_lock_held()); | 2615 | WARN_ON_ONCE(!rcu_read_lock_held()); |
| 2616 | |||
| 2617 | if (!bpf_is_valid_sock_op(bpf_sock)) | ||
| 2618 | return -EOPNOTSUPP; | ||
| 2568 | return sock_hash_ctx_update_elem(bpf_sock, map, key, flags); | 2619 | return sock_hash_ctx_update_elem(bpf_sock, map, key, flags); |
| 2569 | } | 2620 | } |
| 2570 | 2621 | ||
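The sockmap hunks enforce one invariant at both entry points: a socket can only be inserted into a sockmap/sockhash when it is a TCP stream socket that is (or is about to be) ESTABLISHED, because that is when the ULP can be installed safely. The two gates added by the patch, sketched side by side with error unwinding trimmed:

	/* syscall update path */
	if (skops.sk->sk_type != SOCK_STREAM ||
	    skops.sk->sk_protocol != IPPROTO_TCP ||
	    skops.sk->sk_state != TCP_ESTABLISHED)
		return -EOPNOTSUPP;

	/* BPF sock_ops helper path: only the *_ESTABLISHED_CB ops may update */
	if (!bpf_is_valid_sock_op(bpf_sock))
		return -EOPNOTSUPP;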
diff --git a/kernel/dma/Kconfig b/kernel/dma/Kconfig index 9bd54304446f..1b1d63b3634b 100644 --- a/kernel/dma/Kconfig +++ b/kernel/dma/Kconfig | |||
| @@ -23,6 +23,9 @@ config ARCH_HAS_SYNC_DMA_FOR_CPU | |||
| 23 | bool | 23 | bool |
| 24 | select NEED_DMA_MAP_STATE | 24 | select NEED_DMA_MAP_STATE |
| 25 | 25 | ||
| 26 | config ARCH_HAS_SYNC_DMA_FOR_CPU_ALL | ||
| 27 | bool | ||
| 28 | |||
| 26 | config DMA_DIRECT_OPS | 29 | config DMA_DIRECT_OPS |
| 27 | bool | 30 | bool |
| 28 | depends on HAS_DMA | 31 | depends on HAS_DMA |
diff --git a/kernel/events/core.c b/kernel/events/core.c index c80549bf82c6..5a97f34bc14c 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c | |||
| @@ -3935,6 +3935,12 @@ int perf_event_read_local(struct perf_event *event, u64 *value, | |||
| 3935 | goto out; | 3935 | goto out; |
| 3936 | } | 3936 | } |
| 3937 | 3937 | ||
| 3938 | /* If this is a pinned event it must be running on this CPU */ | ||
| 3939 | if (event->attr.pinned && event->oncpu != smp_processor_id()) { | ||
| 3940 | ret = -EBUSY; | ||
| 3941 | goto out; | ||
| 3942 | } | ||
| 3943 | |||
| 3938 | /* | 3944 | /* |
| 3939 | * If the event is currently on this CPU, its either a per-task event, | 3945 | * If the event is currently on this CPU, its either a per-task event, |
| 3940 | * or local to this CPU. Furthermore it means its ACTIVE (otherwise | 3946 | * or local to this CPU. Furthermore it means its ACTIVE (otherwise |
| @@ -8308,6 +8314,8 @@ void perf_tp_event(u16 event_type, u64 count, void *record, int entry_size, | |||
| 8308 | goto unlock; | 8314 | goto unlock; |
| 8309 | 8315 | ||
| 8310 | list_for_each_entry_rcu(event, &ctx->event_list, event_entry) { | 8316 | list_for_each_entry_rcu(event, &ctx->event_list, event_entry) { |
| 8317 | if (event->cpu != smp_processor_id()) | ||
| 8318 | continue; | ||
| 8311 | if (event->attr.type != PERF_TYPE_TRACEPOINT) | 8319 | if (event->attr.type != PERF_TYPE_TRACEPOINT) |
| 8312 | continue; | 8320 | continue; |
| 8313 | if (event->attr.config != entry->type) | 8321 | if (event->attr.config != entry->type) |
| @@ -9425,9 +9433,7 @@ static void free_pmu_context(struct pmu *pmu) | |||
| 9425 | if (pmu->task_ctx_nr > perf_invalid_context) | 9433 | if (pmu->task_ctx_nr > perf_invalid_context) |
| 9426 | return; | 9434 | return; |
| 9427 | 9435 | ||
| 9428 | mutex_lock(&pmus_lock); | ||
| 9429 | free_percpu(pmu->pmu_cpu_context); | 9436 | free_percpu(pmu->pmu_cpu_context); |
| 9430 | mutex_unlock(&pmus_lock); | ||
| 9431 | } | 9437 | } |
| 9432 | 9438 | ||
| 9433 | /* | 9439 | /* |
| @@ -9683,12 +9689,8 @@ EXPORT_SYMBOL_GPL(perf_pmu_register); | |||
| 9683 | 9689 | ||
| 9684 | void perf_pmu_unregister(struct pmu *pmu) | 9690 | void perf_pmu_unregister(struct pmu *pmu) |
| 9685 | { | 9691 | { |
| 9686 | int remove_device; | ||
| 9687 | |||
| 9688 | mutex_lock(&pmus_lock); | 9692 | mutex_lock(&pmus_lock); |
| 9689 | remove_device = pmu_bus_running; | ||
| 9690 | list_del_rcu(&pmu->entry); | 9693 | list_del_rcu(&pmu->entry); |
| 9691 | mutex_unlock(&pmus_lock); | ||
| 9692 | 9694 | ||
| 9693 | /* | 9695 | /* |
| 9694 | * We dereference the pmu list under both SRCU and regular RCU, so | 9696 | * We dereference the pmu list under both SRCU and regular RCU, so |
| @@ -9700,13 +9702,14 @@ void perf_pmu_unregister(struct pmu *pmu) | |||
| 9700 | free_percpu(pmu->pmu_disable_count); | 9702 | free_percpu(pmu->pmu_disable_count); |
| 9701 | if (pmu->type >= PERF_TYPE_MAX) | 9703 | if (pmu->type >= PERF_TYPE_MAX) |
| 9702 | idr_remove(&pmu_idr, pmu->type); | 9704 | idr_remove(&pmu_idr, pmu->type); |
| 9703 | if (remove_device) { | 9705 | if (pmu_bus_running) { |
| 9704 | if (pmu->nr_addr_filters) | 9706 | if (pmu->nr_addr_filters) |
| 9705 | device_remove_file(pmu->dev, &dev_attr_nr_addr_filters); | 9707 | device_remove_file(pmu->dev, &dev_attr_nr_addr_filters); |
| 9706 | device_del(pmu->dev); | 9708 | device_del(pmu->dev); |
| 9707 | put_device(pmu->dev); | 9709 | put_device(pmu->dev); |
| 9708 | } | 9710 | } |
| 9709 | free_pmu_context(pmu); | 9711 | free_pmu_context(pmu); |
| 9712 | mutex_unlock(&pmus_lock); | ||
| 9710 | } | 9713 | } |
| 9711 | EXPORT_SYMBOL_GPL(perf_pmu_unregister); | 9714 | EXPORT_SYMBOL_GPL(perf_pmu_unregister); |
| 9712 | 9715 | ||
diff --git a/net/batman-adv/bat_v_elp.c b/net/batman-adv/bat_v_elp.c index 71c20c1d4002..9f481cfdf77d 100644 --- a/net/batman-adv/bat_v_elp.c +++ b/net/batman-adv/bat_v_elp.c | |||
| @@ -241,7 +241,7 @@ batadv_v_elp_wifi_neigh_probe(struct batadv_hardif_neigh_node *neigh) | |||
| 241 | * the packet to be exactly of that size to make the link | 241 | * the packet to be exactly of that size to make the link |
| 242 | * throughput estimation effective. | 242 | * throughput estimation effective. |
| 243 | */ | 243 | */ |
| 244 | skb_put(skb, probe_len - hard_iface->bat_v.elp_skb->len); | 244 | skb_put_zero(skb, probe_len - hard_iface->bat_v.elp_skb->len); |
| 245 | 245 | ||
| 246 | batadv_dbg(BATADV_DBG_BATMAN, bat_priv, | 246 | batadv_dbg(BATADV_DBG_BATMAN, bat_priv, |
| 247 | "Sending unicast (probe) ELP packet on interface %s to %pM\n", | 247 | "Sending unicast (probe) ELP packet on interface %s to %pM\n", |
| @@ -268,6 +268,7 @@ static void batadv_v_elp_periodic_work(struct work_struct *work) | |||
| 268 | struct batadv_priv *bat_priv; | 268 | struct batadv_priv *bat_priv; |
| 269 | struct sk_buff *skb; | 269 | struct sk_buff *skb; |
| 270 | u32 elp_interval; | 270 | u32 elp_interval; |
| 271 | bool ret; | ||
| 271 | 272 | ||
| 272 | bat_v = container_of(work, struct batadv_hard_iface_bat_v, elp_wq.work); | 273 | bat_v = container_of(work, struct batadv_hard_iface_bat_v, elp_wq.work); |
| 273 | hard_iface = container_of(bat_v, struct batadv_hard_iface, bat_v); | 274 | hard_iface = container_of(bat_v, struct batadv_hard_iface, bat_v); |
| @@ -329,8 +330,11 @@ static void batadv_v_elp_periodic_work(struct work_struct *work) | |||
| 329 | * may sleep and that is not allowed in an rcu protected | 330 | * may sleep and that is not allowed in an rcu protected |
| 330 | * context. Therefore schedule a task for that. | 331 | * context. Therefore schedule a task for that. |
| 331 | */ | 332 | */ |
| 332 | queue_work(batadv_event_workqueue, | 333 | ret = queue_work(batadv_event_workqueue, |
| 333 | &hardif_neigh->bat_v.metric_work); | 334 | &hardif_neigh->bat_v.metric_work); |
| 335 | |||
| 336 | if (!ret) | ||
| 337 | batadv_hardif_neigh_put(hardif_neigh); | ||
| 334 | } | 338 | } |
| 335 | rcu_read_unlock(); | 339 | rcu_read_unlock(); |
| 336 | 340 | ||
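This hunk, like the bridge-loop-avoidance one below, relies on queue_work() returning false when the work item is already pending: in that case the work callback will not run for this caller, so the reference taken on its behalf has to be dropped here or it leaks. The general pattern, with hypothetical object and helper names:

	kref_get(&obj->refcount);	/* reference owned by the work item */
	if (!queue_work(batadv_event_workqueue, &obj->work))
		obj_put(obj);		/* already queued: drop the extra reference */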
diff --git a/net/batman-adv/bridge_loop_avoidance.c b/net/batman-adv/bridge_loop_avoidance.c index ff9659af6b91..5f1aeeded0e3 100644 --- a/net/batman-adv/bridge_loop_avoidance.c +++ b/net/batman-adv/bridge_loop_avoidance.c | |||
| @@ -1772,6 +1772,7 @@ batadv_bla_loopdetect_check(struct batadv_priv *bat_priv, struct sk_buff *skb, | |||
| 1772 | { | 1772 | { |
| 1773 | struct batadv_bla_backbone_gw *backbone_gw; | 1773 | struct batadv_bla_backbone_gw *backbone_gw; |
| 1774 | struct ethhdr *ethhdr; | 1774 | struct ethhdr *ethhdr; |
| 1775 | bool ret; | ||
| 1775 | 1776 | ||
| 1776 | ethhdr = eth_hdr(skb); | 1777 | ethhdr = eth_hdr(skb); |
| 1777 | 1778 | ||
| @@ -1795,8 +1796,13 @@ batadv_bla_loopdetect_check(struct batadv_priv *bat_priv, struct sk_buff *skb, | |||
| 1795 | if (unlikely(!backbone_gw)) | 1796 | if (unlikely(!backbone_gw)) |
| 1796 | return true; | 1797 | return true; |
| 1797 | 1798 | ||
| 1798 | queue_work(batadv_event_workqueue, &backbone_gw->report_work); | 1799 | ret = queue_work(batadv_event_workqueue, &backbone_gw->report_work); |
| 1799 | /* backbone_gw is unreferenced in the report work function */ | 1800 | |
| 1801 | /* backbone_gw is unreferenced in the report work function | ||
| 1802 | * if queue_work() call was successful | ||
| 1803 | */ | ||
| 1804 | if (!ret) | ||
| 1805 | batadv_backbone_gw_put(backbone_gw); | ||
| 1800 | 1806 | ||
| 1801 | return true; | 1807 | return true; |
| 1802 | } | 1808 | } |
diff --git a/net/batman-adv/gateway_client.c b/net/batman-adv/gateway_client.c index 8b198ee798c9..140c61a3f1ec 100644 --- a/net/batman-adv/gateway_client.c +++ b/net/batman-adv/gateway_client.c | |||
| @@ -32,6 +32,7 @@ | |||
| 32 | #include <linux/kernel.h> | 32 | #include <linux/kernel.h> |
| 33 | #include <linux/kref.h> | 33 | #include <linux/kref.h> |
| 34 | #include <linux/list.h> | 34 | #include <linux/list.h> |
| 35 | #include <linux/lockdep.h> | ||
| 35 | #include <linux/netdevice.h> | 36 | #include <linux/netdevice.h> |
| 36 | #include <linux/netlink.h> | 37 | #include <linux/netlink.h> |
| 37 | #include <linux/rculist.h> | 38 | #include <linux/rculist.h> |
| @@ -348,6 +349,9 @@ out: | |||
| 348 | * @bat_priv: the bat priv with all the soft interface information | 349 | * @bat_priv: the bat priv with all the soft interface information |
| 349 | * @orig_node: originator announcing gateway capabilities | 350 | * @orig_node: originator announcing gateway capabilities |
| 350 | * @gateway: announced bandwidth information | 351 | * @gateway: announced bandwidth information |
| 352 | * | ||
| 353 | * Has to be called with the appropriate locks being acquired | ||
| 354 | * (gw.list_lock). | ||
| 351 | */ | 355 | */ |
| 352 | static void batadv_gw_node_add(struct batadv_priv *bat_priv, | 356 | static void batadv_gw_node_add(struct batadv_priv *bat_priv, |
| 353 | struct batadv_orig_node *orig_node, | 357 | struct batadv_orig_node *orig_node, |
| @@ -355,6 +359,8 @@ static void batadv_gw_node_add(struct batadv_priv *bat_priv, | |||
| 355 | { | 359 | { |
| 356 | struct batadv_gw_node *gw_node; | 360 | struct batadv_gw_node *gw_node; |
| 357 | 361 | ||
| 362 | lockdep_assert_held(&bat_priv->gw.list_lock); | ||
| 363 | |||
| 358 | if (gateway->bandwidth_down == 0) | 364 | if (gateway->bandwidth_down == 0) |
| 359 | return; | 365 | return; |
| 360 | 366 | ||
| @@ -369,10 +375,8 @@ static void batadv_gw_node_add(struct batadv_priv *bat_priv, | |||
| 369 | gw_node->bandwidth_down = ntohl(gateway->bandwidth_down); | 375 | gw_node->bandwidth_down = ntohl(gateway->bandwidth_down); |
| 370 | gw_node->bandwidth_up = ntohl(gateway->bandwidth_up); | 376 | gw_node->bandwidth_up = ntohl(gateway->bandwidth_up); |
| 371 | 377 | ||
| 372 | spin_lock_bh(&bat_priv->gw.list_lock); | ||
| 373 | kref_get(&gw_node->refcount); | 378 | kref_get(&gw_node->refcount); |
| 374 | hlist_add_head_rcu(&gw_node->list, &bat_priv->gw.gateway_list); | 379 | hlist_add_head_rcu(&gw_node->list, &bat_priv->gw.gateway_list); |
| 375 | spin_unlock_bh(&bat_priv->gw.list_lock); | ||
| 376 | 380 | ||
| 377 | batadv_dbg(BATADV_DBG_BATMAN, bat_priv, | 381 | batadv_dbg(BATADV_DBG_BATMAN, bat_priv, |
| 378 | "Found new gateway %pM -> gw bandwidth: %u.%u/%u.%u MBit\n", | 382 | "Found new gateway %pM -> gw bandwidth: %u.%u/%u.%u MBit\n", |
| @@ -428,11 +432,14 @@ void batadv_gw_node_update(struct batadv_priv *bat_priv, | |||
| 428 | { | 432 | { |
| 429 | struct batadv_gw_node *gw_node, *curr_gw = NULL; | 433 | struct batadv_gw_node *gw_node, *curr_gw = NULL; |
| 430 | 434 | ||
| 435 | spin_lock_bh(&bat_priv->gw.list_lock); | ||
| 431 | gw_node = batadv_gw_node_get(bat_priv, orig_node); | 436 | gw_node = batadv_gw_node_get(bat_priv, orig_node); |
| 432 | if (!gw_node) { | 437 | if (!gw_node) { |
| 433 | batadv_gw_node_add(bat_priv, orig_node, gateway); | 438 | batadv_gw_node_add(bat_priv, orig_node, gateway); |
| 439 | spin_unlock_bh(&bat_priv->gw.list_lock); | ||
| 434 | goto out; | 440 | goto out; |
| 435 | } | 441 | } |
| 442 | spin_unlock_bh(&bat_priv->gw.list_lock); | ||
| 436 | 443 | ||
| 437 | if (gw_node->bandwidth_down == ntohl(gateway->bandwidth_down) && | 444 | if (gw_node->bandwidth_down == ntohl(gateway->bandwidth_down) && |
| 438 | gw_node->bandwidth_up == ntohl(gateway->bandwidth_up)) | 445 | gw_node->bandwidth_up == ntohl(gateway->bandwidth_up)) |
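The gateway_client change widens gw.list_lock so that the lookup and the insertion run in one critical section, closing the window in which two callers could both miss the lookup and add duplicate gateway nodes; batadv_gw_node_add() now documents and lockdep-asserts that its caller holds the lock. The lookup-or-insert shape it converges on, sketched with hypothetical helpers:

	spin_lock_bh(&list_lock);
	node = lookup(list, key);
	if (!node)
		insert(list, key);	/* no window for a duplicate entry */
	spin_unlock_bh(&list_lock);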
diff --git a/net/batman-adv/main.h b/net/batman-adv/main.h index 8da3c9336111..3ccc75ee719c 100644 --- a/net/batman-adv/main.h +++ b/net/batman-adv/main.h | |||
| @@ -25,7 +25,7 @@ | |||
| 25 | #define BATADV_DRIVER_DEVICE "batman-adv" | 25 | #define BATADV_DRIVER_DEVICE "batman-adv" |
| 26 | 26 | ||
| 27 | #ifndef BATADV_SOURCE_VERSION | 27 | #ifndef BATADV_SOURCE_VERSION |
| 28 | #define BATADV_SOURCE_VERSION "2018.2" | 28 | #define BATADV_SOURCE_VERSION "2018.3" |
| 29 | #endif | 29 | #endif |
| 30 | 30 | ||
| 31 | /* B.A.T.M.A.N. parameters */ | 31 | /* B.A.T.M.A.N. parameters */ |
diff --git a/net/batman-adv/network-coding.c b/net/batman-adv/network-coding.c index c3578444f3cb..34caf129a9bf 100644 --- a/net/batman-adv/network-coding.c +++ b/net/batman-adv/network-coding.c | |||
| @@ -854,16 +854,27 @@ batadv_nc_get_nc_node(struct batadv_priv *bat_priv, | |||
| 854 | spinlock_t *lock; /* Used to lock list selected by "int in_coding" */ | 854 | spinlock_t *lock; /* Used to lock list selected by "int in_coding" */ |
| 855 | struct list_head *list; | 855 | struct list_head *list; |
| 856 | 856 | ||
| 857 | /* Select ingoing or outgoing coding node */ | ||
| 858 | if (in_coding) { | ||
| 859 | lock = &orig_neigh_node->in_coding_list_lock; | ||
| 860 | list = &orig_neigh_node->in_coding_list; | ||
| 861 | } else { | ||
| 862 | lock = &orig_neigh_node->out_coding_list_lock; | ||
| 863 | list = &orig_neigh_node->out_coding_list; | ||
| 864 | } | ||
| 865 | |||
| 866 | spin_lock_bh(lock); | ||
| 867 | |||
| 857 | /* Check if nc_node is already added */ | 868 | /* Check if nc_node is already added */ |
| 858 | nc_node = batadv_nc_find_nc_node(orig_node, orig_neigh_node, in_coding); | 869 | nc_node = batadv_nc_find_nc_node(orig_node, orig_neigh_node, in_coding); |
| 859 | 870 | ||
| 860 | /* Node found */ | 871 | /* Node found */ |
| 861 | if (nc_node) | 872 | if (nc_node) |
| 862 | return nc_node; | 873 | goto unlock; |
| 863 | 874 | ||
| 864 | nc_node = kzalloc(sizeof(*nc_node), GFP_ATOMIC); | 875 | nc_node = kzalloc(sizeof(*nc_node), GFP_ATOMIC); |
| 865 | if (!nc_node) | 876 | if (!nc_node) |
| 866 | return NULL; | 877 | goto unlock; |
| 867 | 878 | ||
| 868 | /* Initialize nc_node */ | 879 | /* Initialize nc_node */ |
| 869 | INIT_LIST_HEAD(&nc_node->list); | 880 | INIT_LIST_HEAD(&nc_node->list); |
| @@ -872,22 +883,14 @@ batadv_nc_get_nc_node(struct batadv_priv *bat_priv, | |||
| 872 | kref_get(&orig_neigh_node->refcount); | 883 | kref_get(&orig_neigh_node->refcount); |
| 873 | nc_node->orig_node = orig_neigh_node; | 884 | nc_node->orig_node = orig_neigh_node; |
| 874 | 885 | ||
| 875 | /* Select ingoing or outgoing coding node */ | ||
| 876 | if (in_coding) { | ||
| 877 | lock = &orig_neigh_node->in_coding_list_lock; | ||
| 878 | list = &orig_neigh_node->in_coding_list; | ||
| 879 | } else { | ||
| 880 | lock = &orig_neigh_node->out_coding_list_lock; | ||
| 881 | list = &orig_neigh_node->out_coding_list; | ||
| 882 | } | ||
| 883 | |||
| 884 | batadv_dbg(BATADV_DBG_NC, bat_priv, "Adding nc_node %pM -> %pM\n", | 886 | batadv_dbg(BATADV_DBG_NC, bat_priv, "Adding nc_node %pM -> %pM\n", |
| 885 | nc_node->addr, nc_node->orig_node->orig); | 887 | nc_node->addr, nc_node->orig_node->orig); |
| 886 | 888 | ||
| 887 | /* Add nc_node to orig_node */ | 889 | /* Add nc_node to orig_node */ |
| 888 | spin_lock_bh(lock); | ||
| 889 | kref_get(&nc_node->refcount); | 890 | kref_get(&nc_node->refcount); |
| 890 | list_add_tail_rcu(&nc_node->list, list); | 891 | list_add_tail_rcu(&nc_node->list, list); |
| 892 | |||
| 893 | unlock: | ||
| 891 | spin_unlock_bh(lock); | 894 | spin_unlock_bh(lock); |
| 892 | 895 | ||
| 893 | return nc_node; | 896 | return nc_node; |
diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c index 1485263a348b..626ddca332db 100644 --- a/net/batman-adv/soft-interface.c +++ b/net/batman-adv/soft-interface.c | |||
| @@ -574,15 +574,20 @@ int batadv_softif_create_vlan(struct batadv_priv *bat_priv, unsigned short vid) | |||
| 574 | struct batadv_softif_vlan *vlan; | 574 | struct batadv_softif_vlan *vlan; |
| 575 | int err; | 575 | int err; |
| 576 | 576 | ||
| 577 | spin_lock_bh(&bat_priv->softif_vlan_list_lock); | ||
| 578 | |||
| 577 | vlan = batadv_softif_vlan_get(bat_priv, vid); | 579 | vlan = batadv_softif_vlan_get(bat_priv, vid); |
| 578 | if (vlan) { | 580 | if (vlan) { |
| 579 | batadv_softif_vlan_put(vlan); | 581 | batadv_softif_vlan_put(vlan); |
| 582 | spin_unlock_bh(&bat_priv->softif_vlan_list_lock); | ||
| 580 | return -EEXIST; | 583 | return -EEXIST; |
| 581 | } | 584 | } |
| 582 | 585 | ||
| 583 | vlan = kzalloc(sizeof(*vlan), GFP_ATOMIC); | 586 | vlan = kzalloc(sizeof(*vlan), GFP_ATOMIC); |
| 584 | if (!vlan) | 587 | if (!vlan) { |
| 588 | spin_unlock_bh(&bat_priv->softif_vlan_list_lock); | ||
| 585 | return -ENOMEM; | 589 | return -ENOMEM; |
| 590 | } | ||
| 586 | 591 | ||
| 587 | vlan->bat_priv = bat_priv; | 592 | vlan->bat_priv = bat_priv; |
| 588 | vlan->vid = vid; | 593 | vlan->vid = vid; |
| @@ -590,17 +595,23 @@ int batadv_softif_create_vlan(struct batadv_priv *bat_priv, unsigned short vid) | |||
| 590 | 595 | ||
| 591 | atomic_set(&vlan->ap_isolation, 0); | 596 | atomic_set(&vlan->ap_isolation, 0); |
| 592 | 597 | ||
| 598 | kref_get(&vlan->refcount); | ||
| 599 | hlist_add_head_rcu(&vlan->list, &bat_priv->softif_vlan_list); | ||
| 600 | spin_unlock_bh(&bat_priv->softif_vlan_list_lock); | ||
| 601 | |||
| 602 | /* batadv_sysfs_add_vlan cannot be in the spinlock section due to the | ||
| 603 | * sleeping behavior of the sysfs functions and the fs_reclaim lock | ||
| 604 | */ | ||
| 593 | err = batadv_sysfs_add_vlan(bat_priv->soft_iface, vlan); | 605 | err = batadv_sysfs_add_vlan(bat_priv->soft_iface, vlan); |
| 594 | if (err) { | 606 | if (err) { |
| 595 | kfree(vlan); | 607 | /* ref for the function */ |
| 608 | batadv_softif_vlan_put(vlan); | ||
| 609 | |||
| 610 | /* ref for the list */ | ||
| 611 | batadv_softif_vlan_put(vlan); | ||
| 596 | return err; | 612 | return err; |
| 597 | } | 613 | } |
| 598 | 614 | ||
| 599 | spin_lock_bh(&bat_priv->softif_vlan_list_lock); | ||
| 600 | kref_get(&vlan->refcount); | ||
| 601 | hlist_add_head_rcu(&vlan->list, &bat_priv->softif_vlan_list); | ||
| 602 | spin_unlock_bh(&bat_priv->softif_vlan_list_lock); | ||
| 603 | |||
| 604 | /* add a new TT local entry. This one will be marked with the NOPURGE | 615 | /* add a new TT local entry. This one will be marked with the NOPURGE |
| 605 | * flag | 616 | * flag |
| 606 | */ | 617 | */ |
diff --git a/net/batman-adv/sysfs.c b/net/batman-adv/sysfs.c index f2eef43bd2ec..09427fc6494a 100644 --- a/net/batman-adv/sysfs.c +++ b/net/batman-adv/sysfs.c | |||
| @@ -188,7 +188,8 @@ ssize_t batadv_store_##_name(struct kobject *kobj, \ | |||
| 188 | \ | 188 | \ |
| 189 | return __batadv_store_uint_attr(buff, count, _min, _max, \ | 189 | return __batadv_store_uint_attr(buff, count, _min, _max, \ |
| 190 | _post_func, attr, \ | 190 | _post_func, attr, \ |
| 191 | &bat_priv->_var, net_dev); \ | 191 | &bat_priv->_var, net_dev, \ |
| 192 | NULL); \ | ||
| 192 | } | 193 | } |
| 193 | 194 | ||
| 194 | #define BATADV_ATTR_SIF_SHOW_UINT(_name, _var) \ | 195 | #define BATADV_ATTR_SIF_SHOW_UINT(_name, _var) \ |
| @@ -262,7 +263,9 @@ ssize_t batadv_store_##_name(struct kobject *kobj, \ | |||
| 262 | \ | 263 | \ |
| 263 | length = __batadv_store_uint_attr(buff, count, _min, _max, \ | 264 | length = __batadv_store_uint_attr(buff, count, _min, _max, \ |
| 264 | _post_func, attr, \ | 265 | _post_func, attr, \ |
| 265 | &hard_iface->_var, net_dev); \ | 266 | &hard_iface->_var, \ |
| 267 | hard_iface->soft_iface, \ | ||
| 268 | net_dev); \ | ||
| 266 | \ | 269 | \ |
| 267 | batadv_hardif_put(hard_iface); \ | 270 | batadv_hardif_put(hard_iface); \ |
| 268 | return length; \ | 271 | return length; \ |
| @@ -356,10 +359,12 @@ __batadv_store_bool_attr(char *buff, size_t count, | |||
| 356 | 359 | ||
| 357 | static int batadv_store_uint_attr(const char *buff, size_t count, | 360 | static int batadv_store_uint_attr(const char *buff, size_t count, |
| 358 | struct net_device *net_dev, | 361 | struct net_device *net_dev, |
| 362 | struct net_device *slave_dev, | ||
| 359 | const char *attr_name, | 363 | const char *attr_name, |
| 360 | unsigned int min, unsigned int max, | 364 | unsigned int min, unsigned int max, |
| 361 | atomic_t *attr) | 365 | atomic_t *attr) |
| 362 | { | 366 | { |
| 367 | char ifname[IFNAMSIZ + 3] = ""; | ||
| 363 | unsigned long uint_val; | 368 | unsigned long uint_val; |
| 364 | int ret; | 369 | int ret; |
| 365 | 370 | ||
| @@ -385,8 +390,11 @@ static int batadv_store_uint_attr(const char *buff, size_t count, | |||
| 385 | if (atomic_read(attr) == uint_val) | 390 | if (atomic_read(attr) == uint_val) |
| 386 | return count; | 391 | return count; |
| 387 | 392 | ||
| 388 | batadv_info(net_dev, "%s: Changing from: %i to: %lu\n", | 393 | if (slave_dev) |
| 389 | attr_name, atomic_read(attr), uint_val); | 394 | snprintf(ifname, sizeof(ifname), "%s: ", slave_dev->name); |
| 395 | |||
| 396 | batadv_info(net_dev, "%s: %sChanging from: %i to: %lu\n", | ||
| 397 | attr_name, ifname, atomic_read(attr), uint_val); | ||
| 390 | 398 | ||
| 391 | atomic_set(attr, uint_val); | 399 | atomic_set(attr, uint_val); |
| 392 | return count; | 400 | return count; |
| @@ -397,12 +405,13 @@ static ssize_t __batadv_store_uint_attr(const char *buff, size_t count, | |||
| 397 | void (*post_func)(struct net_device *), | 405 | void (*post_func)(struct net_device *), |
| 398 | const struct attribute *attr, | 406 | const struct attribute *attr, |
| 399 | atomic_t *attr_store, | 407 | atomic_t *attr_store, |
| 400 | struct net_device *net_dev) | 408 | struct net_device *net_dev, |
| 409 | struct net_device *slave_dev) | ||
| 401 | { | 410 | { |
| 402 | int ret; | 411 | int ret; |
| 403 | 412 | ||
| 404 | ret = batadv_store_uint_attr(buff, count, net_dev, attr->name, min, max, | 413 | ret = batadv_store_uint_attr(buff, count, net_dev, slave_dev, |
| 405 | attr_store); | 414 | attr->name, min, max, attr_store); |
| 406 | if (post_func && ret) | 415 | if (post_func && ret) |
| 407 | post_func(net_dev); | 416 | post_func(net_dev); |
| 408 | 417 | ||
| @@ -571,7 +580,7 @@ static ssize_t batadv_store_gw_sel_class(struct kobject *kobj, | |||
| 571 | return __batadv_store_uint_attr(buff, count, 1, BATADV_TQ_MAX_VALUE, | 580 | return __batadv_store_uint_attr(buff, count, 1, BATADV_TQ_MAX_VALUE, |
| 572 | batadv_post_gw_reselect, attr, | 581 | batadv_post_gw_reselect, attr, |
| 573 | &bat_priv->gw.sel_class, | 582 | &bat_priv->gw.sel_class, |
| 574 | bat_priv->soft_iface); | 583 | bat_priv->soft_iface, NULL); |
| 575 | } | 584 | } |
| 576 | 585 | ||
| 577 | static ssize_t batadv_show_gw_bwidth(struct kobject *kobj, | 586 | static ssize_t batadv_show_gw_bwidth(struct kobject *kobj, |
| @@ -1090,8 +1099,9 @@ static ssize_t batadv_store_throughput_override(struct kobject *kobj, | |||
| 1090 | if (old_tp_override == tp_override) | 1099 | if (old_tp_override == tp_override) |
| 1091 | goto out; | 1100 | goto out; |
| 1092 | 1101 | ||
| 1093 | batadv_info(net_dev, "%s: Changing from: %u.%u MBit to: %u.%u MBit\n", | 1102 | batadv_info(hard_iface->soft_iface, |
| 1094 | "throughput_override", | 1103 | "%s: %s: Changing from: %u.%u MBit to: %u.%u MBit\n", |
| 1104 | "throughput_override", net_dev->name, | ||
| 1095 | old_tp_override / 10, old_tp_override % 10, | 1105 | old_tp_override / 10, old_tp_override % 10, |
| 1096 | tp_override / 10, tp_override % 10); | 1106 | tp_override / 10, tp_override % 10); |
| 1097 | 1107 | ||
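These sysfs hunks thread the slave (hard) interface through to batadv_store_uint_attr() so that the message logged on the soft interface can carry a "&lt;slave&gt;: " prefix. A rough sketch of that prefix handling follows, with printk() standing in for batadv_info() and log_uint_change() being a hypothetical helper, not a kernel function.

```c
/* Hypothetical helper; the argument names mirror the hunk above. */
static void log_uint_change(struct net_device *slave_dev, const char *attr_name,
			    atomic_t *attr, unsigned long uint_val)
{
	char ifname[IFNAMSIZ + 3] = "";	/* room for "<name>: " */

	if (slave_dev)
		snprintf(ifname, sizeof(ifname), "%s: ", slave_dev->name);

	/* yields e.g. "some_attr: eth0: Changing from: 10 to: 20" */
	printk(KERN_INFO "%s: %sChanging from: %i to: %lu\n",
	       attr_name, ifname, atomic_read(attr), uint_val);
}
```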
diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c index 12a2b7d21376..d21624c44665 100644 --- a/net/batman-adv/translation-table.c +++ b/net/batman-adv/translation-table.c | |||
| @@ -1613,6 +1613,8 @@ batadv_tt_global_orig_entry_add(struct batadv_tt_global_entry *tt_global, | |||
| 1613 | { | 1613 | { |
| 1614 | struct batadv_tt_orig_list_entry *orig_entry; | 1614 | struct batadv_tt_orig_list_entry *orig_entry; |
| 1615 | 1615 | ||
| 1616 | spin_lock_bh(&tt_global->list_lock); | ||
| 1617 | |||
| 1616 | orig_entry = batadv_tt_global_orig_entry_find(tt_global, orig_node); | 1618 | orig_entry = batadv_tt_global_orig_entry_find(tt_global, orig_node); |
| 1617 | if (orig_entry) { | 1619 | if (orig_entry) { |
| 1618 | /* refresh the ttvn: the current value could be a bogus one that | 1620 | /* refresh the ttvn: the current value could be a bogus one that |
| @@ -1635,11 +1637,9 @@ batadv_tt_global_orig_entry_add(struct batadv_tt_global_entry *tt_global, | |||
| 1635 | orig_entry->flags = flags; | 1637 | orig_entry->flags = flags; |
| 1636 | kref_init(&orig_entry->refcount); | 1638 | kref_init(&orig_entry->refcount); |
| 1637 | 1639 | ||
| 1638 | spin_lock_bh(&tt_global->list_lock); | ||
| 1639 | kref_get(&orig_entry->refcount); | 1640 | kref_get(&orig_entry->refcount); |
| 1640 | hlist_add_head_rcu(&orig_entry->list, | 1641 | hlist_add_head_rcu(&orig_entry->list, |
| 1641 | &tt_global->orig_list); | 1642 | &tt_global->orig_list); |
| 1642 | spin_unlock_bh(&tt_global->list_lock); | ||
| 1643 | atomic_inc(&tt_global->orig_list_count); | 1643 | atomic_inc(&tt_global->orig_list_count); |
| 1644 | 1644 | ||
| 1645 | sync_flags: | 1645 | sync_flags: |
| @@ -1647,6 +1647,8 @@ sync_flags: | |||
| 1647 | out: | 1647 | out: |
| 1648 | if (orig_entry) | 1648 | if (orig_entry) |
| 1649 | batadv_tt_orig_list_entry_put(orig_entry); | 1649 | batadv_tt_orig_list_entry_put(orig_entry); |
| 1650 | |||
| 1651 | spin_unlock_bh(&tt_global->list_lock); | ||
| 1650 | } | 1652 | } |
| 1651 | 1653 | ||
| 1652 | /** | 1654 | /** |
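Here the list_lock is taken before the batadv_tt_global_orig_entry_find() lookup and released only after the insert (or the put on the early-out path), closing the window in which two callers could both miss the lookup and add duplicate entries. A sketch of the lookup-and-insert-under-one-lock shape, with entry_find(), entry_put() and the struct fields as placeholders:

```c
/* Placeholder names throughout; only the locking shape mirrors the hunk. */
static void orig_entry_add(struct my_global *global, const void *key)
{
	struct my_entry *entry;

	spin_lock_bh(&global->list_lock);

	/* lookup and insert share one critical section, so two callers can
	 * no longer both miss the lookup and both insert the same key
	 */
	entry = entry_find(global, key);
	if (!entry) {
		/* GFP_ATOMIC because the spinlock is held across the alloc */
		entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
		if (!entry)
			goto out;

		kref_init(&entry->refcount);
		kref_get(&entry->refcount);	/* reference for the list */
		hlist_add_head_rcu(&entry->list, &global->orig_list);
	}

out:
	if (entry)
		entry_put(entry);		/* drop the local reference */

	spin_unlock_bh(&global->list_lock);
}
```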
diff --git a/net/batman-adv/tvlv.c b/net/batman-adv/tvlv.c index a637458205d1..40e69c9346d2 100644 --- a/net/batman-adv/tvlv.c +++ b/net/batman-adv/tvlv.c | |||
| @@ -529,15 +529,20 @@ void batadv_tvlv_handler_register(struct batadv_priv *bat_priv, | |||
| 529 | { | 529 | { |
| 530 | struct batadv_tvlv_handler *tvlv_handler; | 530 | struct batadv_tvlv_handler *tvlv_handler; |
| 531 | 531 | ||
| 532 | spin_lock_bh(&bat_priv->tvlv.handler_list_lock); | ||
| 533 | |||
| 532 | tvlv_handler = batadv_tvlv_handler_get(bat_priv, type, version); | 534 | tvlv_handler = batadv_tvlv_handler_get(bat_priv, type, version); |
| 533 | if (tvlv_handler) { | 535 | if (tvlv_handler) { |
| 536 | spin_unlock_bh(&bat_priv->tvlv.handler_list_lock); | ||
| 534 | batadv_tvlv_handler_put(tvlv_handler); | 537 | batadv_tvlv_handler_put(tvlv_handler); |
| 535 | return; | 538 | return; |
| 536 | } | 539 | } |
| 537 | 540 | ||
| 538 | tvlv_handler = kzalloc(sizeof(*tvlv_handler), GFP_ATOMIC); | 541 | tvlv_handler = kzalloc(sizeof(*tvlv_handler), GFP_ATOMIC); |
| 539 | if (!tvlv_handler) | 542 | if (!tvlv_handler) { |
| 543 | spin_unlock_bh(&bat_priv->tvlv.handler_list_lock); | ||
| 540 | return; | 544 | return; |
| 545 | } | ||
| 541 | 546 | ||
| 542 | tvlv_handler->ogm_handler = optr; | 547 | tvlv_handler->ogm_handler = optr; |
| 543 | tvlv_handler->unicast_handler = uptr; | 548 | tvlv_handler->unicast_handler = uptr; |
| @@ -547,7 +552,6 @@ void batadv_tvlv_handler_register(struct batadv_priv *bat_priv, | |||
| 547 | kref_init(&tvlv_handler->refcount); | 552 | kref_init(&tvlv_handler->refcount); |
| 548 | INIT_HLIST_NODE(&tvlv_handler->list); | 553 | INIT_HLIST_NODE(&tvlv_handler->list); |
| 549 | 554 | ||
| 550 | spin_lock_bh(&bat_priv->tvlv.handler_list_lock); | ||
| 551 | kref_get(&tvlv_handler->refcount); | 555 | kref_get(&tvlv_handler->refcount); |
| 552 | hlist_add_head_rcu(&tvlv_handler->list, &bat_priv->tvlv.handler_list); | 556 | hlist_add_head_rcu(&tvlv_handler->list, &bat_priv->tvlv.handler_list); |
| 553 | spin_unlock_bh(&bat_priv->tvlv.handler_list_lock); | 557 | spin_unlock_bh(&bat_priv->tvlv.handler_list_lock); |
diff --git a/net/core/devlink.c b/net/core/devlink.c index 65fc366a78a4..8c0ed225e280 100644 --- a/net/core/devlink.c +++ b/net/core/devlink.c | |||
| @@ -2592,7 +2592,7 @@ send_done: | |||
| 2592 | if (!nlh) { | 2592 | if (!nlh) { |
| 2593 | err = devlink_dpipe_send_and_alloc_skb(&skb, info); | 2593 | err = devlink_dpipe_send_and_alloc_skb(&skb, info); |
| 2594 | if (err) | 2594 | if (err) |
| 2595 | goto err_skb_send_alloc; | 2595 | return err; |
| 2596 | goto send_done; | 2596 | goto send_done; |
| 2597 | } | 2597 | } |
| 2598 | return genlmsg_reply(skb, info); | 2598 | return genlmsg_reply(skb, info); |
| @@ -2600,7 +2600,6 @@ send_done: | |||
| 2600 | nla_put_failure: | 2600 | nla_put_failure: |
| 2601 | err = -EMSGSIZE; | 2601 | err = -EMSGSIZE; |
| 2602 | err_resource_put: | 2602 | err_resource_put: |
| 2603 | err_skb_send_alloc: | ||
| 2604 | nlmsg_free(skb); | 2603 | nlmsg_free(skb); |
| 2605 | return err; | 2604 | return err; |
| 2606 | } | 2605 | } |
diff --git a/net/core/ethtool.c b/net/core/ethtool.c index c9993c6c2fd4..234a0ec2e932 100644 --- a/net/core/ethtool.c +++ b/net/core/ethtool.c | |||
| @@ -2624,6 +2624,7 @@ int dev_ethtool(struct net *net, struct ifreq *ifr) | |||
| 2624 | case ETHTOOL_GPHYSTATS: | 2624 | case ETHTOOL_GPHYSTATS: |
| 2625 | case ETHTOOL_GTSO: | 2625 | case ETHTOOL_GTSO: |
| 2626 | case ETHTOOL_GPERMADDR: | 2626 | case ETHTOOL_GPERMADDR: |
| 2627 | case ETHTOOL_GUFO: | ||
| 2627 | case ETHTOOL_GGSO: | 2628 | case ETHTOOL_GGSO: |
| 2628 | case ETHTOOL_GGRO: | 2629 | case ETHTOOL_GGRO: |
| 2629 | case ETHTOOL_GFLAGS: | 2630 | case ETHTOOL_GFLAGS: |
diff --git a/net/core/netpoll.c b/net/core/netpoll.c index 57557a6a950c..3219a2932463 100644 --- a/net/core/netpoll.c +++ b/net/core/netpoll.c | |||
| @@ -187,16 +187,16 @@ static void poll_napi(struct net_device *dev) | |||
| 187 | } | 187 | } |
| 188 | } | 188 | } |
| 189 | 189 | ||
| 190 | static void netpoll_poll_dev(struct net_device *dev) | 190 | void netpoll_poll_dev(struct net_device *dev) |
| 191 | { | 191 | { |
| 192 | const struct net_device_ops *ops; | ||
| 193 | struct netpoll_info *ni = rcu_dereference_bh(dev->npinfo); | 192 | struct netpoll_info *ni = rcu_dereference_bh(dev->npinfo); |
| 193 | const struct net_device_ops *ops; | ||
| 194 | 194 | ||
| 195 | /* Don't do any rx activity if the dev_lock mutex is held | 195 | /* Don't do any rx activity if the dev_lock mutex is held |
| 196 | * the dev_open/close paths use this to block netpoll activity | 196 | * the dev_open/close paths use this to block netpoll activity |
| 197 | * while changing device state | 197 | * while changing device state |
| 198 | */ | 198 | */ |
| 199 | if (down_trylock(&ni->dev_lock)) | 199 | if (!ni || down_trylock(&ni->dev_lock)) |
| 200 | return; | 200 | return; |
| 201 | 201 | ||
| 202 | if (!netif_running(dev)) { | 202 | if (!netif_running(dev)) { |
| @@ -205,13 +205,8 @@ static void netpoll_poll_dev(struct net_device *dev) | |||
| 205 | } | 205 | } |
| 206 | 206 | ||
| 207 | ops = dev->netdev_ops; | 207 | ops = dev->netdev_ops; |
| 208 | if (!ops->ndo_poll_controller) { | 208 | if (ops->ndo_poll_controller) |
| 209 | up(&ni->dev_lock); | 209 | ops->ndo_poll_controller(dev); |
| 210 | return; | ||
| 211 | } | ||
| 212 | |||
| 213 | /* Process pending work on NIC */ | ||
| 214 | ops->ndo_poll_controller(dev); | ||
| 215 | 210 | ||
| 216 | poll_napi(dev); | 211 | poll_napi(dev); |
| 217 | 212 | ||
| @@ -219,6 +214,7 @@ static void netpoll_poll_dev(struct net_device *dev) | |||
| 219 | 214 | ||
| 220 | zap_completion_queue(); | 215 | zap_completion_queue(); |
| 221 | } | 216 | } |
| 217 | EXPORT_SYMBOL(netpoll_poll_dev); | ||
| 222 | 218 | ||
| 223 | void netpoll_poll_disable(struct net_device *dev) | 219 | void netpoll_poll_disable(struct net_device *dev) |
| 224 | { | 220 | { |
| @@ -613,8 +609,7 @@ int __netpoll_setup(struct netpoll *np, struct net_device *ndev) | |||
| 613 | strlcpy(np->dev_name, ndev->name, IFNAMSIZ); | 609 | strlcpy(np->dev_name, ndev->name, IFNAMSIZ); |
| 614 | INIT_WORK(&np->cleanup_work, netpoll_async_cleanup); | 610 | INIT_WORK(&np->cleanup_work, netpoll_async_cleanup); |
| 615 | 611 | ||
| 616 | if ((ndev->priv_flags & IFF_DISABLE_NETPOLL) || | 612 | if (ndev->priv_flags & IFF_DISABLE_NETPOLL) { |
| 617 | !ndev->netdev_ops->ndo_poll_controller) { | ||
| 618 | np_err(np, "%s doesn't support polling, aborting\n", | 613 | np_err(np, "%s doesn't support polling, aborting\n", |
| 619 | np->dev_name); | 614 | np->dev_name); |
| 620 | err = -ENOTSUPP; | 615 | err = -ENOTSUPP; |
diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c index c4f5602308ed..284a22154b4e 100644 --- a/net/ipv4/ip_tunnel.c +++ b/net/ipv4/ip_tunnel.c | |||
| @@ -627,6 +627,7 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev, | |||
| 627 | const struct iphdr *tnl_params, u8 protocol) | 627 | const struct iphdr *tnl_params, u8 protocol) |
| 628 | { | 628 | { |
| 629 | struct ip_tunnel *tunnel = netdev_priv(dev); | 629 | struct ip_tunnel *tunnel = netdev_priv(dev); |
| 630 | unsigned int inner_nhdr_len = 0; | ||
| 630 | const struct iphdr *inner_iph; | 631 | const struct iphdr *inner_iph; |
| 631 | struct flowi4 fl4; | 632 | struct flowi4 fl4; |
| 632 | u8 tos, ttl; | 633 | u8 tos, ttl; |
| @@ -636,6 +637,14 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev, | |||
| 636 | __be32 dst; | 637 | __be32 dst; |
| 637 | bool connected; | 638 | bool connected; |
| 638 | 639 | ||
| 640 | /* ensure we can access the inner net header, for several users below */ | ||
| 641 | if (skb->protocol == htons(ETH_P_IP)) | ||
| 642 | inner_nhdr_len = sizeof(struct iphdr); | ||
| 643 | else if (skb->protocol == htons(ETH_P_IPV6)) | ||
| 644 | inner_nhdr_len = sizeof(struct ipv6hdr); | ||
| 645 | if (unlikely(!pskb_may_pull(skb, inner_nhdr_len))) | ||
| 646 | goto tx_error; | ||
| 647 | |||
| 639 | inner_iph = (const struct iphdr *)skb_inner_network_header(skb); | 648 | inner_iph = (const struct iphdr *)skb_inner_network_header(skb); |
| 640 | connected = (tunnel->parms.iph.daddr != 0); | 649 | connected = (tunnel->parms.iph.daddr != 0); |
| 641 | 650 | ||
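The transmit path now verifies that the inner network header is actually present in the skb before skb_inner_network_header() is dereferenced; pskb_may_pull() pulls the requested number of bytes into the linear area and fails if the packet is shorter. A hedged sketch of that guard, wrapped in an illustrative helper rather than the real ip_tunnel_xmit():

```c
/* Illustrative wrapper; the field checks mirror the hunk above. */
static int pull_inner_header(struct sk_buff *skb)
{
	unsigned int inner_nhdr_len = 0;

	if (skb->protocol == htons(ETH_P_IP))
		inner_nhdr_len = sizeof(struct iphdr);
	else if (skb->protocol == htons(ETH_P_IPV6))
		inner_nhdr_len = sizeof(struct ipv6hdr);

	/* pskb_may_pull() makes the first inner_nhdr_len bytes linear and
	 * fails if the packet does not contain that many bytes at all
	 */
	if (unlikely(!pskb_may_pull(skb, inner_nhdr_len)))
		return -1;	/* caller drops the packet */

	return 0;
}
```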
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index d51a8c0b3372..c63ccce6425f 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c | |||
| @@ -4201,7 +4201,6 @@ static struct inet6_ifaddr *if6_get_first(struct seq_file *seq, loff_t pos) | |||
| 4201 | p++; | 4201 | p++; |
| 4202 | continue; | 4202 | continue; |
| 4203 | } | 4203 | } |
| 4204 | state->offset++; | ||
| 4205 | return ifa; | 4204 | return ifa; |
| 4206 | } | 4205 | } |
| 4207 | 4206 | ||
| @@ -4225,13 +4224,12 @@ static struct inet6_ifaddr *if6_get_next(struct seq_file *seq, | |||
| 4225 | return ifa; | 4224 | return ifa; |
| 4226 | } | 4225 | } |
| 4227 | 4226 | ||
| 4227 | state->offset = 0; | ||
| 4228 | while (++state->bucket < IN6_ADDR_HSIZE) { | 4228 | while (++state->bucket < IN6_ADDR_HSIZE) { |
| 4229 | state->offset = 0; | ||
| 4230 | hlist_for_each_entry_rcu(ifa, | 4229 | hlist_for_each_entry_rcu(ifa, |
| 4231 | &inet6_addr_lst[state->bucket], addr_lst) { | 4230 | &inet6_addr_lst[state->bucket], addr_lst) { |
| 4232 | if (!net_eq(dev_net(ifa->idev->dev), net)) | 4231 | if (!net_eq(dev_net(ifa->idev->dev), net)) |
| 4233 | continue; | 4232 | continue; |
| 4234 | state->offset++; | ||
| 4235 | return ifa; | 4233 | return ifa; |
| 4236 | } | 4234 | } |
| 4237 | } | 4235 | } |
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c index 419960b0ba16..a0b6932c3afd 100644 --- a/net/ipv6/ip6_tunnel.c +++ b/net/ipv6/ip6_tunnel.c | |||
| @@ -1234,7 +1234,7 @@ static inline int | |||
| 1234 | ip4ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev) | 1234 | ip4ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev) |
| 1235 | { | 1235 | { |
| 1236 | struct ip6_tnl *t = netdev_priv(dev); | 1236 | struct ip6_tnl *t = netdev_priv(dev); |
| 1237 | const struct iphdr *iph = ip_hdr(skb); | 1237 | const struct iphdr *iph; |
| 1238 | int encap_limit = -1; | 1238 | int encap_limit = -1; |
| 1239 | struct flowi6 fl6; | 1239 | struct flowi6 fl6; |
| 1240 | __u8 dsfield; | 1240 | __u8 dsfield; |
| @@ -1242,6 +1242,11 @@ ip4ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev) | |||
| 1242 | u8 tproto; | 1242 | u8 tproto; |
| 1243 | int err; | 1243 | int err; |
| 1244 | 1244 | ||
| 1245 | /* ensure we can access the full inner ip header */ | ||
| 1246 | if (!pskb_may_pull(skb, sizeof(struct iphdr))) | ||
| 1247 | return -1; | ||
| 1248 | |||
| 1249 | iph = ip_hdr(skb); | ||
| 1245 | memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt)); | 1250 | memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt)); |
| 1246 | 1251 | ||
| 1247 | tproto = READ_ONCE(t->parms.proto); | 1252 | tproto = READ_ONCE(t->parms.proto); |
| @@ -1306,7 +1311,7 @@ static inline int | |||
| 1306 | ip6ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev) | 1311 | ip6ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev) |
| 1307 | { | 1312 | { |
| 1308 | struct ip6_tnl *t = netdev_priv(dev); | 1313 | struct ip6_tnl *t = netdev_priv(dev); |
| 1309 | struct ipv6hdr *ipv6h = ipv6_hdr(skb); | 1314 | struct ipv6hdr *ipv6h; |
| 1310 | int encap_limit = -1; | 1315 | int encap_limit = -1; |
| 1311 | __u16 offset; | 1316 | __u16 offset; |
| 1312 | struct flowi6 fl6; | 1317 | struct flowi6 fl6; |
| @@ -1315,6 +1320,10 @@ ip6ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev) | |||
| 1315 | u8 tproto; | 1320 | u8 tproto; |
| 1316 | int err; | 1321 | int err; |
| 1317 | 1322 | ||
| 1323 | if (unlikely(!pskb_may_pull(skb, sizeof(*ipv6h)))) | ||
| 1324 | return -1; | ||
| 1325 | |||
| 1326 | ipv6h = ipv6_hdr(skb); | ||
| 1318 | tproto = READ_ONCE(t->parms.proto); | 1327 | tproto = READ_ONCE(t->parms.proto); |
| 1319 | if ((tproto != IPPROTO_IPV6 && tproto != 0) || | 1328 | if ((tproto != IPPROTO_IPV6 && tproto != 0) || |
| 1320 | ip6_tnl_addr_conflict(t, ipv6h)) | 1329 | ip6_tnl_addr_conflict(t, ipv6h)) |
diff --git a/net/ipv6/route.c b/net/ipv6/route.c index 480a79f47c52..826b14de7dbb 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c | |||
| @@ -364,11 +364,14 @@ EXPORT_SYMBOL(ip6_dst_alloc); | |||
| 364 | 364 | ||
| 365 | static void ip6_dst_destroy(struct dst_entry *dst) | 365 | static void ip6_dst_destroy(struct dst_entry *dst) |
| 366 | { | 366 | { |
| 367 | struct dst_metrics *p = (struct dst_metrics *)DST_METRICS_PTR(dst); | ||
| 367 | struct rt6_info *rt = (struct rt6_info *)dst; | 368 | struct rt6_info *rt = (struct rt6_info *)dst; |
| 368 | struct fib6_info *from; | 369 | struct fib6_info *from; |
| 369 | struct inet6_dev *idev; | 370 | struct inet6_dev *idev; |
| 370 | 371 | ||
| 371 | dst_destroy_metrics_generic(dst); | 372 | if (p != &dst_default_metrics && refcount_dec_and_test(&p->refcnt)) |
| 373 | kfree(p); | ||
| 374 | |||
| 372 | rt6_uncached_list_del(rt); | 375 | rt6_uncached_list_del(rt); |
| 373 | 376 | ||
| 374 | idev = rt->rt6i_idev; | 377 | idev = rt->rt6i_idev; |
| @@ -976,6 +979,10 @@ static void rt6_set_from(struct rt6_info *rt, struct fib6_info *from) | |||
| 976 | rt->rt6i_flags &= ~RTF_EXPIRES; | 979 | rt->rt6i_flags &= ~RTF_EXPIRES; |
| 977 | rcu_assign_pointer(rt->from, from); | 980 | rcu_assign_pointer(rt->from, from); |
| 978 | dst_init_metrics(&rt->dst, from->fib6_metrics->metrics, true); | 981 | dst_init_metrics(&rt->dst, from->fib6_metrics->metrics, true); |
| 982 | if (from->fib6_metrics != &dst_default_metrics) { | ||
| 983 | rt->dst._metrics |= DST_METRICS_REFCOUNTED; | ||
| 984 | refcount_inc(&from->fib6_metrics->refcnt); | ||
| 985 | } | ||
| 979 | } | 986 | } |
| 980 | 987 | ||
| 981 | /* Caller must already hold reference to @ort */ | 988 | /* Caller must already hold reference to @ort */ |
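ip6_dst_destroy() now releases the metrics block itself: anything other than dst_default_metrics is treated as refcounted and freed on the last put, while rt6_set_from() takes the matching reference (and sets DST_METRICS_REFCOUNTED) when a route shares the fib entry's metrics. A small sketch of that share/release pattern with a hypothetical refcounted blob, not the kernel's dst_metrics type:

```c
/* Hypothetical shared, refcounted metrics blob; not the kernel types. */
struct shared_metrics {
	refcount_t refcnt;
	u32 metrics[4];
};

static struct shared_metrics default_metrics;	/* static fallback, never freed */

static void metrics_share(struct shared_metrics *m)
{
	if (m != &default_metrics)
		refcount_inc(&m->refcnt);	/* new user of the shared block */
}

static void metrics_release(struct shared_metrics *m)
{
	/* the static default block is shared by everyone and never freed */
	if (m != &default_metrics && refcount_dec_and_test(&m->refcnt))
		kfree(m);
}
```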
diff --git a/net/mpls/af_mpls.c b/net/mpls/af_mpls.c index 7a4de6d618b1..8fbe6cdbe255 100644 --- a/net/mpls/af_mpls.c +++ b/net/mpls/af_mpls.c | |||
| @@ -1533,10 +1533,14 @@ static int mpls_dev_notify(struct notifier_block *this, unsigned long event, | |||
| 1533 | unsigned int flags; | 1533 | unsigned int flags; |
| 1534 | 1534 | ||
| 1535 | if (event == NETDEV_REGISTER) { | 1535 | if (event == NETDEV_REGISTER) { |
| 1536 | /* For now just support Ethernet, IPGRE, SIT and IPIP devices */ | 1536 | |
| 1537 | /* For now just support Ethernet, IPGRE, IP6GRE, SIT and | ||
| 1538 | * IPIP devices | ||
| 1539 | */ | ||
| 1537 | if (dev->type == ARPHRD_ETHER || | 1540 | if (dev->type == ARPHRD_ETHER || |
| 1538 | dev->type == ARPHRD_LOOPBACK || | 1541 | dev->type == ARPHRD_LOOPBACK || |
| 1539 | dev->type == ARPHRD_IPGRE || | 1542 | dev->type == ARPHRD_IPGRE || |
| 1543 | dev->type == ARPHRD_IP6GRE || | ||
| 1540 | dev->type == ARPHRD_SIT || | 1544 | dev->type == ARPHRD_SIT || |
| 1541 | dev->type == ARPHRD_TUNNEL) { | 1545 | dev->type == ARPHRD_TUNNEL) { |
| 1542 | mdev = mpls_add_dev(dev); | 1546 | mdev = mpls_add_dev(dev); |
diff --git a/net/netlabel/netlabel_unlabeled.c b/net/netlabel/netlabel_unlabeled.c index c070dfc0190a..c92894c3e40a 100644 --- a/net/netlabel/netlabel_unlabeled.c +++ b/net/netlabel/netlabel_unlabeled.c | |||
| @@ -781,7 +781,8 @@ static int netlbl_unlabel_addrinfo_get(struct genl_info *info, | |||
| 781 | { | 781 | { |
| 782 | u32 addr_len; | 782 | u32 addr_len; |
| 783 | 783 | ||
| 784 | if (info->attrs[NLBL_UNLABEL_A_IPV4ADDR]) { | 784 | if (info->attrs[NLBL_UNLABEL_A_IPV4ADDR] && |
| 785 | info->attrs[NLBL_UNLABEL_A_IPV4MASK]) { | ||
| 785 | addr_len = nla_len(info->attrs[NLBL_UNLABEL_A_IPV4ADDR]); | 786 | addr_len = nla_len(info->attrs[NLBL_UNLABEL_A_IPV4ADDR]); |
| 786 | if (addr_len != sizeof(struct in_addr) && | 787 | if (addr_len != sizeof(struct in_addr) && |
| 787 | addr_len != nla_len(info->attrs[NLBL_UNLABEL_A_IPV4MASK])) | 788 | addr_len != nla_len(info->attrs[NLBL_UNLABEL_A_IPV4MASK])) |
diff --git a/net/nfc/hci/core.c b/net/nfc/hci/core.c index ac8030c4bcf8..19cb2e473ea6 100644 --- a/net/nfc/hci/core.c +++ b/net/nfc/hci/core.c | |||
| @@ -209,6 +209,11 @@ void nfc_hci_cmd_received(struct nfc_hci_dev *hdev, u8 pipe, u8 cmd, | |||
| 209 | } | 209 | } |
| 210 | create_info = (struct hci_create_pipe_resp *)skb->data; | 210 | create_info = (struct hci_create_pipe_resp *)skb->data; |
| 211 | 211 | ||
| 212 | if (create_info->pipe >= NFC_HCI_MAX_PIPES) { | ||
| 213 | status = NFC_HCI_ANY_E_NOK; | ||
| 214 | goto exit; | ||
| 215 | } | ||
| 216 | |||
| 212 | /* Save the new created pipe and bind with local gate, | 217 | /* Save the new created pipe and bind with local gate, |
| 213 | * the description for skb->data[3] is destination gate id | 218 | * the description for skb->data[3] is destination gate id |
| 214 | * but since we received this cmd from host controller, we | 219 | * but since we received this cmd from host controller, we |
| @@ -232,6 +237,11 @@ void nfc_hci_cmd_received(struct nfc_hci_dev *hdev, u8 pipe, u8 cmd, | |||
| 232 | } | 237 | } |
| 233 | delete_info = (struct hci_delete_pipe_noti *)skb->data; | 238 | delete_info = (struct hci_delete_pipe_noti *)skb->data; |
| 234 | 239 | ||
| 240 | if (delete_info->pipe >= NFC_HCI_MAX_PIPES) { | ||
| 241 | status = NFC_HCI_ANY_E_NOK; | ||
| 242 | goto exit; | ||
| 243 | } | ||
| 244 | |||
| 235 | hdev->pipes[delete_info->pipe].gate = NFC_HCI_INVALID_GATE; | 245 | hdev->pipes[delete_info->pipe].gate = NFC_HCI_INVALID_GATE; |
| 236 | hdev->pipes[delete_info->pipe].dest_host = NFC_HCI_INVALID_HOST; | 246 | hdev->pipes[delete_info->pipe].dest_host = NFC_HCI_INVALID_HOST; |
| 237 | break; | 247 | break; |
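Both notification handlers now validate the pipe index received from the peer against NFC_HCI_MAX_PIPES before using it to index hdev->pipes[], replying with NFC_HCI_ANY_E_NOK instead of writing out of bounds. A minimal sketch of bounds-checking an untrusted index, with placeholder names and sizes:

```c
/* Placeholder table and bound; the point is checking the index taken
 * from the received frame before it is used to index the array.
 */
#define MAX_PIPES 128

struct pipe_state {
	u8 gate;
	u8 dest_host;
};

static struct pipe_state pipes[MAX_PIPES];

static int handle_delete_pipe(u8 pipe)
{
	if (pipe >= MAX_PIPES)		/* reject an out-of-range pipe id from the peer */
		return -EINVAL;

	pipes[pipe].gate = 0xff;	/* placeholder "invalid" markers */
	pipes[pipe].dest_host = 0xff;
	return 0;
}
```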
diff --git a/net/rds/ib.h b/net/rds/ib.h index 73427ff439f9..71ff356ee702 100644 --- a/net/rds/ib.h +++ b/net/rds/ib.h | |||
| @@ -443,7 +443,7 @@ int rds_ib_send_grab_credits(struct rds_ib_connection *ic, u32 wanted, | |||
| 443 | int rds_ib_xmit_atomic(struct rds_connection *conn, struct rm_atomic_op *op); | 443 | int rds_ib_xmit_atomic(struct rds_connection *conn, struct rm_atomic_op *op); |
| 444 | 444 | ||
| 445 | /* ib_stats.c */ | 445 | /* ib_stats.c */ |
| 446 | DECLARE_PER_CPU(struct rds_ib_statistics, rds_ib_stats); | 446 | DECLARE_PER_CPU_SHARED_ALIGNED(struct rds_ib_statistics, rds_ib_stats); |
| 447 | #define rds_ib_stats_inc(member) rds_stats_inc_which(rds_ib_stats, member) | 447 | #define rds_ib_stats_inc(member) rds_stats_inc_which(rds_ib_stats, member) |
| 448 | #define rds_ib_stats_add(member, count) \ | 448 | #define rds_ib_stats_add(member, count) \ |
| 449 | rds_stats_add_which(rds_ib_stats, member, count) | 449 | rds_stats_add_which(rds_ib_stats, member, count) |
diff --git a/net/sctp/transport.c b/net/sctp/transport.c index 12cac85da994..033696e6f74f 100644 --- a/net/sctp/transport.c +++ b/net/sctp/transport.c | |||
| @@ -260,6 +260,7 @@ void sctp_transport_pmtu(struct sctp_transport *transport, struct sock *sk) | |||
| 260 | bool sctp_transport_update_pmtu(struct sctp_transport *t, u32 pmtu) | 260 | bool sctp_transport_update_pmtu(struct sctp_transport *t, u32 pmtu) |
| 261 | { | 261 | { |
| 262 | struct dst_entry *dst = sctp_transport_dst_check(t); | 262 | struct dst_entry *dst = sctp_transport_dst_check(t); |
| 263 | struct sock *sk = t->asoc->base.sk; | ||
| 263 | bool change = true; | 264 | bool change = true; |
| 264 | 265 | ||
| 265 | if (unlikely(pmtu < SCTP_DEFAULT_MINSEGMENT)) { | 266 | if (unlikely(pmtu < SCTP_DEFAULT_MINSEGMENT)) { |
| @@ -271,12 +272,19 @@ bool sctp_transport_update_pmtu(struct sctp_transport *t, u32 pmtu) | |||
| 271 | pmtu = SCTP_TRUNC4(pmtu); | 272 | pmtu = SCTP_TRUNC4(pmtu); |
| 272 | 273 | ||
| 273 | if (dst) { | 274 | if (dst) { |
| 274 | dst->ops->update_pmtu(dst, t->asoc->base.sk, NULL, pmtu); | 275 | struct sctp_pf *pf = sctp_get_pf_specific(dst->ops->family); |
| 276 | union sctp_addr addr; | ||
| 277 | |||
| 278 | pf->af->from_sk(&addr, sk); | ||
| 279 | pf->to_sk_daddr(&t->ipaddr, sk); | ||
| 280 | dst->ops->update_pmtu(dst, sk, NULL, pmtu); | ||
| 281 | pf->to_sk_daddr(&addr, sk); | ||
| 282 | |||
| 275 | dst = sctp_transport_dst_check(t); | 283 | dst = sctp_transport_dst_check(t); |
| 276 | } | 284 | } |
| 277 | 285 | ||
| 278 | if (!dst) { | 286 | if (!dst) { |
| 279 | t->af_specific->get_dst(t, &t->saddr, &t->fl, t->asoc->base.sk); | 287 | t->af_specific->get_dst(t, &t->saddr, &t->fl, sk); |
| 280 | dst = t->dst; | 288 | dst = t->dst; |
| 281 | } | 289 | } |
| 282 | 290 | ||
diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c index 2d8a1e15e4f9..015231789ed2 100644 --- a/net/smc/af_smc.c +++ b/net/smc/af_smc.c | |||
| @@ -742,7 +742,10 @@ static void smc_connect_work(struct work_struct *work) | |||
| 742 | smc->sk.sk_err = -rc; | 742 | smc->sk.sk_err = -rc; |
| 743 | 743 | ||
| 744 | out: | 744 | out: |
| 745 | smc->sk.sk_state_change(&smc->sk); | 745 | if (smc->sk.sk_err) |
| 746 | smc->sk.sk_state_change(&smc->sk); | ||
| 747 | else | ||
| 748 | smc->sk.sk_write_space(&smc->sk); | ||
| 746 | kfree(smc->connect_info); | 749 | kfree(smc->connect_info); |
| 747 | smc->connect_info = NULL; | 750 | smc->connect_info = NULL; |
| 748 | release_sock(&smc->sk); | 751 | release_sock(&smc->sk); |
| @@ -1150,9 +1153,9 @@ static int smc_listen_rdma_reg(struct smc_sock *new_smc, int local_contact) | |||
| 1150 | } | 1153 | } |
| 1151 | 1154 | ||
| 1152 | /* listen worker: finish RDMA setup */ | 1155 | /* listen worker: finish RDMA setup */ |
| 1153 | static void smc_listen_rdma_finish(struct smc_sock *new_smc, | 1156 | static int smc_listen_rdma_finish(struct smc_sock *new_smc, |
| 1154 | struct smc_clc_msg_accept_confirm *cclc, | 1157 | struct smc_clc_msg_accept_confirm *cclc, |
| 1155 | int local_contact) | 1158 | int local_contact) |
| 1156 | { | 1159 | { |
| 1157 | struct smc_link *link = &new_smc->conn.lgr->lnk[SMC_SINGLE_LINK]; | 1160 | struct smc_link *link = &new_smc->conn.lgr->lnk[SMC_SINGLE_LINK]; |
| 1158 | int reason_code = 0; | 1161 | int reason_code = 0; |
| @@ -1175,11 +1178,12 @@ static void smc_listen_rdma_finish(struct smc_sock *new_smc, | |||
| 1175 | if (reason_code) | 1178 | if (reason_code) |
| 1176 | goto decline; | 1179 | goto decline; |
| 1177 | } | 1180 | } |
| 1178 | return; | 1181 | return 0; |
| 1179 | 1182 | ||
| 1180 | decline: | 1183 | decline: |
| 1181 | mutex_unlock(&smc_create_lgr_pending); | 1184 | mutex_unlock(&smc_create_lgr_pending); |
| 1182 | smc_listen_decline(new_smc, reason_code, local_contact); | 1185 | smc_listen_decline(new_smc, reason_code, local_contact); |
| 1186 | return reason_code; | ||
| 1183 | } | 1187 | } |
| 1184 | 1188 | ||
| 1185 | /* setup for RDMA connection of server */ | 1189 | /* setup for RDMA connection of server */ |
| @@ -1276,8 +1280,10 @@ static void smc_listen_work(struct work_struct *work) | |||
| 1276 | } | 1280 | } |
| 1277 | 1281 | ||
| 1278 | /* finish worker */ | 1282 | /* finish worker */ |
| 1279 | if (!ism_supported) | 1283 | if (!ism_supported) { |
| 1280 | smc_listen_rdma_finish(new_smc, &cclc, local_contact); | 1284 | if (smc_listen_rdma_finish(new_smc, &cclc, local_contact)) |
| 1285 | return; | ||
| 1286 | } | ||
| 1281 | smc_conn_save_peer_info(new_smc, &cclc); | 1287 | smc_conn_save_peer_info(new_smc, &cclc); |
| 1282 | mutex_unlock(&smc_create_lgr_pending); | 1288 | mutex_unlock(&smc_create_lgr_pending); |
| 1283 | smc_listen_out_connected(new_smc); | 1289 | smc_listen_out_connected(new_smc); |
| @@ -1529,7 +1535,7 @@ static __poll_t smc_poll(struct file *file, struct socket *sock, | |||
| 1529 | return EPOLLNVAL; | 1535 | return EPOLLNVAL; |
| 1530 | 1536 | ||
| 1531 | smc = smc_sk(sock->sk); | 1537 | smc = smc_sk(sock->sk); |
| 1532 | if ((sk->sk_state == SMC_INIT) || smc->use_fallback) { | 1538 | if (smc->use_fallback) { |
| 1533 | /* delegate to CLC child sock */ | 1539 | /* delegate to CLC child sock */ |
| 1534 | mask = smc->clcsock->ops->poll(file, smc->clcsock, wait); | 1540 | mask = smc->clcsock->ops->poll(file, smc->clcsock, wait); |
| 1535 | sk->sk_err = smc->clcsock->sk->sk_err; | 1541 | sk->sk_err = smc->clcsock->sk->sk_err; |
| @@ -1560,9 +1566,9 @@ static __poll_t smc_poll(struct file *file, struct socket *sock, | |||
| 1560 | mask |= EPOLLIN | EPOLLRDNORM | EPOLLRDHUP; | 1566 | mask |= EPOLLIN | EPOLLRDNORM | EPOLLRDHUP; |
| 1561 | if (sk->sk_state == SMC_APPCLOSEWAIT1) | 1567 | if (sk->sk_state == SMC_APPCLOSEWAIT1) |
| 1562 | mask |= EPOLLIN; | 1568 | mask |= EPOLLIN; |
| 1569 | if (smc->conn.urg_state == SMC_URG_VALID) | ||
| 1570 | mask |= EPOLLPRI; | ||
| 1563 | } | 1571 | } |
| 1564 | if (smc->conn.urg_state == SMC_URG_VALID) | ||
| 1565 | mask |= EPOLLPRI; | ||
| 1566 | } | 1572 | } |
| 1567 | 1573 | ||
| 1568 | return mask; | 1574 | return mask; |
diff --git a/net/smc/smc_clc.c b/net/smc/smc_clc.c index 83aba9ade060..52241d679cc9 100644 --- a/net/smc/smc_clc.c +++ b/net/smc/smc_clc.c | |||
| @@ -446,14 +446,12 @@ int smc_clc_send_proposal(struct smc_sock *smc, int smc_type, | |||
| 446 | vec[i++].iov_len = sizeof(trl); | 446 | vec[i++].iov_len = sizeof(trl); |
| 447 | /* due to the few bytes needed for clc-handshake this cannot block */ | 447 | /* due to the few bytes needed for clc-handshake this cannot block */ |
| 448 | len = kernel_sendmsg(smc->clcsock, &msg, vec, i, plen); | 448 | len = kernel_sendmsg(smc->clcsock, &msg, vec, i, plen); |
| 449 | if (len < sizeof(pclc)) { | 449 | if (len < 0) { |
| 450 | if (len >= 0) { | 450 | smc->sk.sk_err = smc->clcsock->sk->sk_err; |
| 451 | reason_code = -ENETUNREACH; | 451 | reason_code = -smc->sk.sk_err; |
| 452 | smc->sk.sk_err = -reason_code; | 452 | } else if (len < (int)sizeof(pclc)) { |
| 453 | } else { | 453 | reason_code = -ENETUNREACH; |
| 454 | smc->sk.sk_err = smc->clcsock->sk->sk_err; | 454 | smc->sk.sk_err = -reason_code; |
| 455 | reason_code = -smc->sk.sk_err; | ||
| 456 | } | ||
| 457 | } | 455 | } |
| 458 | 456 | ||
| 459 | return reason_code; | 457 | return reason_code; |
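The rewritten check separates a negative kernel_sendmsg() return (a transport error taken from the CLC socket) from a short but non-negative send, which is reported as -ENETUNREACH. A sketch of that split, with do_send() as a stand-in for kernel_sendmsg() and a plain return code instead of the smc->sk error fields:

```c
/* do_send() is a placeholder for kernel_sendmsg(); want is the size of
 * the fixed-length handshake message.
 */
static int send_handshake(struct socket *sock, void *buf, size_t want)
{
	int len = do_send(sock, buf, want);

	if (len < 0)
		return len;		/* transport error from the socket */
	if (len < (int)want)
		return -ENETUNREACH;	/* short send of a fixed-size message */

	return 0;
}
```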
diff --git a/net/smc/smc_close.c b/net/smc/smc_close.c index ac961dfb1ea1..ea2b87f29469 100644 --- a/net/smc/smc_close.c +++ b/net/smc/smc_close.c | |||
| @@ -100,15 +100,14 @@ static void smc_close_active_abort(struct smc_sock *smc) | |||
| 100 | struct smc_cdc_conn_state_flags *txflags = | 100 | struct smc_cdc_conn_state_flags *txflags = |
| 101 | &smc->conn.local_tx_ctrl.conn_state_flags; | 101 | &smc->conn.local_tx_ctrl.conn_state_flags; |
| 102 | 102 | ||
| 103 | sk->sk_err = ECONNABORTED; | 103 | if (sk->sk_state != SMC_INIT && smc->clcsock && smc->clcsock->sk) { |
| 104 | if (smc->clcsock && smc->clcsock->sk) { | 104 | sk->sk_err = ECONNABORTED; |
| 105 | smc->clcsock->sk->sk_err = ECONNABORTED; | 105 | if (smc->clcsock && smc->clcsock->sk) { |
| 106 | smc->clcsock->sk->sk_state_change(smc->clcsock->sk); | 106 | smc->clcsock->sk->sk_err = ECONNABORTED; |
| 107 | smc->clcsock->sk->sk_state_change(smc->clcsock->sk); | ||
| 108 | } | ||
| 107 | } | 109 | } |
| 108 | switch (sk->sk_state) { | 110 | switch (sk->sk_state) { |
| 109 | case SMC_INIT: | ||
| 110 | sk->sk_state = SMC_PEERABORTWAIT; | ||
| 111 | break; | ||
| 112 | case SMC_ACTIVE: | 111 | case SMC_ACTIVE: |
| 113 | sk->sk_state = SMC_PEERABORTWAIT; | 112 | sk->sk_state = SMC_PEERABORTWAIT; |
| 114 | release_sock(sk); | 113 | release_sock(sk); |
| @@ -143,6 +142,7 @@ static void smc_close_active_abort(struct smc_sock *smc) | |||
| 143 | case SMC_PEERFINCLOSEWAIT: | 142 | case SMC_PEERFINCLOSEWAIT: |
| 144 | sock_put(sk); /* passive closing */ | 143 | sock_put(sk); /* passive closing */ |
| 145 | break; | 144 | break; |
| 145 | case SMC_INIT: | ||
| 146 | case SMC_PEERABORTWAIT: | 146 | case SMC_PEERABORTWAIT: |
| 147 | case SMC_CLOSED: | 147 | case SMC_CLOSED: |
| 148 | break; | 148 | break; |
diff --git a/net/smc/smc_pnet.c b/net/smc/smc_pnet.c index 01c6ce042a1c..7cb3e4f07c10 100644 --- a/net/smc/smc_pnet.c +++ b/net/smc/smc_pnet.c | |||
| @@ -461,7 +461,7 @@ static const struct genl_ops smc_pnet_ops[] = { | |||
| 461 | }; | 461 | }; |
| 462 | 462 | ||
| 463 | /* SMC_PNETID family definition */ | 463 | /* SMC_PNETID family definition */ |
| 464 | static struct genl_family smc_pnet_nl_family = { | 464 | static struct genl_family smc_pnet_nl_family __ro_after_init = { |
| 465 | .hdrsize = 0, | 465 | .hdrsize = 0, |
| 466 | .name = SMCR_GENL_FAMILY_NAME, | 466 | .name = SMCR_GENL_FAMILY_NAME, |
| 467 | .version = SMCR_GENL_FAMILY_VERSION, | 467 | .version = SMCR_GENL_FAMILY_VERSION, |
diff --git a/security/keys/dh.c b/security/keys/dh.c index 3b602a1e27fa..711e89d8c415 100644 --- a/security/keys/dh.c +++ b/security/keys/dh.c | |||
| @@ -300,7 +300,7 @@ long __keyctl_dh_compute(struct keyctl_dh_params __user *params, | |||
| 300 | } | 300 | } |
| 301 | dh_inputs.g_size = dlen; | 301 | dh_inputs.g_size = dlen; |
| 302 | 302 | ||
| 303 | dlen = dh_data_from_key(pcopy.dh_private, &dh_inputs.key); | 303 | dlen = dh_data_from_key(pcopy.private, &dh_inputs.key); |
| 304 | if (dlen < 0) { | 304 | if (dlen < 0) { |
| 305 | ret = dlen; | 305 | ret = dlen; |
| 306 | goto out2; | 306 | goto out2; |
diff --git a/tools/include/tools/libc_compat.h b/tools/include/tools/libc_compat.h index 664ced8cb1b0..e907ba6f15e5 100644 --- a/tools/include/tools/libc_compat.h +++ b/tools/include/tools/libc_compat.h | |||
| @@ -1,4 +1,4 @@ | |||
| 1 | // SPDX-License-Identifier: GPL-2.0+ | 1 | // SPDX-License-Identifier: (LGPL-2.0+ OR BSD-2-Clause) |
| 2 | /* Copyright (C) 2018 Netronome Systems, Inc. */ | 2 | /* Copyright (C) 2018 Netronome Systems, Inc. */ |
| 3 | 3 | ||
| 4 | #ifndef __TOOLS_LIBC_COMPAT_H | 4 | #ifndef __TOOLS_LIBC_COMPAT_H |
diff --git a/tools/testing/selftests/bpf/test_maps.c b/tools/testing/selftests/bpf/test_maps.c index 6f54f84144a0..9b552c0fc47d 100644 --- a/tools/testing/selftests/bpf/test_maps.c +++ b/tools/testing/selftests/bpf/test_maps.c | |||
| @@ -580,7 +580,11 @@ static void test_sockmap(int tasks, void *data) | |||
| 580 | /* Test update without programs */ | 580 | /* Test update without programs */ |
| 581 | for (i = 0; i < 6; i++) { | 581 | for (i = 0; i < 6; i++) { |
| 582 | err = bpf_map_update_elem(fd, &i, &sfd[i], BPF_ANY); | 582 | err = bpf_map_update_elem(fd, &i, &sfd[i], BPF_ANY); |
| 583 | if (err) { | 583 | if (i < 2 && !err) { |
| 584 | printf("Allowed update sockmap '%i:%i' not in ESTABLISHED\n", | ||
| 585 | i, sfd[i]); | ||
| 586 | goto out_sockmap; | ||
| 587 | } else if (i >= 2 && err) { | ||
| 584 | printf("Failed noprog update sockmap '%i:%i'\n", | 588 | printf("Failed noprog update sockmap '%i:%i'\n", |
| 585 | i, sfd[i]); | 589 | i, sfd[i]); |
| 586 | goto out_sockmap; | 590 | goto out_sockmap; |
| @@ -741,7 +745,7 @@ static void test_sockmap(int tasks, void *data) | |||
| 741 | } | 745 | } |
| 742 | 746 | ||
| 743 | /* Test map update elem afterwards fd lives in fd and map_fd */ | 747 | /* Test map update elem afterwards fd lives in fd and map_fd */ |
| 744 | for (i = 0; i < 6; i++) { | 748 | for (i = 2; i < 6; i++) { |
| 745 | err = bpf_map_update_elem(map_fd_rx, &i, &sfd[i], BPF_ANY); | 749 | err = bpf_map_update_elem(map_fd_rx, &i, &sfd[i], BPF_ANY); |
| 746 | if (err) { | 750 | if (err) { |
| 747 | printf("Failed map_fd_rx update sockmap %i '%i:%i'\n", | 751 | printf("Failed map_fd_rx update sockmap %i '%i:%i'\n", |
| @@ -845,7 +849,7 @@ static void test_sockmap(int tasks, void *data) | |||
| 845 | } | 849 | } |
| 846 | 850 | ||
| 847 | /* Delete the elems without programs */ | 851 | /* Delete the elems without programs */ |
| 848 | for (i = 0; i < 6; i++) { | 852 | for (i = 2; i < 6; i++) { |
| 849 | err = bpf_map_delete_elem(fd, &i); | 853 | err = bpf_map_delete_elem(fd, &i); |
| 850 | if (err) { | 854 | if (err) { |
| 851 | printf("Failed delete sockmap %i '%i:%i'\n", | 855 | printf("Failed delete sockmap %i '%i:%i'\n", |
diff --git a/tools/testing/selftests/net/pmtu.sh b/tools/testing/selftests/net/pmtu.sh index 32a194e3e07a..0ab9423d009f 100755 --- a/tools/testing/selftests/net/pmtu.sh +++ b/tools/testing/selftests/net/pmtu.sh | |||
| @@ -178,8 +178,8 @@ setup() { | |||
| 178 | 178 | ||
| 179 | cleanup() { | 179 | cleanup() { |
| 180 | [ ${cleanup_done} -eq 1 ] && return | 180 | [ ${cleanup_done} -eq 1 ] && return |
| 181 | ip netns del ${NS_A} 2 > /dev/null | 181 | ip netns del ${NS_A} 2> /dev/null |
| 182 | ip netns del ${NS_B} 2 > /dev/null | 182 | ip netns del ${NS_B} 2> /dev/null |
| 183 | cleanup_done=1 | 183 | cleanup_done=1 |
| 184 | } | 184 | } |
| 185 | 185 | ||
diff --git a/tools/testing/selftests/powerpc/alignment/Makefile b/tools/testing/selftests/powerpc/alignment/Makefile index 93baacab7693..d056486f49de 100644 --- a/tools/testing/selftests/powerpc/alignment/Makefile +++ b/tools/testing/selftests/powerpc/alignment/Makefile | |||
| @@ -1,5 +1,6 @@ | |||
| 1 | TEST_GEN_PROGS := copy_first_unaligned alignment_handler | 1 | TEST_GEN_PROGS := copy_first_unaligned alignment_handler |
| 2 | 2 | ||
| 3 | top_srcdir = ../../../../.. | ||
| 3 | include ../../lib.mk | 4 | include ../../lib.mk |
| 4 | 5 | ||
| 5 | $(TEST_GEN_PROGS): ../harness.c ../utils.c | 6 | $(TEST_GEN_PROGS): ../harness.c ../utils.c |
diff --git a/tools/testing/selftests/powerpc/benchmarks/Makefile b/tools/testing/selftests/powerpc/benchmarks/Makefile index b4d7432a0ecd..d40300a65b42 100644 --- a/tools/testing/selftests/powerpc/benchmarks/Makefile +++ b/tools/testing/selftests/powerpc/benchmarks/Makefile | |||
| @@ -4,6 +4,7 @@ TEST_GEN_FILES := exec_target | |||
| 4 | 4 | ||
| 5 | CFLAGS += -O2 | 5 | CFLAGS += -O2 |
| 6 | 6 | ||
| 7 | top_srcdir = ../../../../.. | ||
| 7 | include ../../lib.mk | 8 | include ../../lib.mk |
| 8 | 9 | ||
| 9 | $(TEST_GEN_PROGS): ../harness.c | 10 | $(TEST_GEN_PROGS): ../harness.c |
diff --git a/tools/testing/selftests/powerpc/cache_shape/Makefile b/tools/testing/selftests/powerpc/cache_shape/Makefile index 1be547434a49..ede4d3dae750 100644 --- a/tools/testing/selftests/powerpc/cache_shape/Makefile +++ b/tools/testing/selftests/powerpc/cache_shape/Makefile | |||
| @@ -5,6 +5,7 @@ all: $(TEST_PROGS) | |||
| 5 | 5 | ||
| 6 | $(TEST_PROGS): ../harness.c ../utils.c | 6 | $(TEST_PROGS): ../harness.c ../utils.c |
| 7 | 7 | ||
| 8 | top_srcdir = ../../../../.. | ||
| 8 | include ../../lib.mk | 9 | include ../../lib.mk |
| 9 | 10 | ||
| 10 | clean: | 11 | clean: |
diff --git a/tools/testing/selftests/powerpc/copyloops/Makefile b/tools/testing/selftests/powerpc/copyloops/Makefile index 1cf89a34d97c..44574f3818b3 100644 --- a/tools/testing/selftests/powerpc/copyloops/Makefile +++ b/tools/testing/selftests/powerpc/copyloops/Makefile | |||
| @@ -17,6 +17,7 @@ TEST_GEN_PROGS := copyuser_64_t0 copyuser_64_t1 copyuser_64_t2 \ | |||
| 17 | 17 | ||
| 18 | EXTRA_SOURCES := validate.c ../harness.c stubs.S | 18 | EXTRA_SOURCES := validate.c ../harness.c stubs.S |
| 19 | 19 | ||
| 20 | top_srcdir = ../../../../.. | ||
| 20 | include ../../lib.mk | 21 | include ../../lib.mk |
| 21 | 22 | ||
| 22 | $(OUTPUT)/copyuser_64_t%: copyuser_64.S $(EXTRA_SOURCES) | 23 | $(OUTPUT)/copyuser_64_t%: copyuser_64.S $(EXTRA_SOURCES) |
diff --git a/tools/testing/selftests/powerpc/dscr/Makefile b/tools/testing/selftests/powerpc/dscr/Makefile index 55d7db7a616b..5df476364b4d 100644 --- a/tools/testing/selftests/powerpc/dscr/Makefile +++ b/tools/testing/selftests/powerpc/dscr/Makefile | |||
| @@ -3,6 +3,7 @@ TEST_GEN_PROGS := dscr_default_test dscr_explicit_test dscr_user_test \ | |||
| 3 | dscr_inherit_test dscr_inherit_exec_test dscr_sysfs_test \ | 3 | dscr_inherit_test dscr_inherit_exec_test dscr_sysfs_test \ |
| 4 | dscr_sysfs_thread_test | 4 | dscr_sysfs_thread_test |
| 5 | 5 | ||
| 6 | top_srcdir = ../../../../.. | ||
| 6 | include ../../lib.mk | 7 | include ../../lib.mk |
| 7 | 8 | ||
| 8 | $(OUTPUT)/dscr_default_test: LDLIBS += -lpthread | 9 | $(OUTPUT)/dscr_default_test: LDLIBS += -lpthread |
diff --git a/tools/testing/selftests/powerpc/math/Makefile b/tools/testing/selftests/powerpc/math/Makefile index 0dd3a01fdab9..11a10d7a2bbd 100644 --- a/tools/testing/selftests/powerpc/math/Makefile +++ b/tools/testing/selftests/powerpc/math/Makefile | |||
| @@ -1,6 +1,7 @@ | |||
| 1 | # SPDX-License-Identifier: GPL-2.0 | 1 | # SPDX-License-Identifier: GPL-2.0 |
| 2 | TEST_GEN_PROGS := fpu_syscall fpu_preempt fpu_signal vmx_syscall vmx_preempt vmx_signal vsx_preempt | 2 | TEST_GEN_PROGS := fpu_syscall fpu_preempt fpu_signal vmx_syscall vmx_preempt vmx_signal vsx_preempt |
| 3 | 3 | ||
| 4 | top_srcdir = ../../../../.. | ||
| 4 | include ../../lib.mk | 5 | include ../../lib.mk |
| 5 | 6 | ||
| 6 | $(TEST_GEN_PROGS): ../harness.c | 7 | $(TEST_GEN_PROGS): ../harness.c |
diff --git a/tools/testing/selftests/powerpc/mm/Makefile b/tools/testing/selftests/powerpc/mm/Makefile index 8ebbe96d80a8..33ced6e0ad25 100644 --- a/tools/testing/selftests/powerpc/mm/Makefile +++ b/tools/testing/selftests/powerpc/mm/Makefile | |||
| @@ -5,6 +5,7 @@ noarg: | |||
| 5 | TEST_GEN_PROGS := hugetlb_vs_thp_test subpage_prot prot_sao segv_errors | 5 | TEST_GEN_PROGS := hugetlb_vs_thp_test subpage_prot prot_sao segv_errors |
| 6 | TEST_GEN_FILES := tempfile | 6 | TEST_GEN_FILES := tempfile |
| 7 | 7 | ||
| 8 | top_srcdir = ../../../../.. | ||
| 8 | include ../../lib.mk | 9 | include ../../lib.mk |
| 9 | 10 | ||
| 10 | $(TEST_GEN_PROGS): ../harness.c | 11 | $(TEST_GEN_PROGS): ../harness.c |
diff --git a/tools/testing/selftests/powerpc/pmu/Makefile b/tools/testing/selftests/powerpc/pmu/Makefile index 6e1629bf5b09..19046db995fe 100644 --- a/tools/testing/selftests/powerpc/pmu/Makefile +++ b/tools/testing/selftests/powerpc/pmu/Makefile | |||
| @@ -5,6 +5,7 @@ noarg: | |||
| 5 | TEST_GEN_PROGS := count_instructions l3_bank_test per_event_excludes | 5 | TEST_GEN_PROGS := count_instructions l3_bank_test per_event_excludes |
| 6 | EXTRA_SOURCES := ../harness.c event.c lib.c ../utils.c | 6 | EXTRA_SOURCES := ../harness.c event.c lib.c ../utils.c |
| 7 | 7 | ||
| 8 | top_srcdir = ../../../../.. | ||
| 8 | include ../../lib.mk | 9 | include ../../lib.mk |
| 9 | 10 | ||
| 10 | all: $(TEST_GEN_PROGS) ebb | 11 | all: $(TEST_GEN_PROGS) ebb |
diff --git a/tools/testing/selftests/powerpc/pmu/ebb/Makefile b/tools/testing/selftests/powerpc/pmu/ebb/Makefile index c4e64bc2e265..bd5dfa509272 100644 --- a/tools/testing/selftests/powerpc/pmu/ebb/Makefile +++ b/tools/testing/selftests/powerpc/pmu/ebb/Makefile | |||
| @@ -17,6 +17,7 @@ TEST_GEN_PROGS := reg_access_test event_attributes_test cycles_test \ | |||
| 17 | lost_exception_test no_handler_test \ | 17 | lost_exception_test no_handler_test \ |
| 18 | cycles_with_mmcr2_test | 18 | cycles_with_mmcr2_test |
| 19 | 19 | ||
| 20 | top_srcdir = ../../../../../.. | ||
| 20 | include ../../../lib.mk | 21 | include ../../../lib.mk |
| 21 | 22 | ||
| 22 | $(TEST_GEN_PROGS): ../../harness.c ../../utils.c ../event.c ../lib.c \ | 23 | $(TEST_GEN_PROGS): ../../harness.c ../../utils.c ../event.c ../lib.c \ |
diff --git a/tools/testing/selftests/powerpc/primitives/Makefile b/tools/testing/selftests/powerpc/primitives/Makefile index 175366db7be8..ea2b7bd09e36 100644 --- a/tools/testing/selftests/powerpc/primitives/Makefile +++ b/tools/testing/selftests/powerpc/primitives/Makefile | |||
| @@ -2,6 +2,7 @@ CFLAGS += -I$(CURDIR) | |||
| 2 | 2 | ||
| 3 | TEST_GEN_PROGS := load_unaligned_zeropad | 3 | TEST_GEN_PROGS := load_unaligned_zeropad |
| 4 | 4 | ||
| 5 | top_srcdir = ../../../../.. | ||
| 5 | include ../../lib.mk | 6 | include ../../lib.mk |
| 6 | 7 | ||
| 7 | $(TEST_GEN_PROGS): ../harness.c | 8 | $(TEST_GEN_PROGS): ../harness.c |
diff --git a/tools/testing/selftests/powerpc/ptrace/Makefile b/tools/testing/selftests/powerpc/ptrace/Makefile index 28f5b781a553..923d531265f8 100644 --- a/tools/testing/selftests/powerpc/ptrace/Makefile +++ b/tools/testing/selftests/powerpc/ptrace/Makefile | |||
| @@ -4,6 +4,7 @@ TEST_PROGS := ptrace-gpr ptrace-tm-gpr ptrace-tm-spd-gpr \ | |||
| 4 | ptrace-tm-spd-vsx ptrace-tm-spr ptrace-hwbreak ptrace-pkey core-pkey \ | 4 | ptrace-tm-spd-vsx ptrace-tm-spr ptrace-hwbreak ptrace-pkey core-pkey \ |
| 5 | perf-hwbreak | 5 | perf-hwbreak |
| 6 | 6 | ||
| 7 | top_srcdir = ../../../../.. | ||
| 7 | include ../../lib.mk | 8 | include ../../lib.mk |
| 8 | 9 | ||
| 9 | all: $(TEST_PROGS) | 10 | all: $(TEST_PROGS) |
diff --git a/tools/testing/selftests/powerpc/signal/Makefile b/tools/testing/selftests/powerpc/signal/Makefile index a7cbd5082e27..1fca25c6ace0 100644 --- a/tools/testing/selftests/powerpc/signal/Makefile +++ b/tools/testing/selftests/powerpc/signal/Makefile | |||
| @@ -8,6 +8,7 @@ $(TEST_PROGS): ../harness.c ../utils.c signal.S | |||
| 8 | CFLAGS += -maltivec | 8 | CFLAGS += -maltivec |
| 9 | signal_tm: CFLAGS += -mhtm | 9 | signal_tm: CFLAGS += -mhtm |
| 10 | 10 | ||
| 11 | top_srcdir = ../../../../.. | ||
| 11 | include ../../lib.mk | 12 | include ../../lib.mk |
| 12 | 13 | ||
| 13 | clean: | 14 | clean: |
diff --git a/tools/testing/selftests/powerpc/stringloops/Makefile b/tools/testing/selftests/powerpc/stringloops/Makefile index 10b35c87a4f4..7fc0623d85c3 100644 --- a/tools/testing/selftests/powerpc/stringloops/Makefile +++ b/tools/testing/selftests/powerpc/stringloops/Makefile | |||
| @@ -29,6 +29,7 @@ endif | |||
| 29 | 29 | ||
| 30 | ASFLAGS = $(CFLAGS) | 30 | ASFLAGS = $(CFLAGS) |
| 31 | 31 | ||
| 32 | top_srcdir = ../../../../.. | ||
| 32 | include ../../lib.mk | 33 | include ../../lib.mk |
| 33 | 34 | ||
| 34 | $(TEST_GEN_PROGS): $(EXTRA_SOURCES) | 35 | $(TEST_GEN_PROGS): $(EXTRA_SOURCES) |
diff --git a/tools/testing/selftests/powerpc/switch_endian/Makefile b/tools/testing/selftests/powerpc/switch_endian/Makefile index 30b8ff8fb82e..fcd2dcb8972b 100644 --- a/tools/testing/selftests/powerpc/switch_endian/Makefile +++ b/tools/testing/selftests/powerpc/switch_endian/Makefile | |||
| @@ -5,6 +5,7 @@ ASFLAGS += -O2 -Wall -g -nostdlib -m64 | |||
| 5 | 5 | ||
| 6 | EXTRA_CLEAN = $(OUTPUT)/*.o $(OUTPUT)/check-reversed.S | 6 | EXTRA_CLEAN = $(OUTPUT)/*.o $(OUTPUT)/check-reversed.S |
| 7 | 7 | ||
| 8 | top_srcdir = ../../../../.. | ||
| 8 | include ../../lib.mk | 9 | include ../../lib.mk |
| 9 | 10 | ||
| 10 | $(OUTPUT)/switch_endian_test: $(OUTPUT)/check-reversed.S | 11 | $(OUTPUT)/switch_endian_test: $(OUTPUT)/check-reversed.S |
diff --git a/tools/testing/selftests/powerpc/syscalls/Makefile b/tools/testing/selftests/powerpc/syscalls/Makefile index da22ca7c38c1..161b8846336f 100644 --- a/tools/testing/selftests/powerpc/syscalls/Makefile +++ b/tools/testing/selftests/powerpc/syscalls/Makefile | |||
| @@ -2,6 +2,7 @@ TEST_GEN_PROGS := ipc_unmuxed | |||
| 2 | 2 | ||
| 3 | CFLAGS += -I../../../../../usr/include | 3 | CFLAGS += -I../../../../../usr/include |
| 4 | 4 | ||
| 5 | top_srcdir = ../../../../.. | ||
| 5 | include ../../lib.mk | 6 | include ../../lib.mk |
| 6 | 7 | ||
| 7 | $(TEST_GEN_PROGS): ../harness.c | 8 | $(TEST_GEN_PROGS): ../harness.c |
diff --git a/tools/testing/selftests/powerpc/tm/Makefile b/tools/testing/selftests/powerpc/tm/Makefile index c0e45d2dde25..9fc2cf6fbc92 100644 --- a/tools/testing/selftests/powerpc/tm/Makefile +++ b/tools/testing/selftests/powerpc/tm/Makefile | |||
| @@ -6,6 +6,7 @@ TEST_GEN_PROGS := tm-resched-dscr tm-syscall tm-signal-msr-resv tm-signal-stack | |||
| 6 | tm-vmxcopy tm-fork tm-tar tm-tmspr tm-vmx-unavail tm-unavailable tm-trap \ | 6 | tm-vmxcopy tm-fork tm-tar tm-tmspr tm-vmx-unavail tm-unavailable tm-trap \ |
| 7 | $(SIGNAL_CONTEXT_CHK_TESTS) tm-sigreturn | 7 | $(SIGNAL_CONTEXT_CHK_TESTS) tm-sigreturn |
| 8 | 8 | ||
| 9 | top_srcdir = ../../../../.. | ||
| 9 | include ../../lib.mk | 10 | include ../../lib.mk |
| 10 | 11 | ||
| 11 | $(TEST_GEN_PROGS): ../harness.c ../utils.c | 12 | $(TEST_GEN_PROGS): ../harness.c ../utils.c |
diff --git a/tools/testing/selftests/powerpc/vphn/Makefile b/tools/testing/selftests/powerpc/vphn/Makefile index f8ced26748f8..fb82068c9fda 100644 --- a/tools/testing/selftests/powerpc/vphn/Makefile +++ b/tools/testing/selftests/powerpc/vphn/Makefile | |||
| @@ -2,6 +2,7 @@ TEST_GEN_PROGS := test-vphn | |||
| 2 | 2 | ||
| 3 | CFLAGS += -m64 | 3 | CFLAGS += -m64 |
| 4 | 4 | ||
| 5 | top_srcdir = ../../../../.. | ||
| 5 | include ../../lib.mk | 6 | include ../../lib.mk |
| 6 | 7 | ||
| 7 | $(TEST_GEN_PROGS): ../harness.c | 8 | $(TEST_GEN_PROGS): ../harness.c |
