author     Ingo Molnar <mingo@kernel.org>	2015-08-31 04:25:26 -0400
committer  Ingo Molnar <mingo@kernel.org>	2015-08-31 04:25:26 -0400
commit     02b643b643254ec79b5f9aaa143e10be68eabdab
tree       57ec83eebab74324465199a3f52f4f7862177ee6
parent     d1ee8bc195ffedbf91af0245a2406d6ebd2578f8
parent     4c09e0d6ba65507a0ee0ca9abc5335e4f7bd7404

    Merge branch 'perf/urgent' into perf/core, to pick up fixes

    Signed-off-by: Ingo Molnar <mingo@kernel.org>

 124 files changed, 855 insertions, 1087 deletions
diff --git a/.get_maintainer.ignore b/.get_maintainer.ignore
new file mode 100644
index 000000000000..cca6d870f7a5
--- /dev/null
+++ b/.get_maintainer.ignore
@@ -0,0 +1 @@
+Christoph Hellwig <hch@lst.de>
diff --git a/MAINTAINERS b/MAINTAINERS
index 569568f6644f..b60e2b2369d2 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -5849,6 +5849,7 @@ S: Odd Fixes
 
 KERNEL NFSD, SUNRPC, AND LOCKD SERVERS
 M: "J. Bruce Fields" <bfields@fieldses.org>
+M: Jeff Layton <jlayton@poochiereds.net>
 L: linux-nfs@vger.kernel.org
 W: http://nfs.sourceforge.net/
 S: Supported
diff --git a/Makefile b/Makefile
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 4
 PATCHLEVEL = 2
 SUBLEVEL = 0
-EXTRAVERSION = -rc7
+EXTRAVERSION =
 NAME = Hurr durr I'ma sheep
 
 # *DOCUMENTATION*
diff --git a/arch/arm/Makefile b/arch/arm/Makefile
index 07ab3d203916..7451b447cc2d 100644
--- a/arch/arm/Makefile
+++ b/arch/arm/Makefile
@@ -312,6 +312,9 @@ INSTALL_TARGETS = zinstall uinstall install
 
 PHONY += bzImage $(BOOT_TARGETS) $(INSTALL_TARGETS)
 
+bootpImage uImage: zImage
+zImage: Image
+
 $(BOOT_TARGETS): vmlinux
 	$(Q)$(MAKE) $(build)=$(boot) MACHINE=$(MACHINE) $(boot)/$@
 
diff --git a/arch/arm/lib/uaccess_with_memcpy.c b/arch/arm/lib/uaccess_with_memcpy.c
index 3e58d710013c..4b39af2dfda9 100644
--- a/arch/arm/lib/uaccess_with_memcpy.c
+++ b/arch/arm/lib/uaccess_with_memcpy.c
@@ -96,7 +96,7 @@ __copy_to_user_memcpy(void __user *to, const void *from, unsigned long n)
 	}
 
 	/* the mmap semaphore is taken only if not in an atomic context */
-	atomic = in_atomic();
+	atomic = faulthandler_disabled();
 
 	if (!atomic)
 		down_read(&current->mm->mmap_sem);
diff --git a/arch/arm/mach-omap2/omap-wakeupgen.c b/arch/arm/mach-omap2/omap-wakeupgen.c
index 8e52621b5a6b..e1d2e991d17a 100644
--- a/arch/arm/mach-omap2/omap-wakeupgen.c
+++ b/arch/arm/mach-omap2/omap-wakeupgen.c
@@ -392,6 +392,7 @@ static struct irq_chip wakeupgen_chip = {
 	.irq_mask = wakeupgen_mask,
 	.irq_unmask = wakeupgen_unmask,
 	.irq_retrigger = irq_chip_retrigger_hierarchy,
+	.irq_set_type = irq_chip_set_type_parent,
 	.flags = IRQCHIP_SKIP_SET_WAKE | IRQCHIP_MASK_ON_SUSPEND,
 #ifdef CONFIG_SMP
 	.irq_set_affinity = irq_chip_set_affinity_parent,
diff --git a/arch/arm64/kvm/inject_fault.c b/arch/arm64/kvm/inject_fault.c
index f02530e726f6..85c57158dcd9 100644
--- a/arch/arm64/kvm/inject_fault.c
+++ b/arch/arm64/kvm/inject_fault.c
@@ -168,8 +168,8 @@ void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr)
 {
 	if (!(vcpu->arch.hcr_el2 & HCR_RW))
 		inject_abt32(vcpu, false, addr);
-
-	inject_abt64(vcpu, false, addr);
+	else
+		inject_abt64(vcpu, false, addr);
 }
 
 /**
@@ -184,8 +184,8 @@ void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr)
 {
 	if (!(vcpu->arch.hcr_el2 & HCR_RW))
 		inject_abt32(vcpu, true, addr);
-
-	inject_abt64(vcpu, true, addr);
+	else
+		inject_abt64(vcpu, true, addr);
 }
 
 /**
@@ -198,6 +198,6 @@ void kvm_inject_undefined(struct kvm_vcpu *vcpu)
 {
 	if (!(vcpu->arch.hcr_el2 & HCR_RW))
 		inject_undef32(vcpu);
-
-	inject_undef64(vcpu);
+	else
+		inject_undef64(vcpu);
 }
diff --git a/arch/mips/kernel/genex.S b/arch/mips/kernel/genex.S
index af42e7003f12..baa7b6fc0a60 100644
--- a/arch/mips/kernel/genex.S
+++ b/arch/mips/kernel/genex.S
@@ -407,7 +407,7 @@ NESTED(nmi_handler, PT_SIZE, sp)
 	.set noat
 	SAVE_ALL
 	FEXPORT(handle_\exception\ext)
-	__BUILD_clear_\clear
+	__build_clear_\clear
 	.set at
 	__BUILD_\verbose \exception
 	move a0, sp
diff --git a/arch/powerpc/kernel/pci_of_scan.c b/arch/powerpc/kernel/pci_of_scan.c
index 42e02a2d570b..efc3fa54c90b 100644
--- a/arch/powerpc/kernel/pci_of_scan.c
+++ b/arch/powerpc/kernel/pci_of_scan.c
@@ -191,6 +191,9 @@ struct pci_dev *of_create_pci_dev(struct device_node *node,
 
 	pci_device_add(dev, bus);
 
+	/* Setup MSI caps & disable MSI/MSI-X interrupts */
+	pci_msi_setup_pci_dev(dev);
+
 	return dev;
 }
 EXPORT_SYMBOL(of_create_pci_dev);
diff --git a/arch/x86/include/asm/switch_to.h b/arch/x86/include/asm/switch_to.h
index 751bf4b7bf11..d7f3b3b78ac3 100644
--- a/arch/x86/include/asm/switch_to.h
+++ b/arch/x86/include/asm/switch_to.h
@@ -79,12 +79,12 @@ do { \
 #else /* CONFIG_X86_32 */
 
 /* frame pointer must be last for get_wchan */
-#define SAVE_CONTEXT "pushq %%rbp ; movq %%rsi,%%rbp\n\t"
-#define RESTORE_CONTEXT "movq %%rbp,%%rsi ; popq %%rbp\t"
+#define SAVE_CONTEXT "pushf ; pushq %%rbp ; movq %%rsi,%%rbp\n\t"
+#define RESTORE_CONTEXT "movq %%rbp,%%rsi ; popq %%rbp ; popf\t"
 
 #define __EXTRA_CLOBBER \
 	, "rcx", "rbx", "rdx", "r8", "r9", "r10", "r11", \
-	"r12", "r13", "r14", "r15", "flags"
+	"r12", "r13", "r14", "r15"
 
 #ifdef CONFIG_CC_STACKPROTECTOR
 #define __switch_canary \
@@ -100,11 +100,7 @@ do { \
 #define __switch_canary_iparam
 #endif /* CC_STACKPROTECTOR */
 
-/*
- * There is no need to save or restore flags, because flags are always
- * clean in kernel mode, with the possible exception of IOPL. Kernel IOPL
- * has no effect.
- */
+/* Save restore flags to clear handle leaking NT */
 #define switch_to(prev, next, last) \
 	asm volatile(SAVE_CONTEXT \
 	"movq %%rsp,%P[threadrsp](%[prev])\n\t" /* save RSP */ \
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index dcb52850a28f..cde732c1b495 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -1424,7 +1424,7 @@ static inline void __x2apic_disable(void)
 {
 	u64 msr;
 
-	if (cpu_has_apic)
+	if (!cpu_has_apic)
 		return;
 
 	rdmsrl(MSR_IA32_APICBASE, msr);
@@ -1483,10 +1483,13 @@ void x2apic_setup(void)
 
 static __init void x2apic_disable(void)
 {
-	u32 x2apic_id;
+	u32 x2apic_id, state = x2apic_state;
 
-	if (x2apic_state != X2APIC_ON)
-		goto out;
+	x2apic_mode = 0;
+	x2apic_state = X2APIC_DISABLED;
+
+	if (state != X2APIC_ON)
+		return;
 
 	x2apic_id = read_apic_id();
 	if (x2apic_id >= 255)
@@ -1494,9 +1497,6 @@ static __init void x2apic_disable(void)
 
 	__x2apic_disable();
 	register_lapic_address(mp_lapic_addr);
-out:
-	x2apic_state = X2APIC_DISABLED;
-	x2apic_mode = 0;
 }
 
 static __init void x2apic_enable(void)
diff --git a/arch/x86/kernel/apic/vector.c b/arch/x86/kernel/apic/vector.c
index f813261d9740..2683f36e4e0a 100644
--- a/arch/x86/kernel/apic/vector.c
+++ b/arch/x86/kernel/apic/vector.c
@@ -322,7 +322,7 @@ static int x86_vector_alloc_irqs(struct irq_domain *domain, unsigned int virq,
 		irq_data->chip = &lapic_controller;
 		irq_data->chip_data = data;
 		irq_data->hwirq = virq + i;
-		err = assign_irq_vector_policy(virq, irq_data->node, data,
+		err = assign_irq_vector_policy(virq + i, irq_data->node, data,
 					       info);
 		if (err)
 			goto error;
diff --git a/arch/x86/kernel/fpu/core.c b/arch/x86/kernel/fpu/core.c
index 79de954626fd..d25097c3fc1d 100644
--- a/arch/x86/kernel/fpu/core.c
+++ b/arch/x86/kernel/fpu/core.c
@@ -270,7 +270,7 @@ int fpu__copy(struct fpu *dst_fpu, struct fpu *src_fpu)
 	dst_fpu->fpregs_active = 0;
 	dst_fpu->last_cpu = -1;
 
-	if (src_fpu->fpstate_active)
+	if (src_fpu->fpstate_active && cpu_has_fpu)
 		fpu_copy(dst_fpu, src_fpu);
 
 	return 0;
diff --git a/arch/x86/kernel/fpu/init.c b/arch/x86/kernel/fpu/init.c
index 1e173f6285c7..d14e9ac3235a 100644
--- a/arch/x86/kernel/fpu/init.c
+++ b/arch/x86/kernel/fpu/init.c
@@ -40,7 +40,12 @@ static void fpu__init_cpu_generic(void)
 	write_cr0(cr0);
 
 	/* Flush out any pending x87 state: */
-	asm volatile ("fninit");
+#ifdef CONFIG_MATH_EMULATION
+	if (!cpu_has_fpu)
+		fpstate_init_soft(&current->thread.fpu.state.soft);
+	else
+#endif
+		asm volatile ("fninit");
 }
 
 /*
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 397688beed4b..c27cad726765 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -408,6 +408,7 @@ static int prefer_mwait_c1_over_halt(const struct cpuinfo_x86 *c)
 static void mwait_idle(void)
 {
 	if (!current_set_polling_and_test()) {
+		trace_cpu_idle_rcuidle(1, smp_processor_id());
 		if (this_cpu_has(X86_BUG_CLFLUSH_MONITOR)) {
 			smp_mb(); /* quirk */
 			clflush((void *)&current_thread_info()->flags);
@@ -419,6 +420,7 @@ static void mwait_idle(void)
 			__sti_mwait(0, 0);
 		else
 			local_irq_enable();
+		trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
 	} else {
 		local_irq_enable();
 	}
diff --git a/arch/x86/xen/Kconfig b/arch/x86/xen/Kconfig
index e88fda867a33..484145368a24 100644
--- a/arch/x86/xen/Kconfig
+++ b/arch/x86/xen/Kconfig
@@ -8,7 +8,7 @@ config XEN
 	select PARAVIRT_CLOCK
 	select XEN_HAVE_PVMMU
 	depends on X86_64 || (X86_32 && X86_PAE)
-	depends on X86_TSC
+	depends on X86_LOCAL_APIC && X86_TSC
 	help
 	  This is the Linux Xen port. Enabling this will allow the
 	  kernel to boot in a paravirtualized environment under the
@@ -17,7 +17,7 @@ config XEN
 config XEN_DOM0
 	def_bool y
 	depends on XEN && PCI_XEN && SWIOTLB_XEN
-	depends on X86_LOCAL_APIC && X86_IO_APIC && ACPI && PCI
+	depends on X86_IO_APIC && ACPI && PCI
 
 config XEN_PVHVM
 	def_bool y
diff --git a/drivers/acpi/nfit.c b/drivers/acpi/nfit.c
index 628a42c41ab1..cf0fd96a7602 100644
--- a/drivers/acpi/nfit.c
+++ b/drivers/acpi/nfit.c
@@ -702,11 +702,11 @@ static ssize_t flags_show(struct device *dev,
 	u16 flags = to_nfit_memdev(dev)->flags;
 
 	return sprintf(buf, "%s%s%s%s%s\n",
-		flags & ACPI_NFIT_MEM_SAVE_FAILED ? "save " : "",
-		flags & ACPI_NFIT_MEM_RESTORE_FAILED ? "restore " : "",
-		flags & ACPI_NFIT_MEM_FLUSH_FAILED ? "flush " : "",
-		flags & ACPI_NFIT_MEM_ARMED ? "arm " : "",
-		flags & ACPI_NFIT_MEM_HEALTH_OBSERVED ? "smart " : "");
+		flags & ACPI_NFIT_MEM_SAVE_FAILED ? "save_fail " : "",
+		flags & ACPI_NFIT_MEM_RESTORE_FAILED ? "restore_fail " : "",
+		flags & ACPI_NFIT_MEM_FLUSH_FAILED ? "flush_fail " : "",
+		flags & ACPI_NFIT_MEM_ARMED ? "not_armed " : "",
+		flags & ACPI_NFIT_MEM_HEALTH_OBSERVED ? "smart_event " : "");
 }
 static DEVICE_ATTR_RO(flags);
 
@@ -849,12 +849,12 @@ static int acpi_nfit_register_dimms(struct acpi_nfit_desc *acpi_desc)
 		if ((mem_flags & ACPI_NFIT_MEM_FAILED_MASK) == 0)
 			continue;
 
-		dev_info(acpi_desc->dev, "%s: failed: %s%s%s%s\n",
+		dev_info(acpi_desc->dev, "%s flags:%s%s%s%s\n",
 				nvdimm_name(nvdimm),
-			mem_flags & ACPI_NFIT_MEM_SAVE_FAILED ? "save " : "",
-			mem_flags & ACPI_NFIT_MEM_RESTORE_FAILED ? "restore " : "",
-			mem_flags & ACPI_NFIT_MEM_FLUSH_FAILED ? "flush " : "",
-			mem_flags & ACPI_NFIT_MEM_ARMED ? "arm " : "");
+			mem_flags & ACPI_NFIT_MEM_SAVE_FAILED ? " save_fail" : "",
+			mem_flags & ACPI_NFIT_MEM_RESTORE_FAILED ? " restore_fail":"",
+			mem_flags & ACPI_NFIT_MEM_FLUSH_FAILED ? " flush_fail" : "",
+			mem_flags & ACPI_NFIT_MEM_ARMED ? " not_armed" : "");
 
 	}
 
@@ -1024,7 +1024,7 @@ static void wmb_blk(struct nfit_blk *nfit_blk)
 		wmb_pmem();
 }
 
-static u64 read_blk_stat(struct nfit_blk *nfit_blk, unsigned int bw)
+static u32 read_blk_stat(struct nfit_blk *nfit_blk, unsigned int bw)
 {
 	struct nfit_blk_mmio *mmio = &nfit_blk->mmio[DCR];
 	u64 offset = nfit_blk->stat_offset + mmio->size * bw;
@@ -1032,7 +1032,7 @@ static u64 read_blk_stat(struct nfit_blk *nfit_blk, unsigned int bw)
 	if (mmio->num_lines)
 		offset = to_interleave_offset(offset, mmio);
 
-	return readq(mmio->base + offset);
+	return readl(mmio->base + offset);
 }
 
 static void write_blk_ctl(struct nfit_blk *nfit_blk, unsigned int bw,
diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c
index 815f75ef2411..2922f1f252d5 100644
--- a/drivers/acpi/video_detect.c
+++ b/drivers/acpi/video_detect.c
@@ -32,6 +32,7 @@
 #include <linux/module.h>
 #include <linux/pci.h>
 #include <linux/types.h>
+#include <linux/workqueue.h>
 #include <acpi/video.h>
 
 ACPI_MODULE_NAME("video");
@@ -41,6 +42,7 @@ void acpi_video_unregister_backlight(void);
 
 static bool backlight_notifier_registered;
 static struct notifier_block backlight_nb;
+static struct work_struct backlight_notify_work;
 
 static enum acpi_backlight_type acpi_backlight_cmdline = acpi_backlight_undef;
 static enum acpi_backlight_type acpi_backlight_dmi = acpi_backlight_undef;
@@ -262,6 +264,13 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
 	{ },
 };
 
+/* This uses a workqueue to avoid various locking ordering issues */
+static void acpi_video_backlight_notify_work(struct work_struct *work)
+{
+	if (acpi_video_get_backlight_type() != acpi_backlight_video)
+		acpi_video_unregister_backlight();
+}
+
 static int acpi_video_backlight_notify(struct notifier_block *nb,
 				       unsigned long val, void *bd)
 {
@@ -269,9 +278,8 @@ static int acpi_video_backlight_notify(struct notifier_block *nb,
 
 	/* A raw bl registering may change video -> native */
 	if (backlight->props.type == BACKLIGHT_RAW &&
-	    val == BACKLIGHT_REGISTERED &&
-	    acpi_video_get_backlight_type() != acpi_backlight_video)
-		acpi_video_unregister_backlight();
+	    val == BACKLIGHT_REGISTERED)
+		schedule_work(&backlight_notify_work);
 
 	return NOTIFY_OK;
 }
@@ -304,6 +312,8 @@ enum acpi_backlight_type acpi_video_get_backlight_type(void)
 		acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
 				    ACPI_UINT32_MAX, find_video, NULL,
 				    &video_caps, NULL);
+		INIT_WORK(&backlight_notify_work,
+			  acpi_video_backlight_notify_work);
 		backlight_nb.notifier_call = acpi_video_backlight_notify;
 		backlight_nb.priority = 0;
 		if (backlight_register_notifier(&backlight_nb) == 0)
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 19bcb80b2031..790e0deb278e 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -4230,6 +4230,8 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
 						ATA_HORKAGE_ZERO_AFTER_TRIM, },
 	{ "Samsung SSD 8*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
 						ATA_HORKAGE_ZERO_AFTER_TRIM, },
+	{ "FCCT*M500*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
+						ATA_HORKAGE_ZERO_AFTER_TRIM, },
 
 	/* devices that don't properly handle TRIM commands */
 	{ "SuperSSpeed S238*", NULL, ATA_HORKAGE_NOTRIM, },
diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c
index 4a2ef09e6704..f504232c1ee7 100644
--- a/drivers/block/mtip32xx/mtip32xx.c
+++ b/drivers/block/mtip32xx/mtip32xx.c
@@ -3756,6 +3756,14 @@ static int mtip_init_cmd(void *data, struct request *rq, unsigned int hctx_idx,
 	struct mtip_cmd *cmd = blk_mq_rq_to_pdu(rq);
 	u32 host_cap_64 = readl(dd->mmio + HOST_CAP) & HOST_CAP_64;
 
+	/*
+	 * For flush requests, request_idx starts at the end of the
+	 * tag space. Since we don't support FLUSH/FUA, simply return
+	 * 0 as there's nothing to be done.
+	 */
+	if (request_idx >= MTIP_MAX_COMMAND_SLOTS)
+		return 0;
+
 	cmd->command = dmam_alloc_coherent(&dd->pdev->dev, CMD_DMA_ALLOC_SZ,
 			&cmd->command_dma, GFP_KERNEL);
 	if (!cmd->command)
diff --git a/drivers/clocksource/timer-imx-gpt.c b/drivers/clocksource/timer-imx-gpt.c
index 2d59038dec43..86c7eb66bdfb 100644
--- a/drivers/clocksource/timer-imx-gpt.c
+++ b/drivers/clocksource/timer-imx-gpt.c
@@ -462,6 +462,7 @@ void __init mxc_timer_init(unsigned long pbase, int irq, enum imx_gpt_type type)
 	BUG_ON(!imxtm->base);
 
 	imxtm->type = type;
+	imxtm->irq = irq;
 
 	_mxc_timer_init(imxtm);
 }
diff --git a/drivers/cpufreq/exynos-cpufreq.c b/drivers/cpufreq/exynos-cpufreq.c
index ae5b2bd3a978..fa3dd840a837 100644
--- a/drivers/cpufreq/exynos-cpufreq.c
+++ b/drivers/cpufreq/exynos-cpufreq.c
@@ -180,7 +180,7 @@ static int exynos_cpufreq_probe(struct platform_device *pdev)
 		ret = exynos5250_cpufreq_init(exynos_info);
 	} else {
 		pr_err("%s: Unknown SoC type\n", __func__);
-		return -ENODEV;
+		ret = -ENODEV;
 	}
 
 	if (ret)
@@ -188,12 +188,14 @@ static int exynos_cpufreq_probe(struct platform_device *pdev)
 
 	if (exynos_info->set_freq == NULL) {
 		dev_err(&pdev->dev, "No set_freq function (ERR)\n");
+		ret = -EINVAL;
 		goto err_vdd_arm;
 	}
 
 	arm_regulator = regulator_get(NULL, "vdd_arm");
 	if (IS_ERR(arm_regulator)) {
 		dev_err(&pdev->dev, "failed to get resource vdd_arm\n");
+		ret = -EINVAL;
 		goto err_vdd_arm;
 	}
 
@@ -225,7 +227,7 @@ err_cpufreq_reg:
 	regulator_put(arm_regulator);
 err_vdd_arm:
 	kfree(exynos_info);
-	return -EINVAL;
+	return ret;
 }
 
 static struct platform_driver exynos_cpufreq_platdrv = {
diff --git a/drivers/firmware/broadcom/bcm47xx_nvram.c b/drivers/firmware/broadcom/bcm47xx_nvram.c
index 87add3fdce52..e41594510b97 100644
--- a/drivers/firmware/broadcom/bcm47xx_nvram.c
+++ b/drivers/firmware/broadcom/bcm47xx_nvram.c
@@ -245,4 +245,4 @@ char *bcm47xx_nvram_get_contents(size_t *nvram_size)
 }
 EXPORT_SYMBOL(bcm47xx_nvram_get_contents);
 
-MODULE_LICENSE("GPLv2");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
index 6fad1f9648f3..ef6182bc8e5e 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
@@ -559,7 +559,7 @@ static int atmel_hlcdc_dc_drm_remove(struct platform_device *pdev)
 	return 0;
 }
 
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
 static int atmel_hlcdc_dc_drm_suspend(struct device *dev)
 {
 	struct drm_device *drm_dev = dev_get_drvdata(dev);
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index 3dcd59e694db..198fc3c3291b 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -1075,34 +1075,15 @@ parse_device_mapping(struct drm_i915_private *dev_priv,
 	const union child_device_config *p_child;
 	union child_device_config *child_dev_ptr;
 	int i, child_device_num, count;
-	u8 expected_size;
-	u16 block_size;
+	u16 block_size;
 
 	p_defs = find_section(bdb, BDB_GENERAL_DEFINITIONS);
 	if (!p_defs) {
 		DRM_DEBUG_KMS("No general definition block is found, no devices defined.\n");
 		return;
 	}
-	if (bdb->version < 195) {
-		expected_size = 33;
-	} else if (bdb->version == 195) {
-		expected_size = 37;
-	} else if (bdb->version <= 197) {
-		expected_size = 38;
-	} else {
-		expected_size = 38;
-		DRM_DEBUG_DRIVER("Expected child_device_config size for BDB version %u not known; assuming %u\n",
-				 expected_size, bdb->version);
-	}
-
-	if (expected_size > sizeof(*p_child)) {
-		DRM_ERROR("child_device_config cannot fit in p_child\n");
-		return;
-	}
-
-	if (p_defs->child_dev_size != expected_size) {
-		DRM_ERROR("Size mismatch; child_device_config size=%u (expected %u); bdb->version: %u\n",
-			  p_defs->child_dev_size, expected_size, bdb->version);
+	if (p_defs->child_dev_size < sizeof(*p_child)) {
+		DRM_ERROR("General definiton block child device size is too small.\n");
 		return;
 	}
 	/* get the block size of general definitions */
@@ -1149,7 +1130,7 @@ parse_device_mapping(struct drm_i915_private *dev_priv,
 
 		child_dev_ptr = dev_priv->vbt.child_dev + count;
 		count++;
-		memcpy(child_dev_ptr, p_child, p_defs->child_dev_size);
+		memcpy(child_dev_ptr, p_child, sizeof(*p_child));
 	}
 	return;
 }
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 6e8faa253792..1df0e1fe235f 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -93,9 +93,6 @@ static const struct dp_link_dpll chv_dpll[] = {
 
 static const int skl_rates[] = { 162000, 216000, 270000,
 				  324000, 432000, 540000 };
-static const int chv_rates[] = { 162000, 202500, 210000, 216000,
-				 243000, 270000, 324000, 405000,
-				 420000, 432000, 540000 };
 static const int default_rates[] = { 162000, 270000, 540000 };
 
 /**
@@ -1169,24 +1166,31 @@ intel_dp_sink_rates(struct intel_dp *intel_dp, const int **sink_rates)
 	return (intel_dp_max_link_bw(intel_dp) >> 3) + 1;
 }
 
+static bool intel_dp_source_supports_hbr2(struct drm_device *dev)
+{
+	/* WaDisableHBR2:skl */
+	if (IS_SKYLAKE(dev) && INTEL_REVID(dev) <= SKL_REVID_B0)
+		return false;
+
+	if ((IS_HASWELL(dev) && !IS_HSW_ULX(dev)) || IS_BROADWELL(dev) ||
+	    (INTEL_INFO(dev)->gen >= 9))
+		return true;
+	else
+		return false;
+}
+
 static int
 intel_dp_source_rates(struct drm_device *dev, const int **source_rates)
 {
 	if (IS_SKYLAKE(dev)) {
 		*source_rates = skl_rates;
 		return ARRAY_SIZE(skl_rates);
-	} else if (IS_CHERRYVIEW(dev)) {
-		*source_rates = chv_rates;
-		return ARRAY_SIZE(chv_rates);
 	}
 
 	*source_rates = default_rates;
 
-	if (IS_SKYLAKE(dev) && INTEL_REVID(dev) <= SKL_REVID_B0)
-		/* WaDisableHBR2:skl */
-		return (DP_LINK_BW_2_7 >> 3) + 1;
-	else if (INTEL_INFO(dev)->gen >= 8 ||
-	    (IS_HASWELL(dev) && !IS_HSW_ULX(dev)))
+	/* This depends on the fact that 5.4 is last value in the array */
+	if (intel_dp_source_supports_hbr2(dev))
 		return (DP_LINK_BW_5_4 >> 3) + 1;
 	else
 		return (DP_LINK_BW_2_7 >> 3) + 1;
@@ -3941,10 +3945,15 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp)
 		}
 	}
 
-	/* Training Pattern 3 support, both source and sink */
+	/* Training Pattern 3 support, Intel platforms that support HBR2 alone
+	 * have support for TP3 hence that check is used along with dpcd check
+	 * to ensure TP3 can be enabled.
+	 * SKL < B0: due it's WaDisableHBR2 is the only exception where TP3 is
+	 * supported but still not enabled.
+	 */
 	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x12 &&
 	    intel_dp->dpcd[DP_MAX_LANE_COUNT] & DP_TPS3_SUPPORTED &&
-	    (IS_HASWELL(dev_priv) || INTEL_INFO(dev_priv)->gen >= 8)) {
+	    intel_dp_source_supports_hbr2(dev)) {
 		intel_dp->use_tps3 = true;
 		DRM_DEBUG_KMS("Displayport TPS3 supported\n");
 	} else
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 9b74ffae5f5a..7f2161a1ff5d 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -1012,6 +1012,8 @@ static int intel_lr_context_pin(struct intel_engine_cs *ring,
 		ret = intel_pin_and_map_ringbuffer_obj(ring->dev, ringbuf);
 		if (ret)
 			goto unpin_ctx_obj;
+
+		ctx_obj->dirty = true;
 	}
 
 	return ret;
diff --git a/drivers/gpu/drm/radeon/radeon_irq_kms.c b/drivers/gpu/drm/radeon/radeon_irq_kms.c
index 1162bfa464f3..171d3e43c30c 100644
--- a/drivers/gpu/drm/radeon/radeon_irq_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_irq_kms.c
@@ -79,6 +79,11 @@ static void radeon_hotplug_work_func(struct work_struct *work)
 	struct drm_mode_config *mode_config = &dev->mode_config;
 	struct drm_connector *connector;
 
+	/* we can race here at startup, some boards seem to trigger
+	 * hotplug irqs when they shouldn't. */
+	if (!rdev->mode_info.mode_config_initialized)
+		return;
+
 	mutex_lock(&mode_config->mutex);
 	if (mode_config->num_connector) {
 		list_for_each_entry(connector, &mode_config->connector_list, head)
diff --git a/drivers/input/keyboard/gpio_keys_polled.c b/drivers/input/keyboard/gpio_keys_polled.c
index 097d7216d98e..c6dc644aa580 100644
--- a/drivers/input/keyboard/gpio_keys_polled.c
+++ b/drivers/input/keyboard/gpio_keys_polled.c
@@ -246,7 +246,7 @@ static int gpio_keys_polled_probe(struct platform_device *pdev)
 		 * convert it to descriptor.
 		 */
 		if (!button->gpiod && gpio_is_valid(button->gpio)) {
-			unsigned flags = 0;
+			unsigned flags = GPIOF_IN;
 
 			if (button->active_low)
 				flags |= GPIOF_ACTIVE_LOW;
diff --git a/drivers/irqchip/irq-crossbar.c b/drivers/irqchip/irq-crossbar.c
index 692fe2bc8197..c12bb93334ff 100644
--- a/drivers/irqchip/irq-crossbar.c
+++ b/drivers/irqchip/irq-crossbar.c
@@ -68,7 +68,9 @@ static struct irq_chip crossbar_chip = {
 	.irq_mask = irq_chip_mask_parent,
 	.irq_unmask = irq_chip_unmask_parent,
 	.irq_retrigger = irq_chip_retrigger_hierarchy,
-	.irq_set_wake = irq_chip_set_wake_parent,
+	.irq_set_type = irq_chip_set_type_parent,
+	.flags = IRQCHIP_MASK_ON_SUSPEND |
+		 IRQCHIP_SKIP_SET_WAKE,
 #ifdef CONFIG_SMP
 	.irq_set_affinity = irq_chip_set_affinity_parent,
 #endif
diff --git a/drivers/media/dvb-frontends/Kconfig b/drivers/media/dvb-frontends/Kconfig
index 0d35f5850ff1..5ab90f36a6a6 100644
--- a/drivers/media/dvb-frontends/Kconfig
+++ b/drivers/media/dvb-frontends/Kconfig
@@ -240,7 +240,7 @@ config DVB_SI21XX
 
 config DVB_TS2020
 	tristate "Montage Tehnology TS2020 based tuners"
-	depends on DVB_CORE
+	depends on DVB_CORE && I2C
 	select REGMAP_I2C
 	default m if !MEDIA_SUBDRV_AUTOSELECT
 	help
diff --git a/drivers/media/pci/cobalt/Kconfig b/drivers/media/pci/cobalt/Kconfig
index 3be1b2c3c386..6a1c0089bb62 100644
--- a/drivers/media/pci/cobalt/Kconfig
+++ b/drivers/media/pci/cobalt/Kconfig
@@ -2,6 +2,7 @@ config VIDEO_COBALT
 	tristate "Cisco Cobalt support"
 	depends on VIDEO_V4L2 && I2C && MEDIA_CONTROLLER
 	depends on PCI_MSI && MTD_COMPLEX_MAPPINGS && GPIOLIB
+	depends on SND
 	select I2C_ALGOBIT
 	select VIDEO_ADV7604
 	select VIDEO_ADV7511
diff --git a/drivers/media/pci/cobalt/cobalt-irq.c b/drivers/media/pci/cobalt/cobalt-irq.c
index dd4bff9cf339..d1f5898d11ba 100644
--- a/drivers/media/pci/cobalt/cobalt-irq.c
+++ b/drivers/media/pci/cobalt/cobalt-irq.c
@@ -139,7 +139,7 @@ done:
 	   also know about dropped frames. */
 	cb->vb.v4l2_buf.sequence = s->sequence++;
 	vb2_buffer_done(&cb->vb, (skip || s->unstable_frame) ?
-			VB2_BUF_STATE_QUEUED : VB2_BUF_STATE_DONE);
+			VB2_BUF_STATE_REQUEUEING : VB2_BUF_STATE_DONE);
 }
 
 irqreturn_t cobalt_irq_handler(int irq, void *dev_id)
diff --git a/drivers/media/pci/mantis/mantis_dma.c b/drivers/media/pci/mantis/mantis_dma.c
index 1d59c7e039f7..87990ece5848 100644
--- a/drivers/media/pci/mantis/mantis_dma.c
+++ b/drivers/media/pci/mantis/mantis_dma.c
@@ -130,10 +130,11 @@ err:
 
 int mantis_dma_init(struct mantis_pci *mantis)
 {
-	int err = 0;
+	int err;
 
 	dprintk(MANTIS_DEBUG, 1, "Mantis DMA init");
-	if (mantis_alloc_buffers(mantis) < 0) {
+	err = mantis_alloc_buffers(mantis);
+	if (err < 0) {
 		dprintk(MANTIS_ERROR, 1, "Error allocating DMA buffer");
 
 		/* Stop RISC Engine */
diff --git a/drivers/media/rc/ir-rc5-decoder.c b/drivers/media/rc/ir-rc5-decoder.c
index 8939ebd74391..84fa6e9b59a1 100644
--- a/drivers/media/rc/ir-rc5-decoder.c
+++ b/drivers/media/rc/ir-rc5-decoder.c
@@ -184,125 +184,9 @@ out:
 	return -EINVAL;
 }
 
-static struct ir_raw_timings_manchester ir_rc5_timings = {
-	.leader = RC5_UNIT,
-	.pulse_space_start = 0,
-	.clock = RC5_UNIT,
-	.trailer_space = RC5_UNIT * 10,
-};
-
-static struct ir_raw_timings_manchester ir_rc5x_timings[2] = {
-	{
-		.leader = RC5_UNIT,
-		.pulse_space_start = 0,
-		.clock = RC5_UNIT,
-		.trailer_space = RC5X_SPACE,
-	},
-	{
-		.clock = RC5_UNIT,
-		.trailer_space = RC5_UNIT * 10,
-	},
-};
-
-static struct ir_raw_timings_manchester ir_rc5_sz_timings = {
-	.leader = RC5_UNIT,
-	.pulse_space_start = 0,
-	.clock = RC5_UNIT,
-	.trailer_space = RC5_UNIT * 10,
-};
-
-static int ir_rc5_validate_filter(const struct rc_scancode_filter *scancode,
-				  unsigned int important_bits)
-{
-	/* all important bits of scancode should be set in mask */
-	if (~scancode->mask & important_bits)
-		return -EINVAL;
-	/* extra bits in mask should be zero in data */
-	if (scancode->mask & scancode->data & ~important_bits)
-		return -EINVAL;
-	return 0;
-}
-
-/**
- * ir_rc5_encode() - Encode a scancode as a stream of raw events
- *
- * @protocols: allowed protocols
- * @scancode: scancode filter describing scancode (helps distinguish between
- *	protocol subtypes when scancode is ambiguous)
- * @events: array of raw ir events to write into
- * @max: maximum size of @events
- *
- * Returns: The number of events written.
- *	-ENOBUFS if there isn't enough space in the array to fit the
- *	encoding. In this case all @max events will have been written.
- *	-EINVAL if the scancode is ambiguous or invalid.
- */
-static int ir_rc5_encode(u64 protocols,
-			 const struct rc_scancode_filter *scancode,
-			 struct ir_raw_event *events, unsigned int max)
-{
-	int ret;
-	struct ir_raw_event *e = events;
-	unsigned int data, xdata, command, commandx, system;
-
-	/* Detect protocol and convert scancode to raw data */
-	if (protocols & RC_BIT_RC5 &&
-	    !ir_rc5_validate_filter(scancode, 0x1f7f)) {
-		/* decode scancode */
-		command = (scancode->data & 0x003f) >> 0;
-		commandx = (scancode->data & 0x0040) >> 6;
-		system = (scancode->data & 0x1f00) >> 8;
-		/* encode data */
-		data = !commandx << 12 | system << 6 | command;
-
-		/* Modulate the data */
-		ret = ir_raw_gen_manchester(&e, max, &ir_rc5_timings, RC5_NBITS,
-					    data);
-		if (ret < 0)
-			return ret;
-	} else if (protocols & RC_BIT_RC5X &&
-		   !ir_rc5_validate_filter(scancode, 0x1f7f3f)) {
-		/* decode scancode */
-		xdata = (scancode->data & 0x00003f) >> 0;
-		command = (scancode->data & 0x003f00) >> 8;
-		commandx = (scancode->data & 0x004000) >> 14;
-		system = (scancode->data & 0x1f0000) >> 16;
-		/* commandx and system overlap, bits must match when encoded */
-		if (commandx == (system & 0x1))
-			return -EINVAL;
-		/* encode data */
-		data = 1 << 18 | system << 12 | command << 6 | xdata;
-
-		/* Modulate the data */
-		ret = ir_raw_gen_manchester(&e, max, &ir_rc5x_timings[0],
-					    CHECK_RC5X_NBITS,
-					    data >> (RC5X_NBITS-CHECK_RC5X_NBITS));
-		if (ret < 0)
-			return ret;
-		ret = ir_raw_gen_manchester(&e, max - (e - events),
-					    &ir_rc5x_timings[1],
-					    RC5X_NBITS - CHECK_RC5X_NBITS,
-					    data);
-		if (ret < 0)
-			return ret;
-	} else if (protocols & RC_BIT_RC5_SZ &&
-		   !ir_rc5_validate_filter(scancode, 0x2fff)) {
-		/* RC5-SZ scancode is raw enough for Manchester as it is */
-		ret = ir_raw_gen_manchester(&e, max, &ir_rc5_sz_timings,
-					    RC5_SZ_NBITS, scancode->data & 0x2fff);
-		if (ret < 0)
-			return ret;
-	} else {
-		return -EINVAL;
-	}
-
-	return e - events;
-}
-
 static struct ir_raw_handler rc5_handler = {
 	.protocols = RC_BIT_RC5 | RC_BIT_RC5X | RC_BIT_RC5_SZ,
 	.decode = ir_rc5_decode,
-	.encode = ir_rc5_encode,
 };
 
 static int __init ir_rc5_decode_init(void)
diff --git a/drivers/media/rc/ir-rc6-decoder.c b/drivers/media/rc/ir-rc6-decoder.c
index f9c70baf6e0c..d16bc67af732 100644
--- a/drivers/media/rc/ir-rc6-decoder.c
+++ b/drivers/media/rc/ir-rc6-decoder.c
@@ -291,133 +291,11 @@ out:
 	return -EINVAL;
 }
 
-static struct ir_raw_timings_manchester ir_rc6_timings[4] = {
-	{
-		.leader = RC6_PREFIX_PULSE,
-		.pulse_space_start = 0,
-		.clock = RC6_UNIT,
-		.invert = 1,
-		.trailer_space = RC6_PREFIX_SPACE,
-	},
-	{
-		.clock = RC6_UNIT,
-		.invert = 1,
-	},
-	{
-		.clock = RC6_UNIT * 2,
-		.invert = 1,
-	},
-	{
-		.clock = RC6_UNIT,
-		.invert = 1,
-		.trailer_space = RC6_SUFFIX_SPACE,
-	},
-};
-
-static int ir_rc6_validate_filter(const struct rc_scancode_filter *scancode,
-				  unsigned int important_bits)
-{
-	/* all important bits of scancode should be set in mask */
-	if (~scancode->mask & important_bits)
-		return -EINVAL;
-	/* extra bits in mask should be zero in data */
-	if (scancode->mask & scancode->data & ~important_bits)
-		return -EINVAL;
-	return 0;
-}
-
-/**
- * ir_rc6_encode() - Encode a scancode as a stream of raw events
- *
- * @protocols: allowed protocols
- * @scancode: scancode filter describing scancode (helps distinguish between
- *	protocol subtypes when scancode is ambiguous)
- * @events: array of raw ir events to write into
- * @max: maximum size of @events
- *
- * Returns: The number of events written.
- *	-ENOBUFS if there isn't enough space in the array to fit the
- *	encoding. In this case all @max events will have been written.
- *	-EINVAL if the scancode is ambiguous or invalid.
- */
-static int ir_rc6_encode(u64 protocols,
-			 const struct rc_scancode_filter *scancode,
-			 struct ir_raw_event *events, unsigned int max)
-{
-	int ret;
-	struct ir_raw_event *e = events;
-
-	if (protocols & RC_BIT_RC6_0 &&
-	    !ir_rc6_validate_filter(scancode, 0xffff)) {
-
-		/* Modulate the preamble */
-		ret = ir_raw_gen_manchester(&e, max, &ir_rc6_timings[0], 0, 0);
-		if (ret < 0)
-			return ret;
-
-		/* Modulate the header (Start Bit & Mode-0) */
-		ret = ir_raw_gen_manchester(&e, max - (e - events),
-					    &ir_rc6_timings[1],
-					    RC6_HEADER_NBITS, (1 << 3));
-		if (ret < 0)
-			return ret;
-
-		/* Modulate Trailer Bit */
-		ret = ir_raw_gen_manchester(&e, max - (e - events),
-					    &ir_rc6_timings[2], 1, 0);
-		if (ret < 0)
-			return ret;
-
-		/* Modulate rest of the data */
-		ret = ir_raw_gen_manchester(&e, max - (e - events),
-					    &ir_rc6_timings[3], RC6_0_NBITS,
-					    scancode->data);
-		if (ret < 0)
-			return ret;
-
-	} else if (protocols & (RC_BIT_RC6_6A_20 | RC_BIT_RC6_6A_24 |
-				RC_BIT_RC6_6A_32 | RC_BIT_RC6_MCE) &&
-		   !ir_rc6_validate_filter(scancode, 0x8fffffff)) {
-
-		/* Modulate the preamble */
-		ret = ir_raw_gen_manchester(&e, max, &ir_rc6_timings[0], 0, 0);
-		if (ret < 0)
-			return ret;
-
-		/* Modulate the header (Start Bit & Header-version 6 */
-		ret = ir_raw_gen_manchester(&e, max - (e - events),
-					    &ir_rc6_timings[1],
-					    RC6_HEADER_NBITS, (1 << 3 | 6));
-		if (ret < 0)
-			return ret;
-
-		/* Modulate Trailer Bit */
-		ret = ir_raw_gen_manchester(&e, max - (e - events),
-					    &ir_rc6_timings[2], 1, 0);
-		if (ret < 0)
-			return ret;
-
-		/* Modulate rest of the data */
-		ret = ir_raw_gen_manchester(&e, max - (e - events),
-					    &ir_rc6_timings[3],
-					    fls(scancode->mask),
-					    scancode->data);
-		if (ret < 0)
-			return ret;
-
-	} else {
-		return -EINVAL;
-	}
-
-	return e - events;
-}
-
 static struct ir_raw_handler rc6_handler = {
 	.protocols = RC_BIT_RC6_0 | RC_BIT_RC6_6A_20 |
 		     RC_BIT_RC6_6A_24 | RC_BIT_RC6_6A_32 |
 		     RC_BIT_RC6_MCE,
 	.decode = ir_rc6_decode,
-	.encode = ir_rc6_encode,
 };
 
 static int __init ir_rc6_decode_init(void)
diff --git a/drivers/media/rc/nuvoton-cir.c b/drivers/media/rc/nuvoton-cir.c
index baeb5971fd52..85af7a869167 100644
--- a/drivers/media/rc/nuvoton-cir.c
+++ b/drivers/media/rc/nuvoton-cir.c
@@ -526,130 +526,6 @@ static int nvt_set_tx_carrier(struct rc_dev *dev, u32 carrier)
 	return 0;
 }
 
-static int nvt_write_wakeup_codes(struct rc_dev *dev,
-				  const u8 *wakeup_sample_buf, int count)
-{
-	int i = 0;
-	u8 reg, reg_learn_mode;
-	unsigned long flags;
-	struct nvt_dev *nvt = dev->priv;
-
-	nvt_dbg_wake("writing wakeup samples");
-
-	reg = nvt_cir_wake_reg_read(nvt, CIR_WAKE_IRCON);
-	reg_learn_mode = reg & ~CIR_WAKE_IRCON_MODE0;
-	reg_learn_mode |= CIR_WAKE_IRCON_MODE1;
-
-	/* Lock the learn area to prevent racing with wake-isr */
-	spin_lock_irqsave(&nvt->nvt_lock, flags);
-
-	/* Enable fifo writes */
-	nvt_cir_wake_reg_write(nvt, reg_learn_mode, CIR_WAKE_IRCON);
-
-	/* Clear cir wake rx fifo */
-	nvt_clear_cir_wake_fifo(nvt);
-
-	if (count > WAKE_FIFO_LEN) {
-		nvt_dbg_wake("HW FIFO too small for all wake samples");
-		count = WAKE_FIFO_LEN;
-	}
-
-	if (count)
-		pr_info("Wake samples (%d) =", count);
-	else
-		pr_info("Wake sample fifo cleared");
-
-	/* Write wake samples to fifo */
-	for (i = 0; i < count; i++) {
-		pr_cont(" %02x", wakeup_sample_buf[i]);
-		nvt_cir_wake_reg_write(nvt, wakeup_sample_buf[i],
-				       CIR_WAKE_WR_FIFO_DATA);
-	}
-	pr_cont("\n");
-
-	/* Switch cir to wakeup mode and disable fifo writing */
-	nvt_cir_wake_reg_write(nvt, reg, CIR_WAKE_IRCON);
-
-	/* Set number of bytes needed for wake */
-	nvt_cir_wake_reg_write(nvt, count ? count :
-			       CIR_WAKE_FIFO_CMP_BYTES,
-			       CIR_WAKE_FIFO_CMP_DEEP);
-
-	spin_unlock_irqrestore(&nvt->nvt_lock, flags);
-
-	return 0;
-}
-
-static int nvt_ir_raw_set_wakeup_filter(struct rc_dev *dev,
-					struct rc_scancode_filter *sc_filter)
-{
-	u8 *reg_buf;
-	u8 buf_val;
-	int i, ret, count;
-	unsigned int val;
-	struct ir_raw_event *raw;
-	bool complete;
-
-	/* Require both mask and data to be set before actually committing */
-	if (!sc_filter->mask || !sc_filter->data)
-		return 0;
-
-	raw = kmalloc_array(WAKE_FIFO_LEN, sizeof(*raw), GFP_KERNEL);
-	if (!raw)
-		return -ENOMEM;
-
-	ret = ir_raw_encode_scancode(dev->enabled_wakeup_protocols, sc_filter,
-				     raw, WAKE_FIFO_LEN);
-	complete = (ret != -ENOBUFS);
-	if (!complete)
-		ret = WAKE_FIFO_LEN;
-	else if (ret < 0)
-		goto out_raw;
-
-	reg_buf = kmalloc_array(WAKE_FIFO_LEN, sizeof(*reg_buf), GFP_KERNEL);
-	if (!reg_buf) {
-		ret = -ENOMEM;
-		goto out_raw;
-	}
-
-	/* Inspect the ir samples */
-	for (i = 0, count = 0; i < ret && count < WAKE_FIFO_LEN; ++i) {
-		val = NS_TO_US((raw[i]).duration) / SAMPLE_PERIOD;
-
-		/* Split too large values into several smaller ones */
-		while (val > 0 && count < WAKE_FIFO_LEN) {
-
-			/* Skip last value for better comparison tolerance */
-			if (complete && i == ret - 1 && val < BUF_LEN_MASK)
-				break;
-
-			/* Clamp values to BUF_LEN_MASK at most */
-			buf_val = (val > BUF_LEN_MASK) ? BUF_LEN_MASK : val;
-
-			reg_buf[count] = buf_val;
-			val -= buf_val;
-			if ((raw[i]).pulse)
-				reg_buf[count] |= BUF_PULSE_BIT;
-			count++;
-		}
-	}
-
-	ret = nvt_write_wakeup_codes(dev, reg_buf, count);
-
-	kfree(reg_buf);
-out_raw:
-	kfree(raw);
-
-	return ret;
-}
-
-/* Dummy implementation. nuvoton is agnostic to the protocol used */
-static int nvt_ir_raw_change_wakeup_protocol(struct rc_dev *dev,
-					     u64 *rc_type)
-{
-	return 0;
-}
-
 /*
  * nvt_tx_ir
  *
@@ -1167,14 +1043,11 @@ static int nvt_probe(struct pnp_dev *pdev, const struct pnp_device_id *dev_id)
 	/* Set up the rc device */
 	rdev->priv = nvt;
 	rdev->driver_type = RC_DRIVER_IR_RAW;
-	rdev->encode_wakeup = true;
 	rdev->allowed_protocols = RC_BIT_ALL;
 	rdev->open = nvt_open;
 	rdev->close = nvt_close;
 	rdev->tx_ir = nvt_tx_ir;
 	rdev->s_tx_carrier = nvt_set_tx_carrier;
-	rdev->s_wakeup_filter = nvt_ir_raw_set_wakeup_filter;
-	rdev->change_wakeup_protocol = nvt_ir_raw_change_wakeup_protocol;
 	rdev->input_name = "Nuvoton w836x7hg Infrared Remote Transceiver";
 	rdev->input_phys = "nuvoton/cir0";
 	rdev->input_id.bustype = BUS_HOST;
diff --git a/drivers/media/rc/nuvoton-cir.h b/drivers/media/rc/nuvoton-cir.h
index 9d0e161c2a88..e1cf23c3875b 100644
--- a/drivers/media/rc/nuvoton-cir.h
+++ b/drivers/media/rc/nuvoton-cir.h
@@ -63,7 +63,6 @@ static int debug;
 */
 #define TX_BUF_LEN 256
 #define RX_BUF_LEN 32
-#define WAKE_FIFO_LEN 67
 
 struct nvt_dev {
 	struct pnp_dev *pdev;
diff --git a/drivers/media/rc/rc-core-priv.h b/drivers/media/rc/rc-core-priv.h index 4b994aa2f2a7..b68d4f762734 100644 --- a/drivers/media/rc/rc-core-priv.h +++ b/drivers/media/rc/rc-core-priv.h | |||
@@ -25,8 +25,6 @@ struct ir_raw_handler { | |||
25 | 25 | ||
26 | u64 protocols; /* which are handled by this handler */ | 26 | u64 protocols; /* which are handled by this handler */ |
27 | int (*decode)(struct rc_dev *dev, struct ir_raw_event event); | 27 | int (*decode)(struct rc_dev *dev, struct ir_raw_event event); |
28 | int (*encode)(u64 protocols, const struct rc_scancode_filter *scancode, | ||
29 | struct ir_raw_event *events, unsigned int max); | ||
30 | 28 | ||
31 | /* These two should only be used by the lirc decoder */ | 29 | /* These two should only be used by the lirc decoder */ |
32 | int (*raw_register)(struct rc_dev *dev); | 30 | int (*raw_register)(struct rc_dev *dev); |
@@ -152,44 +150,10 @@ static inline bool is_timing_event(struct ir_raw_event ev) | |||
152 | #define TO_US(duration) DIV_ROUND_CLOSEST((duration), 1000) | 150 | #define TO_US(duration) DIV_ROUND_CLOSEST((duration), 1000) |
153 | #define TO_STR(is_pulse) ((is_pulse) ? "pulse" : "space") | 151 | #define TO_STR(is_pulse) ((is_pulse) ? "pulse" : "space") |
154 | 152 | ||
155 | /* functions for IR encoders */ | ||
156 | |||
157 | static inline void init_ir_raw_event_duration(struct ir_raw_event *ev, | ||
158 | unsigned int pulse, | ||
159 | u32 duration) | ||
160 | { | ||
161 | init_ir_raw_event(ev); | ||
162 | ev->duration = duration; | ||
163 | ev->pulse = pulse; | ||
164 | } | ||
165 | |||
166 | /** | ||
167 | * struct ir_raw_timings_manchester - Manchester coding timings | ||
168 | * @leader: duration of leader pulse (if any) 0 if continuing | ||
169 | * existing signal (see @pulse_space_start) | ||
170 | * @pulse_space_start: 1 for starting with pulse (0 for starting with space) | ||
171 | * @clock: duration of each pulse/space in ns | ||
172 | * @invert: if set clock logic is inverted | ||
173 | * (0 = space + pulse, 1 = pulse + space) | ||
174 | * @trailer_space: duration of trailer space in ns | ||
175 | */ | ||
176 | struct ir_raw_timings_manchester { | ||
177 | unsigned int leader; | ||
178 | unsigned int pulse_space_start:1; | ||
179 | unsigned int clock; | ||
180 | unsigned int invert:1; | ||
181 | unsigned int trailer_space; | ||
182 | }; | ||
183 | |||
184 | int ir_raw_gen_manchester(struct ir_raw_event **ev, unsigned int max, | ||
185 | const struct ir_raw_timings_manchester *timings, | ||
186 | unsigned int n, unsigned int data); | ||
187 | |||
188 | /* | 153 | /* |
189 | * Routines from rc-raw.c to be used internally and by decoders | 154 | * Routines from rc-raw.c to be used internally and by decoders |
190 | */ | 155 | */ |
191 | u64 ir_raw_get_allowed_protocols(void); | 156 | u64 ir_raw_get_allowed_protocols(void); |
192 | u64 ir_raw_get_encode_protocols(void); | ||
193 | int ir_raw_event_register(struct rc_dev *dev); | 157 | int ir_raw_event_register(struct rc_dev *dev); |
194 | void ir_raw_event_unregister(struct rc_dev *dev); | 158 | void ir_raw_event_unregister(struct rc_dev *dev); |
195 | int ir_raw_handler_register(struct ir_raw_handler *ir_raw_handler); | 159 | int ir_raw_handler_register(struct ir_raw_handler *ir_raw_handler); |
diff --git a/drivers/media/rc/rc-ir-raw.c b/drivers/media/rc/rc-ir-raw.c index b9e4645c731c..b732ac6a26d8 100644 --- a/drivers/media/rc/rc-ir-raw.c +++ b/drivers/media/rc/rc-ir-raw.c | |||
@@ -30,7 +30,6 @@ static LIST_HEAD(ir_raw_client_list); | |||
30 | static DEFINE_MUTEX(ir_raw_handler_lock); | 30 | static DEFINE_MUTEX(ir_raw_handler_lock); |
31 | static LIST_HEAD(ir_raw_handler_list); | 31 | static LIST_HEAD(ir_raw_handler_list); |
32 | static u64 available_protocols; | 32 | static u64 available_protocols; |
33 | static u64 encode_protocols; | ||
34 | 33 | ||
35 | static int ir_raw_event_thread(void *data) | 34 | static int ir_raw_event_thread(void *data) |
36 | { | 35 | { |
@@ -241,146 +240,12 @@ ir_raw_get_allowed_protocols(void) | |||
241 | return protocols; | 240 | return protocols; |
242 | } | 241 | } |
243 | 242 | ||
244 | /* used internally by the sysfs interface */ | ||
245 | u64 | ||
246 | ir_raw_get_encode_protocols(void) | ||
247 | { | ||
248 | u64 protocols; | ||
249 | |||
250 | mutex_lock(&ir_raw_handler_lock); | ||
251 | protocols = encode_protocols; | ||
252 | mutex_unlock(&ir_raw_handler_lock); | ||
253 | return protocols; | ||
254 | } | ||
255 | |||
256 | static int change_protocol(struct rc_dev *dev, u64 *rc_type) | 243 | static int change_protocol(struct rc_dev *dev, u64 *rc_type) |
257 | { | 244 | { |
258 | /* the caller will update dev->enabled_protocols */ | 245 | /* the caller will update dev->enabled_protocols */ |
259 | return 0; | 246 | return 0; |
260 | } | 247 | } |
261 | 248 | ||
262 | /** | ||
263 | * ir_raw_gen_manchester() - Encode data with Manchester (bi-phase) modulation. | ||
264 | * @ev: Pointer to pointer to next free event. *@ev is incremented for | ||
265 | * each raw event filled. | ||
266 | * @max: Maximum number of raw events to fill. | ||
267 | * @timings: Manchester modulation timings. | ||
268 | * @n: Number of bits of data. | ||
269 | * @data: Data bits to encode. | ||
270 | * | ||
271 | * Encodes the @n least significant bits of @data using Manchester (bi-phase) | ||
272 | * modulation with the timing characteristics described by @timings, writing up | ||
273 | * to @max raw IR events using the *@ev pointer. | ||
274 | * | ||
275 | * Returns: 0 on success. | ||
276 | * -ENOBUFS if there isn't enough space in the array to fit the | ||
277 | * full encoded data. In this case all @max events will have been | ||
278 | * written. | ||
279 | */ | ||
280 | int ir_raw_gen_manchester(struct ir_raw_event **ev, unsigned int max, | ||
281 | const struct ir_raw_timings_manchester *timings, | ||
282 | unsigned int n, unsigned int data) | ||
283 | { | ||
284 | bool need_pulse; | ||
285 | unsigned int i; | ||
286 | int ret = -ENOBUFS; | ||
287 | |||
288 | i = 1 << (n - 1); | ||
289 | |||
290 | if (timings->leader) { | ||
291 | if (!max--) | ||
292 | return ret; | ||
293 | if (timings->pulse_space_start) { | ||
294 | init_ir_raw_event_duration((*ev)++, 1, timings->leader); | ||
295 | |||
296 | if (!max--) | ||
297 | return ret; | ||
298 | init_ir_raw_event_duration((*ev), 0, timings->leader); | ||
299 | } else { | ||
300 | init_ir_raw_event_duration((*ev), 1, timings->leader); | ||
301 | } | ||
302 | i >>= 1; | ||
303 | } else { | ||
304 | /* continue existing signal */ | ||
305 | --(*ev); | ||
306 | } | ||
307 | /* from here on *ev will point to the last event rather than the next */ | ||
308 | |||
309 | while (n && i > 0) { | ||
310 | need_pulse = !(data & i); | ||
311 | if (timings->invert) | ||
312 | need_pulse = !need_pulse; | ||
313 | if (need_pulse == !!(*ev)->pulse) { | ||
314 | (*ev)->duration += timings->clock; | ||
315 | } else { | ||
316 | if (!max--) | ||
317 | goto nobufs; | ||
318 | init_ir_raw_event_duration(++(*ev), need_pulse, | ||
319 | timings->clock); | ||
320 | } | ||
321 | |||
322 | if (!max--) | ||
323 | goto nobufs; | ||
324 | init_ir_raw_event_duration(++(*ev), !need_pulse, | ||
325 | timings->clock); | ||
326 | i >>= 1; | ||
327 | } | ||
328 | |||
329 | if (timings->trailer_space) { | ||
330 | if (!(*ev)->pulse) | ||
331 | (*ev)->duration += timings->trailer_space; | ||
332 | else if (!max--) | ||
333 | goto nobufs; | ||
334 | else | ||
335 | init_ir_raw_event_duration(++(*ev), 0, | ||
336 | timings->trailer_space); | ||
337 | } | ||
338 | |||
339 | ret = 0; | ||
340 | nobufs: | ||
341 | /* point to the next event rather than last event before returning */ | ||
342 | ++(*ev); | ||
343 | return ret; | ||
344 | } | ||
345 | EXPORT_SYMBOL(ir_raw_gen_manchester); | ||
346 | |||
347 | /** | ||
348 | * ir_raw_encode_scancode() - Encode a scancode as raw events | ||
349 | * | ||
350 | * @protocols: permitted protocols | ||
351 | * @scancode: scancode filter describing a single scancode | ||
352 | * @events: array of raw events to write into | ||
353 | * @max: max number of raw events | ||
354 | * | ||
355 | * Attempts to encode the scancode as raw events. | ||
356 | * | ||
357 | * Returns: The number of events written. | ||
358 | * -ENOBUFS if there isn't enough space in the array to fit the | ||
359 | * encoding. In this case all @max events will have been written. | ||
360 | * -EINVAL if the scancode is ambiguous or invalid, or if no | ||
361 | * compatible encoder was found. | ||
362 | */ | ||
363 | int ir_raw_encode_scancode(u64 protocols, | ||
364 | const struct rc_scancode_filter *scancode, | ||
365 | struct ir_raw_event *events, unsigned int max) | ||
366 | { | ||
367 | struct ir_raw_handler *handler; | ||
368 | int ret = -EINVAL; | ||
369 | |||
370 | mutex_lock(&ir_raw_handler_lock); | ||
371 | list_for_each_entry(handler, &ir_raw_handler_list, list) { | ||
372 | if (handler->protocols & protocols && handler->encode) { | ||
373 | ret = handler->encode(protocols, scancode, events, max); | ||
374 | if (ret >= 0 || ret == -ENOBUFS) | ||
375 | break; | ||
376 | } | ||
377 | } | ||
378 | mutex_unlock(&ir_raw_handler_lock); | ||
379 | |||
380 | return ret; | ||
381 | } | ||
382 | EXPORT_SYMBOL(ir_raw_encode_scancode); | ||
383 | |||
384 | /* | 249 | /* |
385 | * Used to (un)register raw event clients | 250 | * Used to (un)register raw event clients |
386 | */ | 251 | */ |
@@ -463,8 +328,6 @@ int ir_raw_handler_register(struct ir_raw_handler *ir_raw_handler) | |||
463 | list_for_each_entry(raw, &ir_raw_client_list, list) | 328 | list_for_each_entry(raw, &ir_raw_client_list, list) |
464 | ir_raw_handler->raw_register(raw->dev); | 329 | ir_raw_handler->raw_register(raw->dev); |
465 | available_protocols |= ir_raw_handler->protocols; | 330 | available_protocols |= ir_raw_handler->protocols; |
466 | if (ir_raw_handler->encode) | ||
467 | encode_protocols |= ir_raw_handler->protocols; | ||
468 | mutex_unlock(&ir_raw_handler_lock); | 331 | mutex_unlock(&ir_raw_handler_lock); |
469 | 332 | ||
470 | return 0; | 333 | return 0; |
@@ -481,8 +344,6 @@ void ir_raw_handler_unregister(struct ir_raw_handler *ir_raw_handler) | |||
481 | list_for_each_entry(raw, &ir_raw_client_list, list) | 344 | list_for_each_entry(raw, &ir_raw_client_list, list) |
482 | ir_raw_handler->raw_unregister(raw->dev); | 345 | ir_raw_handler->raw_unregister(raw->dev); |
483 | available_protocols &= ~ir_raw_handler->protocols; | 346 | available_protocols &= ~ir_raw_handler->protocols; |
484 | if (ir_raw_handler->encode) | ||
485 | encode_protocols &= ~ir_raw_handler->protocols; | ||
486 | mutex_unlock(&ir_raw_handler_lock); | 347 | mutex_unlock(&ir_raw_handler_lock); |
487 | } | 348 | } |
488 | EXPORT_SYMBOL(ir_raw_handler_unregister); | 349 | EXPORT_SYMBOL(ir_raw_handler_unregister); |
diff --git a/drivers/media/rc/rc-loopback.c b/drivers/media/rc/rc-loopback.c index d8bdf63ce985..63dace8198b0 100644 --- a/drivers/media/rc/rc-loopback.c +++ b/drivers/media/rc/rc-loopback.c | |||
@@ -26,7 +26,6 @@ | |||
26 | #include <linux/device.h> | 26 | #include <linux/device.h> |
27 | #include <linux/module.h> | 27 | #include <linux/module.h> |
28 | #include <linux/sched.h> | 28 | #include <linux/sched.h> |
29 | #include <linux/slab.h> | ||
30 | #include <media/rc-core.h> | 29 | #include <media/rc-core.h> |
31 | 30 | ||
32 | #define DRIVER_NAME "rc-loopback" | 31 | #define DRIVER_NAME "rc-loopback" |
@@ -177,39 +176,6 @@ static int loop_set_carrier_report(struct rc_dev *dev, int enable) | |||
177 | return 0; | 176 | return 0; |
178 | } | 177 | } |
179 | 178 | ||
180 | static int loop_set_wakeup_filter(struct rc_dev *dev, | ||
181 | struct rc_scancode_filter *sc_filter) | ||
182 | { | ||
183 | static const unsigned int max = 512; | ||
184 | struct ir_raw_event *raw; | ||
185 | int ret; | ||
186 | int i; | ||
187 | |||
188 | /* fine to disable filter */ | ||
189 | if (!sc_filter->mask) | ||
190 | return 0; | ||
191 | |||
192 | /* encode the specified filter and loop it back */ | ||
193 | raw = kmalloc_array(max, sizeof(*raw), GFP_KERNEL); | ||
194 | ret = ir_raw_encode_scancode(dev->enabled_wakeup_protocols, sc_filter, | ||
195 | raw, max); | ||
196 | /* still loop back the partial raw IR even if it's incomplete */ | ||
197 | if (ret == -ENOBUFS) | ||
198 | ret = max; | ||
199 | if (ret >= 0) { | ||
200 | /* do the loopback */ | ||
201 | for (i = 0; i < ret; ++i) | ||
202 | ir_raw_event_store(dev, &raw[i]); | ||
203 | ir_raw_event_handle(dev); | ||
204 | |||
205 | ret = 0; | ||
206 | } | ||
207 | |||
208 | kfree(raw); | ||
209 | |||
210 | return ret; | ||
211 | } | ||
212 | |||
213 | static int __init loop_init(void) | 179 | static int __init loop_init(void) |
214 | { | 180 | { |
215 | struct rc_dev *rc; | 181 | struct rc_dev *rc; |
@@ -229,7 +195,6 @@ static int __init loop_init(void) | |||
229 | rc->map_name = RC_MAP_EMPTY; | 195 | rc->map_name = RC_MAP_EMPTY; |
230 | rc->priv = &loopdev; | 196 | rc->priv = &loopdev; |
231 | rc->driver_type = RC_DRIVER_IR_RAW; | 197 | rc->driver_type = RC_DRIVER_IR_RAW; |
232 | rc->encode_wakeup = true; | ||
233 | rc->allowed_protocols = RC_BIT_ALL; | 198 | rc->allowed_protocols = RC_BIT_ALL; |
234 | rc->timeout = 100 * 1000 * 1000; /* 100 ms */ | 199 | rc->timeout = 100 * 1000 * 1000; /* 100 ms */ |
235 | rc->min_timeout = 1; | 200 | rc->min_timeout = 1; |
@@ -244,7 +209,6 @@ static int __init loop_init(void) | |||
244 | rc->s_idle = loop_set_idle; | 209 | rc->s_idle = loop_set_idle; |
245 | rc->s_learning_mode = loop_set_learning_mode; | 210 | rc->s_learning_mode = loop_set_learning_mode; |
246 | rc->s_carrier_report = loop_set_carrier_report; | 211 | rc->s_carrier_report = loop_set_carrier_report; |
247 | rc->s_wakeup_filter = loop_set_wakeup_filter; | ||
248 | 212 | ||
249 | loopdev.txmask = RXMASK_REGULAR; | 213 | loopdev.txmask = RXMASK_REGULAR; |
250 | loopdev.txcarrier = 36000; | 214 | loopdev.txcarrier = 36000; |
diff --git a/drivers/media/rc/rc-main.c b/drivers/media/rc/rc-main.c index 9d015db65280..0ff388a16168 100644 --- a/drivers/media/rc/rc-main.c +++ b/drivers/media/rc/rc-main.c | |||
@@ -865,8 +865,6 @@ static ssize_t show_protocols(struct device *device, | |||
865 | } else { | 865 | } else { |
866 | enabled = dev->enabled_wakeup_protocols; | 866 | enabled = dev->enabled_wakeup_protocols; |
867 | allowed = dev->allowed_wakeup_protocols; | 867 | allowed = dev->allowed_wakeup_protocols; |
868 | if (dev->encode_wakeup && !allowed) | ||
869 | allowed = ir_raw_get_encode_protocols(); | ||
870 | } | 868 | } |
871 | 869 | ||
872 | mutex_unlock(&dev->lock); | 870 | mutex_unlock(&dev->lock); |
@@ -1408,16 +1406,13 @@ int rc_register_device(struct rc_dev *dev) | |||
1408 | path ? path : "N/A"); | 1406 | path ? path : "N/A"); |
1409 | kfree(path); | 1407 | kfree(path); |
1410 | 1408 | ||
1411 | if (dev->driver_type == RC_DRIVER_IR_RAW || dev->encode_wakeup) { | 1409 | if (dev->driver_type == RC_DRIVER_IR_RAW) { |
1412 | /* Load raw decoders, if they aren't already */ | 1410 | /* Load raw decoders, if they aren't already */ |
1413 | if (!raw_init) { | 1411 | if (!raw_init) { |
1414 | IR_dprintk(1, "Loading raw decoders\n"); | 1412 | IR_dprintk(1, "Loading raw decoders\n"); |
1415 | ir_raw_init(); | 1413 | ir_raw_init(); |
1416 | raw_init = true; | 1414 | raw_init = true; |
1417 | } | 1415 | } |
1418 | } | ||
1419 | |||
1420 | if (dev->driver_type == RC_DRIVER_IR_RAW) { | ||
1421 | /* calls ir_register_device so unlock mutex here*/ | 1416 | /* calls ir_register_device so unlock mutex here*/ |
1422 | mutex_unlock(&dev->lock); | 1417 | mutex_unlock(&dev->lock); |
1423 | rc = ir_raw_event_register(dev); | 1418 | rc = ir_raw_event_register(dev); |
diff --git a/drivers/media/v4l2-core/videobuf2-core.c b/drivers/media/v4l2-core/videobuf2-core.c index 93b315459098..a14c428f70e9 100644 --- a/drivers/media/v4l2-core/videobuf2-core.c +++ b/drivers/media/v4l2-core/videobuf2-core.c | |||
@@ -715,6 +715,7 @@ static void __fill_v4l2_buffer(struct vb2_buffer *vb, struct v4l2_buffer *b) | |||
715 | break; | 715 | break; |
716 | case VB2_BUF_STATE_PREPARING: | 716 | case VB2_BUF_STATE_PREPARING: |
717 | case VB2_BUF_STATE_DEQUEUED: | 717 | case VB2_BUF_STATE_DEQUEUED: |
718 | case VB2_BUF_STATE_REQUEUEING: | ||
718 | /* nothing */ | 719 | /* nothing */ |
719 | break; | 720 | break; |
720 | } | 721 | } |
@@ -1182,7 +1183,8 @@ void vb2_buffer_done(struct vb2_buffer *vb, enum vb2_buffer_state state) | |||
1182 | 1183 | ||
1183 | if (WARN_ON(state != VB2_BUF_STATE_DONE && | 1184 | if (WARN_ON(state != VB2_BUF_STATE_DONE && |
1184 | state != VB2_BUF_STATE_ERROR && | 1185 | state != VB2_BUF_STATE_ERROR && |
1185 | state != VB2_BUF_STATE_QUEUED)) | 1186 | state != VB2_BUF_STATE_QUEUED && |
1187 | state != VB2_BUF_STATE_REQUEUEING)) | ||
1186 | state = VB2_BUF_STATE_ERROR; | 1188 | state = VB2_BUF_STATE_ERROR; |
1187 | 1189 | ||
1188 | #ifdef CONFIG_VIDEO_ADV_DEBUG | 1190 | #ifdef CONFIG_VIDEO_ADV_DEBUG |
@@ -1199,22 +1201,30 @@ void vb2_buffer_done(struct vb2_buffer *vb, enum vb2_buffer_state state) | |||
1199 | for (plane = 0; plane < vb->num_planes; ++plane) | 1201 | for (plane = 0; plane < vb->num_planes; ++plane) |
1200 | call_void_memop(vb, finish, vb->planes[plane].mem_priv); | 1202 | call_void_memop(vb, finish, vb->planes[plane].mem_priv); |
1201 | 1203 | ||
1202 | /* Add the buffer to the done buffers list */ | ||
1203 | spin_lock_irqsave(&q->done_lock, flags); | 1204 | spin_lock_irqsave(&q->done_lock, flags); |
1204 | vb->state = state; | 1205 | if (state == VB2_BUF_STATE_QUEUED || |
1205 | if (state != VB2_BUF_STATE_QUEUED) | 1206 | state == VB2_BUF_STATE_REQUEUEING) { |
1207 | vb->state = VB2_BUF_STATE_QUEUED; | ||
1208 | } else { | ||
1209 | /* Add the buffer to the done buffers list */ | ||
1206 | list_add_tail(&vb->done_entry, &q->done_list); | 1210 | list_add_tail(&vb->done_entry, &q->done_list); |
1211 | vb->state = state; | ||
1212 | } | ||
1207 | atomic_dec(&q->owned_by_drv_count); | 1213 | atomic_dec(&q->owned_by_drv_count); |
1208 | spin_unlock_irqrestore(&q->done_lock, flags); | 1214 | spin_unlock_irqrestore(&q->done_lock, flags); |
1209 | 1215 | ||
1210 | if (state == VB2_BUF_STATE_QUEUED) { | 1216 | switch (state) { |
1217 | case VB2_BUF_STATE_QUEUED: | ||
1218 | return; | ||
1219 | case VB2_BUF_STATE_REQUEUEING: | ||
1211 | if (q->start_streaming_called) | 1220 | if (q->start_streaming_called) |
1212 | __enqueue_in_driver(vb); | 1221 | __enqueue_in_driver(vb); |
1213 | return; | 1222 | return; |
1223 | default: | ||
1224 | /* Inform any processes that may be waiting for buffers */ | ||
1225 | wake_up(&q->done_wq); | ||
1226 | break; | ||
1214 | } | 1227 | } |
1215 | |||
1216 | /* Inform any processes that may be waiting for buffers */ | ||
1217 | wake_up(&q->done_wq); | ||
1218 | } | 1228 | } |
1219 | EXPORT_SYMBOL_GPL(vb2_buffer_done); | 1229 | EXPORT_SYMBOL_GPL(vb2_buffer_done); |
1220 | 1230 | ||
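The rework above adds VB2_BUF_STATE_REQUEUEING: a driver can hand a buffer back to videobuf2 and, if start_streaming() has already been called, vb2_buffer_done() immediately re-queues it to the driver instead of placing it on the done list for userspace. A hedged usage fragment, not taken from any particular driver (transfer_failed is a made-up condition):

/* in a capture interrupt handler / DMA completion callback */
if (transfer_failed)
	/* retry the frame: the buffer returns to the driver-owned queue */
	vb2_buffer_done(vb, VB2_BUF_STATE_REQUEUEING);
else
	vb2_buffer_done(vb, VB2_BUF_STATE_DONE);

Compared with returning VB2_BUF_STATE_QUEUED, the new state additionally has the core push the buffer back into the hardware queue on the driver's behalf once streaming has started.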
@@ -1244,19 +1254,19 @@ EXPORT_SYMBOL_GPL(vb2_discard_done); | |||
1244 | 1254 | ||
1245 | static void vb2_warn_zero_bytesused(struct vb2_buffer *vb) | 1255 | static void vb2_warn_zero_bytesused(struct vb2_buffer *vb) |
1246 | { | 1256 | { |
1247 | static bool __check_once __read_mostly; | 1257 | static bool check_once; |
1248 | 1258 | ||
1249 | if (__check_once) | 1259 | if (check_once) |
1250 | return; | 1260 | return; |
1251 | 1261 | ||
1252 | __check_once = true; | 1262 | check_once = true; |
1253 | __WARN(); | 1263 | WARN_ON(1); |
1254 | 1264 | ||
1255 | pr_warn_once("use of bytesused == 0 is deprecated and will be removed in the future,\n"); | 1265 | pr_warn("use of bytesused == 0 is deprecated and will be removed in the future,\n"); |
1256 | if (vb->vb2_queue->allow_zero_bytesused) | 1266 | if (vb->vb2_queue->allow_zero_bytesused) |
1257 | pr_warn_once("use VIDIOC_DECODER_CMD(V4L2_DEC_CMD_STOP) instead.\n"); | 1267 | pr_warn("use VIDIOC_DECODER_CMD(V4L2_DEC_CMD_STOP) instead.\n"); |
1258 | else | 1268 | else |
1259 | pr_warn_once("use the actual size instead.\n"); | 1269 | pr_warn("use the actual size instead.\n"); |
1260 | } | 1270 | } |
1261 | 1271 | ||
1262 | /** | 1272 | /** |
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb.c b/drivers/net/can/usb/peak_usb/pcan_usb.c index 6b94007ae052..838545ce468d 100644 --- a/drivers/net/can/usb/peak_usb/pcan_usb.c +++ b/drivers/net/can/usb/peak_usb/pcan_usb.c | |||
@@ -854,6 +854,18 @@ static int pcan_usb_probe(struct usb_interface *intf) | |||
854 | /* | 854 | /* |
855 | * describe the PCAN-USB adapter | 855 | * describe the PCAN-USB adapter |
856 | */ | 856 | */ |
857 | static const struct can_bittiming_const pcan_usb_const = { | ||
858 | .name = "pcan_usb", | ||
859 | .tseg1_min = 1, | ||
860 | .tseg1_max = 16, | ||
861 | .tseg2_min = 1, | ||
862 | .tseg2_max = 8, | ||
863 | .sjw_max = 4, | ||
864 | .brp_min = 1, | ||
865 | .brp_max = 64, | ||
866 | .brp_inc = 1, | ||
867 | }; | ||
868 | |||
857 | const struct peak_usb_adapter pcan_usb = { | 869 | const struct peak_usb_adapter pcan_usb = { |
858 | .name = "PCAN-USB", | 870 | .name = "PCAN-USB", |
859 | .device_id = PCAN_USB_PRODUCT_ID, | 871 | .device_id = PCAN_USB_PRODUCT_ID, |
@@ -862,17 +874,7 @@ const struct peak_usb_adapter pcan_usb = { | |||
862 | .clock = { | 874 | .clock = { |
863 | .freq = PCAN_USB_CRYSTAL_HZ / 2 , | 875 | .freq = PCAN_USB_CRYSTAL_HZ / 2 , |
864 | }, | 876 | }, |
865 | .bittiming_const = { | 877 | .bittiming_const = &pcan_usb_const, |
866 | .name = "pcan_usb", | ||
867 | .tseg1_min = 1, | ||
868 | .tseg1_max = 16, | ||
869 | .tseg2_min = 1, | ||
870 | .tseg2_max = 8, | ||
871 | .sjw_max = 4, | ||
872 | .brp_min = 1, | ||
873 | .brp_max = 64, | ||
874 | .brp_inc = 1, | ||
875 | }, | ||
876 | 878 | ||
877 | /* size of device private data */ | 879 | /* size of device private data */ |
878 | .sizeof_dev_private = sizeof(struct pcan_usb), | 880 | .sizeof_dev_private = sizeof(struct pcan_usb), |
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_core.c b/drivers/net/can/usb/peak_usb/pcan_usb_core.c index 7921cff93a63..5a2e341a6d1e 100644 --- a/drivers/net/can/usb/peak_usb/pcan_usb_core.c +++ b/drivers/net/can/usb/peak_usb/pcan_usb_core.c | |||
@@ -792,9 +792,9 @@ static int peak_usb_create_dev(const struct peak_usb_adapter *peak_usb_adapter, | |||
792 | dev->ep_msg_out = peak_usb_adapter->ep_msg_out[ctrl_idx]; | 792 | dev->ep_msg_out = peak_usb_adapter->ep_msg_out[ctrl_idx]; |
793 | 793 | ||
794 | dev->can.clock = peak_usb_adapter->clock; | 794 | dev->can.clock = peak_usb_adapter->clock; |
795 | dev->can.bittiming_const = &peak_usb_adapter->bittiming_const; | 795 | dev->can.bittiming_const = peak_usb_adapter->bittiming_const; |
796 | dev->can.do_set_bittiming = peak_usb_set_bittiming; | 796 | dev->can.do_set_bittiming = peak_usb_set_bittiming; |
797 | dev->can.data_bittiming_const = &peak_usb_adapter->data_bittiming_const; | 797 | dev->can.data_bittiming_const = peak_usb_adapter->data_bittiming_const; |
798 | dev->can.do_set_data_bittiming = peak_usb_set_data_bittiming; | 798 | dev->can.do_set_data_bittiming = peak_usb_set_data_bittiming; |
799 | dev->can.do_set_mode = peak_usb_set_mode; | 799 | dev->can.do_set_mode = peak_usb_set_mode; |
800 | dev->can.do_get_berr_counter = peak_usb_adapter->do_get_berr_counter; | 800 | dev->can.do_get_berr_counter = peak_usb_adapter->do_get_berr_counter; |
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_core.h b/drivers/net/can/usb/peak_usb/pcan_usb_core.h index 9e624f05ad4d..506fe506c9d3 100644 --- a/drivers/net/can/usb/peak_usb/pcan_usb_core.h +++ b/drivers/net/can/usb/peak_usb/pcan_usb_core.h | |||
@@ -48,8 +48,8 @@ struct peak_usb_adapter { | |||
48 | u32 device_id; | 48 | u32 device_id; |
49 | u32 ctrlmode_supported; | 49 | u32 ctrlmode_supported; |
50 | struct can_clock clock; | 50 | struct can_clock clock; |
51 | const struct can_bittiming_const bittiming_const; | 51 | const struct can_bittiming_const * const bittiming_const; |
52 | const struct can_bittiming_const data_bittiming_const; | 52 | const struct can_bittiming_const * const data_bittiming_const; |
53 | unsigned int ctrl_count; | 53 | unsigned int ctrl_count; |
54 | 54 | ||
55 | int (*intf_probe)(struct usb_interface *intf); | 55 | int (*intf_probe)(struct usb_interface *intf); |
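Replacing the embedded can_bittiming_const members of struct peak_usb_adapter with const pointers lets the adapter descriptions share static timing tables and, notably, lets adapters without a CAN FD data phase leave data_bittiming_const NULL instead of carrying a zero-filled structure (peak_usb_create_dev now just copies the pointers). A rough sketch of the resulting pattern; the demo_* names are invented and most adapter fields are omitted:

static const struct can_bittiming_const demo_nominal_const = {
	.name      = "demo",
	.tseg1_min = 1, .tseg1_max = 16,
	.tseg2_min = 1, .tseg2_max = 8,
	.sjw_max   = 4,
	.brp_min   = 1, .brp_max = 64, .brp_inc = 1,
};

const struct peak_usb_adapter demo_adapter = {
	.bittiming_const      = &demo_nominal_const,
	.data_bittiming_const = NULL,	/* classic CAN only: no data phase */
	/* ... clock, endpoints, callbacks as in the real descriptions ... */
};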
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_fd.c b/drivers/net/can/usb/peak_usb/pcan_usb_fd.c index 09d14e70abd7..ce44a033f63b 100644 --- a/drivers/net/can/usb/peak_usb/pcan_usb_fd.c +++ b/drivers/net/can/usb/peak_usb/pcan_usb_fd.c | |||
@@ -990,6 +990,30 @@ static void pcan_usb_fd_free(struct peak_usb_device *dev) | |||
990 | } | 990 | } |
991 | 991 | ||
992 | /* describes the PCAN-USB FD adapter */ | 992 | /* describes the PCAN-USB FD adapter */ |
993 | static const struct can_bittiming_const pcan_usb_fd_const = { | ||
994 | .name = "pcan_usb_fd", | ||
995 | .tseg1_min = 1, | ||
996 | .tseg1_max = 64, | ||
997 | .tseg2_min = 1, | ||
998 | .tseg2_max = 16, | ||
999 | .sjw_max = 16, | ||
1000 | .brp_min = 1, | ||
1001 | .brp_max = 1024, | ||
1002 | .brp_inc = 1, | ||
1003 | }; | ||
1004 | |||
1005 | static const struct can_bittiming_const pcan_usb_fd_data_const = { | ||
1006 | .name = "pcan_usb_fd", | ||
1007 | .tseg1_min = 1, | ||
1008 | .tseg1_max = 16, | ||
1009 | .tseg2_min = 1, | ||
1010 | .tseg2_max = 8, | ||
1011 | .sjw_max = 4, | ||
1012 | .brp_min = 1, | ||
1013 | .brp_max = 1024, | ||
1014 | .brp_inc = 1, | ||
1015 | }; | ||
1016 | |||
993 | const struct peak_usb_adapter pcan_usb_fd = { | 1017 | const struct peak_usb_adapter pcan_usb_fd = { |
994 | .name = "PCAN-USB FD", | 1018 | .name = "PCAN-USB FD", |
995 | .device_id = PCAN_USBFD_PRODUCT_ID, | 1019 | .device_id = PCAN_USBFD_PRODUCT_ID, |
@@ -999,28 +1023,8 @@ const struct peak_usb_adapter pcan_usb_fd = { | |||
999 | .clock = { | 1023 | .clock = { |
1000 | .freq = PCAN_UFD_CRYSTAL_HZ, | 1024 | .freq = PCAN_UFD_CRYSTAL_HZ, |
1001 | }, | 1025 | }, |
1002 | .bittiming_const = { | 1026 | .bittiming_const = &pcan_usb_fd_const, |
1003 | .name = "pcan_usb_fd", | 1027 | .data_bittiming_const = &pcan_usb_fd_data_const, |
1004 | .tseg1_min = 1, | ||
1005 | .tseg1_max = 64, | ||
1006 | .tseg2_min = 1, | ||
1007 | .tseg2_max = 16, | ||
1008 | .sjw_max = 16, | ||
1009 | .brp_min = 1, | ||
1010 | .brp_max = 1024, | ||
1011 | .brp_inc = 1, | ||
1012 | }, | ||
1013 | .data_bittiming_const = { | ||
1014 | .name = "pcan_usb_fd", | ||
1015 | .tseg1_min = 1, | ||
1016 | .tseg1_max = 16, | ||
1017 | .tseg2_min = 1, | ||
1018 | .tseg2_max = 8, | ||
1019 | .sjw_max = 4, | ||
1020 | .brp_min = 1, | ||
1021 | .brp_max = 1024, | ||
1022 | .brp_inc = 1, | ||
1023 | }, | ||
1024 | 1028 | ||
1025 | /* size of device private data */ | 1029 | /* size of device private data */ |
1026 | .sizeof_dev_private = sizeof(struct pcan_usb_fd_device), | 1030 | .sizeof_dev_private = sizeof(struct pcan_usb_fd_device), |
@@ -1058,6 +1062,30 @@ const struct peak_usb_adapter pcan_usb_fd = { | |||
1058 | }; | 1062 | }; |
1059 | 1063 | ||
1060 | /* describes the PCAN-USB Pro FD adapter */ | 1064 | /* describes the PCAN-USB Pro FD adapter */ |
1065 | static const struct can_bittiming_const pcan_usb_pro_fd_const = { | ||
1066 | .name = "pcan_usb_pro_fd", | ||
1067 | .tseg1_min = 1, | ||
1068 | .tseg1_max = 64, | ||
1069 | .tseg2_min = 1, | ||
1070 | .tseg2_max = 16, | ||
1071 | .sjw_max = 16, | ||
1072 | .brp_min = 1, | ||
1073 | .brp_max = 1024, | ||
1074 | .brp_inc = 1, | ||
1075 | }; | ||
1076 | |||
1077 | static const struct can_bittiming_const pcan_usb_pro_fd_data_const = { | ||
1078 | .name = "pcan_usb_pro_fd", | ||
1079 | .tseg1_min = 1, | ||
1080 | .tseg1_max = 16, | ||
1081 | .tseg2_min = 1, | ||
1082 | .tseg2_max = 8, | ||
1083 | .sjw_max = 4, | ||
1084 | .brp_min = 1, | ||
1085 | .brp_max = 1024, | ||
1086 | .brp_inc = 1, | ||
1087 | }; | ||
1088 | |||
1061 | const struct peak_usb_adapter pcan_usb_pro_fd = { | 1089 | const struct peak_usb_adapter pcan_usb_pro_fd = { |
1062 | .name = "PCAN-USB Pro FD", | 1090 | .name = "PCAN-USB Pro FD", |
1063 | .device_id = PCAN_USBPROFD_PRODUCT_ID, | 1091 | .device_id = PCAN_USBPROFD_PRODUCT_ID, |
@@ -1067,28 +1095,8 @@ const struct peak_usb_adapter pcan_usb_pro_fd = { | |||
1067 | .clock = { | 1095 | .clock = { |
1068 | .freq = PCAN_UFD_CRYSTAL_HZ, | 1096 | .freq = PCAN_UFD_CRYSTAL_HZ, |
1069 | }, | 1097 | }, |
1070 | .bittiming_const = { | 1098 | .bittiming_const = &pcan_usb_pro_fd_const, |
1071 | .name = "pcan_usb_pro_fd", | 1099 | .data_bittiming_const = &pcan_usb_pro_fd_data_const, |
1072 | .tseg1_min = 1, | ||
1073 | .tseg1_max = 64, | ||
1074 | .tseg2_min = 1, | ||
1075 | .tseg2_max = 16, | ||
1076 | .sjw_max = 16, | ||
1077 | .brp_min = 1, | ||
1078 | .brp_max = 1024, | ||
1079 | .brp_inc = 1, | ||
1080 | }, | ||
1081 | .data_bittiming_const = { | ||
1082 | .name = "pcan_usb_pro_fd", | ||
1083 | .tseg1_min = 1, | ||
1084 | .tseg1_max = 16, | ||
1085 | .tseg2_min = 1, | ||
1086 | .tseg2_max = 8, | ||
1087 | .sjw_max = 4, | ||
1088 | .brp_min = 1, | ||
1089 | .brp_max = 1024, | ||
1090 | .brp_inc = 1, | ||
1091 | }, | ||
1092 | 1100 | ||
1093 | /* size of device private data */ | 1101 | /* size of device private data */ |
1094 | .sizeof_dev_private = sizeof(struct pcan_usb_fd_device), | 1102 | .sizeof_dev_private = sizeof(struct pcan_usb_fd_device), |
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_pro.c b/drivers/net/can/usb/peak_usb/pcan_usb_pro.c index 7d61b3279798..bbdd6058cd2f 100644 --- a/drivers/net/can/usb/peak_usb/pcan_usb_pro.c +++ b/drivers/net/can/usb/peak_usb/pcan_usb_pro.c | |||
@@ -1004,6 +1004,18 @@ int pcan_usb_pro_probe(struct usb_interface *intf) | |||
1004 | /* | 1004 | /* |
1005 | * describe the PCAN-USB Pro adapter | 1005 | * describe the PCAN-USB Pro adapter |
1006 | */ | 1006 | */ |
1007 | static const struct can_bittiming_const pcan_usb_pro_const = { | ||
1008 | .name = "pcan_usb_pro", | ||
1009 | .tseg1_min = 1, | ||
1010 | .tseg1_max = 16, | ||
1011 | .tseg2_min = 1, | ||
1012 | .tseg2_max = 8, | ||
1013 | .sjw_max = 4, | ||
1014 | .brp_min = 1, | ||
1015 | .brp_max = 1024, | ||
1016 | .brp_inc = 1, | ||
1017 | }; | ||
1018 | |||
1007 | const struct peak_usb_adapter pcan_usb_pro = { | 1019 | const struct peak_usb_adapter pcan_usb_pro = { |
1008 | .name = "PCAN-USB Pro", | 1020 | .name = "PCAN-USB Pro", |
1009 | .device_id = PCAN_USBPRO_PRODUCT_ID, | 1021 | .device_id = PCAN_USBPRO_PRODUCT_ID, |
@@ -1012,17 +1024,7 @@ const struct peak_usb_adapter pcan_usb_pro = { | |||
1012 | .clock = { | 1024 | .clock = { |
1013 | .freq = PCAN_USBPRO_CRYSTAL_HZ, | 1025 | .freq = PCAN_USBPRO_CRYSTAL_HZ, |
1014 | }, | 1026 | }, |
1015 | .bittiming_const = { | 1027 | .bittiming_const = &pcan_usb_pro_const, |
1016 | .name = "pcan_usb_pro", | ||
1017 | .tseg1_min = 1, | ||
1018 | .tseg1_max = 16, | ||
1019 | .tseg2_min = 1, | ||
1020 | .tseg2_max = 8, | ||
1021 | .sjw_max = 4, | ||
1022 | .brp_min = 1, | ||
1023 | .brp_max = 1024, | ||
1024 | .brp_inc = 1, | ||
1025 | }, | ||
1026 | 1028 | ||
1027 | /* size of device private data */ | 1029 | /* size of device private data */ |
1028 | .sizeof_dev_private = sizeof(struct pcan_usb_pro_device), | 1030 | .sizeof_dev_private = sizeof(struct pcan_usb_pro_device), |
diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile index c51014b0464f..b52e0f63f9a3 100644 --- a/drivers/net/ethernet/Makefile +++ b/drivers/net/ethernet/Makefile | |||
@@ -65,7 +65,7 @@ obj-$(CONFIG_NET_VENDOR_PASEMI) += pasemi/ | |||
65 | obj-$(CONFIG_NET_VENDOR_QLOGIC) += qlogic/ | 65 | obj-$(CONFIG_NET_VENDOR_QLOGIC) += qlogic/ |
66 | obj-$(CONFIG_NET_VENDOR_QUALCOMM) += qualcomm/ | 66 | obj-$(CONFIG_NET_VENDOR_QUALCOMM) += qualcomm/ |
67 | obj-$(CONFIG_NET_VENDOR_REALTEK) += realtek/ | 67 | obj-$(CONFIG_NET_VENDOR_REALTEK) += realtek/ |
68 | obj-$(CONFIG_SH_ETH) += renesas/ | 68 | obj-$(CONFIG_NET_VENDOR_RENESAS) += renesas/ |
69 | obj-$(CONFIG_NET_VENDOR_RDC) += rdc/ | 69 | obj-$(CONFIG_NET_VENDOR_RDC) += rdc/ |
70 | obj-$(CONFIG_NET_VENDOR_ROCKER) += rocker/ | 70 | obj-$(CONFIG_NET_VENDOR_ROCKER) += rocker/ |
71 | obj-$(CONFIG_NET_VENDOR_SAMSUNG) += samsung/ | 71 | obj-$(CONFIG_NET_VENDOR_SAMSUNG) += samsung/ |
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c index a626c4315a89..cfa37041ab71 100644 --- a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c +++ b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c | |||
@@ -801,6 +801,9 @@ int xgene_enet_mdio_config(struct xgene_enet_pdata *pdata) | |||
801 | 801 | ||
802 | void xgene_enet_mdio_remove(struct xgene_enet_pdata *pdata) | 802 | void xgene_enet_mdio_remove(struct xgene_enet_pdata *pdata) |
803 | { | 803 | { |
804 | if (pdata->phy_dev) | ||
805 | phy_disconnect(pdata->phy_dev); | ||
806 | |||
804 | mdiobus_unregister(pdata->mdio_bus); | 807 | mdiobus_unregister(pdata->mdio_bus); |
805 | mdiobus_free(pdata->mdio_bus); | 808 | mdiobus_free(pdata->mdio_bus); |
806 | pdata->mdio_bus = NULL; | 809 | pdata->mdio_bus = NULL; |
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c index 299eb4315fe6..a02ea7f8fdae 100644 --- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c +++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c | |||
@@ -1277,9 +1277,10 @@ static int xgene_enet_remove(struct platform_device *pdev) | |||
1277 | mac_ops->tx_disable(pdata); | 1277 | mac_ops->tx_disable(pdata); |
1278 | 1278 | ||
1279 | xgene_enet_napi_del(pdata); | 1279 | xgene_enet_napi_del(pdata); |
1280 | xgene_enet_mdio_remove(pdata); | 1280 | if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII) |
1281 | xgene_enet_delete_desc_rings(pdata); | 1281 | xgene_enet_mdio_remove(pdata); |
1282 | unregister_netdev(ndev); | 1282 | unregister_netdev(ndev); |
1283 | xgene_enet_delete_desc_rings(pdata); | ||
1283 | pdata->port_ops->shutdown(pdata); | 1284 | pdata->port_ops->shutdown(pdata); |
1284 | free_netdev(ndev); | 1285 | free_netdev(ndev); |
1285 | 1286 | ||
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c index 64c1e9db6b0b..09ff09f828d0 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c | |||
@@ -2126,6 +2126,8 @@ static int bcmgenet_dma_teardown(struct bcmgenet_priv *priv) | |||
2126 | int ret = 0; | 2126 | int ret = 0; |
2127 | int timeout = 0; | 2127 | int timeout = 0; |
2128 | u32 reg; | 2128 | u32 reg; |
2129 | u32 dma_ctrl; | ||
2130 | int i; | ||
2129 | 2131 | ||
2130 | /* Disable TDMA to stop add more frames in TX DMA */ | 2132 | /* Disable TDMA to stop add more frames in TX DMA */ |
2131 | reg = bcmgenet_tdma_readl(priv, DMA_CTRL); | 2133 | reg = bcmgenet_tdma_readl(priv, DMA_CTRL); |
@@ -2169,6 +2171,20 @@ static int bcmgenet_dma_teardown(struct bcmgenet_priv *priv) | |||
2169 | ret = -ETIMEDOUT; | 2171 | ret = -ETIMEDOUT; |
2170 | } | 2172 | } |
2171 | 2173 | ||
2174 | dma_ctrl = 0; | ||
2175 | for (i = 0; i < priv->hw_params->rx_queues; i++) | ||
2176 | dma_ctrl |= (1 << (i + DMA_RING_BUF_EN_SHIFT)); | ||
2177 | reg = bcmgenet_rdma_readl(priv, DMA_CTRL); | ||
2178 | reg &= ~dma_ctrl; | ||
2179 | bcmgenet_rdma_writel(priv, reg, DMA_CTRL); | ||
2180 | |||
2181 | dma_ctrl = 0; | ||
2182 | for (i = 0; i < priv->hw_params->tx_queues; i++) | ||
2183 | dma_ctrl |= (1 << (i + DMA_RING_BUF_EN_SHIFT)); | ||
2184 | reg = bcmgenet_tdma_readl(priv, DMA_CTRL); | ||
2185 | reg &= ~dma_ctrl; | ||
2186 | bcmgenet_tdma_writel(priv, reg, DMA_CTRL); | ||
2187 | |||
2172 | return ret; | 2188 | return ret; |
2173 | } | 2189 | } |
2174 | 2190 | ||
@@ -2820,8 +2836,6 @@ static void bcmgenet_timeout(struct net_device *dev) | |||
2820 | 2836 | ||
2821 | netif_dbg(priv, tx_err, dev, "bcmgenet_timeout\n"); | 2837 | netif_dbg(priv, tx_err, dev, "bcmgenet_timeout\n"); |
2822 | 2838 | ||
2823 | bcmgenet_disable_tx_napi(priv); | ||
2824 | |||
2825 | for (q = 0; q < priv->hw_params->tx_queues; q++) | 2839 | for (q = 0; q < priv->hw_params->tx_queues; q++) |
2826 | bcmgenet_dump_tx_queue(&priv->tx_rings[q]); | 2840 | bcmgenet_dump_tx_queue(&priv->tx_rings[q]); |
2827 | bcmgenet_dump_tx_queue(&priv->tx_rings[DESC_INDEX]); | 2841 | bcmgenet_dump_tx_queue(&priv->tx_rings[DESC_INDEX]); |
@@ -2837,8 +2851,6 @@ static void bcmgenet_timeout(struct net_device *dev) | |||
2837 | bcmgenet_intrl2_0_writel(priv, int0_enable, INTRL2_CPU_MASK_CLEAR); | 2851 | bcmgenet_intrl2_0_writel(priv, int0_enable, INTRL2_CPU_MASK_CLEAR); |
2838 | bcmgenet_intrl2_1_writel(priv, int1_enable, INTRL2_CPU_MASK_CLEAR); | 2852 | bcmgenet_intrl2_1_writel(priv, int1_enable, INTRL2_CPU_MASK_CLEAR); |
2839 | 2853 | ||
2840 | bcmgenet_enable_tx_napi(priv); | ||
2841 | |||
2842 | dev->trans_start = jiffies; | 2854 | dev->trans_start = jiffies; |
2843 | 2855 | ||
2844 | dev->stats.tx_errors++; | 2856 | dev->stats.tx_errors++; |
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c index c28e3bfdccd7..6ca693b03f33 100644 --- a/drivers/net/ethernet/emulex/benet/be_main.c +++ b/drivers/net/ethernet/emulex/benet/be_main.c | |||
@@ -5174,7 +5174,7 @@ static void be_add_vxlan_port(struct net_device *netdev, sa_family_t sa_family, | |||
5174 | struct device *dev = &adapter->pdev->dev; | 5174 | struct device *dev = &adapter->pdev->dev; |
5175 | int status; | 5175 | int status; |
5176 | 5176 | ||
5177 | if (lancer_chip(adapter) || BEx_chip(adapter)) | 5177 | if (lancer_chip(adapter) || BEx_chip(adapter) || be_is_mc(adapter)) |
5178 | return; | 5178 | return; |
5179 | 5179 | ||
5180 | if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS) { | 5180 | if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS) { |
@@ -5221,7 +5221,7 @@ static void be_del_vxlan_port(struct net_device *netdev, sa_family_t sa_family, | |||
5221 | { | 5221 | { |
5222 | struct be_adapter *adapter = netdev_priv(netdev); | 5222 | struct be_adapter *adapter = netdev_priv(netdev); |
5223 | 5223 | ||
5224 | if (lancer_chip(adapter) || BEx_chip(adapter)) | 5224 | if (lancer_chip(adapter) || BEx_chip(adapter) || be_is_mc(adapter)) |
5225 | return; | 5225 | return; |
5226 | 5226 | ||
5227 | if (adapter->vxlan_port != port) | 5227 | if (adapter->vxlan_port != port) |
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index 271bb5862346..b349e6f36ea7 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c | |||
@@ -1778,7 +1778,7 @@ static int fec_enet_mdio_read(struct mii_bus *bus, int mii_id, int regnum) | |||
1778 | return ret; | 1778 | return ret; |
1779 | 1779 | ||
1780 | fep->mii_timeout = 0; | 1780 | fep->mii_timeout = 0; |
1781 | init_completion(&fep->mdio_done); | 1781 | reinit_completion(&fep->mdio_done); |
1782 | 1782 | ||
1783 | /* start a read op */ | 1783 | /* start a read op */ |
1784 | writel(FEC_MMFR_ST | FEC_MMFR_OP_READ | | 1784 | writel(FEC_MMFR_ST | FEC_MMFR_OP_READ | |
@@ -1817,7 +1817,7 @@ static int fec_enet_mdio_write(struct mii_bus *bus, int mii_id, int regnum, | |||
1817 | return ret; | 1817 | return ret; |
1818 | 1818 | ||
1819 | fep->mii_timeout = 0; | 1819 | fep->mii_timeout = 0; |
1820 | init_completion(&fep->mdio_done); | 1820 | reinit_completion(&fep->mdio_done); |
1821 | 1821 | ||
1822 | /* start a write op */ | 1822 | /* start a write op */ |
1823 | writel(FEC_MMFR_ST | FEC_MMFR_OP_WRITE | | 1823 | writel(FEC_MMFR_ST | FEC_MMFR_OP_WRITE | |
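The switch from init_completion() to reinit_completion() matters because the driver reuses a single struct completion for every MDIO transfer: reinit_completion() only resets the done counter, which is the documented way to recycle a completion, whereas re-running init_completion() on a live object also re-initialises its wait-queue head. The usual shape of the pattern, as a sketch rather than the fec code verbatim (FEC_MII_TIMEOUT is assumed to be the driver's MDIO timeout in microseconds):

/* once, at setup time */
init_completion(&fep->mdio_done);

/* per MDIO read/write */
reinit_completion(&fep->mdio_done);	/* reset 'done' only */
/* ... write FEC_MMFR_* to start the transfer ... */
if (!wait_for_completion_timeout(&fep->mdio_done,
				 usecs_to_jiffies(FEC_MII_TIMEOUT)))
	return -ETIMEDOUT;	/* the MII interrupt calls complete() */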
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c index 2b7610f341b0..10b3bbbbac8e 100644 --- a/drivers/net/ethernet/freescale/gianfar.c +++ b/drivers/net/ethernet/freescale/gianfar.c | |||
@@ -2102,6 +2102,11 @@ int startup_gfar(struct net_device *ndev) | |||
2102 | /* Start Rx/Tx DMA and enable the interrupts */ | 2102 | /* Start Rx/Tx DMA and enable the interrupts */ |
2103 | gfar_start(priv); | 2103 | gfar_start(priv); |
2104 | 2104 | ||
2105 | /* force link state update after mac reset */ | ||
2106 | priv->oldlink = 0; | ||
2107 | priv->oldspeed = 0; | ||
2108 | priv->oldduplex = -1; | ||
2109 | |||
2105 | phy_start(priv->phydev); | 2110 | phy_start(priv->phydev); |
2106 | 2111 | ||
2107 | enable_napi(priv); | 2112 | enable_napi(priv); |
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_main.c b/drivers/net/ethernet/intel/fm10k/fm10k_main.c index 982fdcdc795b..b5b2925103ec 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_main.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_main.c | |||
@@ -216,7 +216,7 @@ static void fm10k_reuse_rx_page(struct fm10k_ring *rx_ring, | |||
216 | 216 | ||
217 | static inline bool fm10k_page_is_reserved(struct page *page) | 217 | static inline bool fm10k_page_is_reserved(struct page *page) |
218 | { | 218 | { |
219 | return (page_to_nid(page) != numa_mem_id()) || page->pfmemalloc; | 219 | return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page); |
220 | } | 220 | } |
221 | 221 | ||
222 | static bool fm10k_can_reuse_rx_page(struct fm10k_rx_buffer *rx_buffer, | 222 | static bool fm10k_can_reuse_rx_page(struct fm10k_rx_buffer *rx_buffer, |
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index 2f70a9b152bd..830466c49987 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c | |||
@@ -6566,7 +6566,7 @@ static void igb_reuse_rx_page(struct igb_ring *rx_ring, | |||
6566 | 6566 | ||
6567 | static inline bool igb_page_is_reserved(struct page *page) | 6567 | static inline bool igb_page_is_reserved(struct page *page) |
6568 | { | 6568 | { |
6569 | return (page_to_nid(page) != numa_mem_id()) || page->pfmemalloc; | 6569 | return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page); |
6570 | } | 6570 | } |
6571 | 6571 | ||
6572 | static bool igb_can_reuse_rx_page(struct igb_rx_buffer *rx_buffer, | 6572 | static bool igb_can_reuse_rx_page(struct igb_rx_buffer *rx_buffer, |
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 9aa6104e34ea..ae21e0b06c3a 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | |||
@@ -1832,7 +1832,7 @@ static void ixgbe_reuse_rx_page(struct ixgbe_ring *rx_ring, | |||
1832 | 1832 | ||
1833 | static inline bool ixgbe_page_is_reserved(struct page *page) | 1833 | static inline bool ixgbe_page_is_reserved(struct page *page) |
1834 | { | 1834 | { |
1835 | return (page_to_nid(page) != numa_mem_id()) || page->pfmemalloc; | 1835 | return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page); |
1836 | } | 1836 | } |
1837 | 1837 | ||
1838 | /** | 1838 | /** |
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c index e71cdde9cb01..1d7b00b038a2 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c | |||
@@ -765,7 +765,7 @@ static void ixgbevf_reuse_rx_page(struct ixgbevf_ring *rx_ring, | |||
765 | 765 | ||
766 | static inline bool ixgbevf_page_is_reserved(struct page *page) | 766 | static inline bool ixgbevf_page_is_reserved(struct page *page) |
767 | { | 767 | { |
768 | return (page_to_nid(page) != numa_mem_id()) || page->pfmemalloc; | 768 | return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page); |
769 | } | 769 | } |
770 | 770 | ||
771 | /** | 771 | /** |
diff --git a/drivers/net/ethernet/micrel/ks8842.c b/drivers/net/ethernet/micrel/ks8842.c index f78909a00f15..09d2e16fd6b0 100644 --- a/drivers/net/ethernet/micrel/ks8842.c +++ b/drivers/net/ethernet/micrel/ks8842.c | |||
@@ -952,9 +952,8 @@ static int ks8842_alloc_dma_bufs(struct net_device *netdev) | |||
952 | 952 | ||
953 | sg_dma_address(&tx_ctl->sg) = dma_map_single(adapter->dev, | 953 | sg_dma_address(&tx_ctl->sg) = dma_map_single(adapter->dev, |
954 | tx_ctl->buf, DMA_BUFFER_SIZE, DMA_TO_DEVICE); | 954 | tx_ctl->buf, DMA_BUFFER_SIZE, DMA_TO_DEVICE); |
955 | err = dma_mapping_error(adapter->dev, | 955 | if (dma_mapping_error(adapter->dev, sg_dma_address(&tx_ctl->sg))) { |
956 | sg_dma_address(&tx_ctl->sg)); | 956 | err = -ENOMEM; |
957 | if (err) { | ||
958 | sg_dma_address(&tx_ctl->sg) = 0; | 957 | sg_dma_address(&tx_ctl->sg) = 0; |
959 | goto err; | 958 | goto err; |
960 | } | 959 | } |
diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c index 605cc8948594..b1a4ea21c91c 100644 --- a/drivers/net/ethernet/sfc/ef10.c +++ b/drivers/net/ethernet/sfc/ef10.c | |||
@@ -1282,7 +1282,12 @@ static size_t efx_ef10_update_stats_common(struct efx_nic *efx, u64 *full_stats, | |||
1282 | } | 1282 | } |
1283 | } | 1283 | } |
1284 | 1284 | ||
1285 | if (core_stats) { | 1285 | if (!core_stats) |
1286 | return stats_count; | ||
1287 | |||
1288 | if (nic_data->datapath_caps & | ||
1289 | 1 << MC_CMD_GET_CAPABILITIES_OUT_EVB_LBN) { | ||
1290 | /* Use vadaptor stats. */ | ||
1286 | core_stats->rx_packets = stats[EF10_STAT_rx_unicast] + | 1291 | core_stats->rx_packets = stats[EF10_STAT_rx_unicast] + |
1287 | stats[EF10_STAT_rx_multicast] + | 1292 | stats[EF10_STAT_rx_multicast] + |
1288 | stats[EF10_STAT_rx_broadcast]; | 1293 | stats[EF10_STAT_rx_broadcast]; |
@@ -1302,6 +1307,26 @@ static size_t efx_ef10_update_stats_common(struct efx_nic *efx, u64 *full_stats, | |||
1302 | core_stats->rx_fifo_errors = stats[EF10_STAT_rx_overflow]; | 1307 | core_stats->rx_fifo_errors = stats[EF10_STAT_rx_overflow]; |
1303 | core_stats->rx_errors = core_stats->rx_crc_errors; | 1308 | core_stats->rx_errors = core_stats->rx_crc_errors; |
1304 | core_stats->tx_errors = stats[EF10_STAT_tx_bad]; | 1309 | core_stats->tx_errors = stats[EF10_STAT_tx_bad]; |
1310 | } else { | ||
1311 | /* Use port stats. */ | ||
1312 | core_stats->rx_packets = stats[EF10_STAT_port_rx_packets]; | ||
1313 | core_stats->tx_packets = stats[EF10_STAT_port_tx_packets]; | ||
1314 | core_stats->rx_bytes = stats[EF10_STAT_port_rx_bytes]; | ||
1315 | core_stats->tx_bytes = stats[EF10_STAT_port_tx_bytes]; | ||
1316 | core_stats->rx_dropped = stats[EF10_STAT_port_rx_nodesc_drops] + | ||
1317 | stats[GENERIC_STAT_rx_nodesc_trunc] + | ||
1318 | stats[GENERIC_STAT_rx_noskb_drops]; | ||
1319 | core_stats->multicast = stats[EF10_STAT_port_rx_multicast]; | ||
1320 | core_stats->rx_length_errors = | ||
1321 | stats[EF10_STAT_port_rx_gtjumbo] + | ||
1322 | stats[EF10_STAT_port_rx_length_error]; | ||
1323 | core_stats->rx_crc_errors = stats[EF10_STAT_port_rx_bad]; | ||
1324 | core_stats->rx_frame_errors = | ||
1325 | stats[EF10_STAT_port_rx_align_error]; | ||
1326 | core_stats->rx_fifo_errors = stats[EF10_STAT_port_rx_overflow]; | ||
1327 | core_stats->rx_errors = (core_stats->rx_length_errors + | ||
1328 | core_stats->rx_crc_errors + | ||
1329 | core_stats->rx_frame_errors); | ||
1305 | } | 1330 | } |
1306 | 1331 | ||
1307 | return stats_count; | 1332 | return stats_count; |
diff --git a/drivers/net/phy/fixed_phy.c b/drivers/net/phy/fixed_phy.c index 1960b46add65..d7a65247f952 100644 --- a/drivers/net/phy/fixed_phy.c +++ b/drivers/net/phy/fixed_phy.c | |||
@@ -290,6 +290,15 @@ struct phy_device *fixed_phy_register(unsigned int irq, | |||
290 | return ERR_PTR(-EINVAL); | 290 | return ERR_PTR(-EINVAL); |
291 | } | 291 | } |
292 | 292 | ||
293 | /* propagate the fixed link values to struct phy_device */ | ||
294 | phy->link = status->link; | ||
295 | if (status->link) { | ||
296 | phy->speed = status->speed; | ||
297 | phy->duplex = status->duplex; | ||
298 | phy->pause = status->pause; | ||
299 | phy->asym_pause = status->asym_pause; | ||
300 | } | ||
301 | |||
293 | of_node_get(np); | 302 | of_node_get(np); |
294 | phy->dev.of_node = np; | 303 | phy->dev.of_node = np; |
295 | 304 | ||
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c index b2197b506acb..34fe339f4e80 100644 --- a/drivers/net/phy/phy.c +++ b/drivers/net/phy/phy.c | |||
@@ -811,6 +811,7 @@ void phy_state_machine(struct work_struct *work) | |||
811 | bool needs_aneg = false, do_suspend = false; | 811 | bool needs_aneg = false, do_suspend = false; |
812 | enum phy_state old_state; | 812 | enum phy_state old_state; |
813 | int err = 0; | 813 | int err = 0; |
814 | int old_link; | ||
814 | 815 | ||
815 | mutex_lock(&phydev->lock); | 816 | mutex_lock(&phydev->lock); |
816 | 817 | ||
@@ -896,11 +897,18 @@ void phy_state_machine(struct work_struct *work) | |||
896 | phydev->adjust_link(phydev->attached_dev); | 897 | phydev->adjust_link(phydev->attached_dev); |
897 | break; | 898 | break; |
898 | case PHY_RUNNING: | 899 | case PHY_RUNNING: |
899 | /* Only register a CHANGE if we are | 900 | /* Only register a CHANGE if we are polling or ignoring |
900 | * polling or ignoring interrupts | 901 | * interrupts and link changed since latest checking. |
901 | */ | 902 | */ |
902 | if (!phy_interrupt_is_valid(phydev)) | 903 | if (!phy_interrupt_is_valid(phydev)) { |
903 | phydev->state = PHY_CHANGELINK; | 904 | old_link = phydev->link; |
905 | err = phy_read_status(phydev); | ||
906 | if (err) | ||
907 | break; | ||
908 | |||
909 | if (old_link != phydev->link) | ||
910 | phydev->state = PHY_CHANGELINK; | ||
911 | } | ||
904 | break; | 912 | break; |
905 | case PHY_CHANGELINK: | 913 | case PHY_CHANGELINK: |
906 | err = phy_read_status(phydev); | 914 | err = phy_read_status(phydev); |
@@ -1030,10 +1038,14 @@ int phy_read_mmd_indirect(struct phy_device *phydev, int prtad, | |||
1030 | int value = -1; | 1038 | int value = -1; |
1031 | 1039 | ||
1032 | if (phydrv->read_mmd_indirect == NULL) { | 1040 | if (phydrv->read_mmd_indirect == NULL) { |
1033 | mmd_phy_indirect(phydev->bus, prtad, devad, addr); | 1041 | struct mii_bus *bus = phydev->bus; |
1042 | |||
1043 | mutex_lock(&bus->mdio_lock); | ||
1044 | mmd_phy_indirect(bus, prtad, devad, addr); | ||
1034 | 1045 | ||
1035 | /* Read the content of the MMD's selected register */ | 1046 | /* Read the content of the MMD's selected register */ |
1036 | value = phydev->bus->read(phydev->bus, addr, MII_MMD_DATA); | 1047 | value = bus->read(bus, addr, MII_MMD_DATA); |
1048 | mutex_unlock(&bus->mdio_lock); | ||
1037 | } else { | 1049 | } else { |
1038 | value = phydrv->read_mmd_indirect(phydev, prtad, devad, addr); | 1050 | value = phydrv->read_mmd_indirect(phydev, prtad, devad, addr); |
1039 | } | 1051 | } |
@@ -1063,10 +1075,14 @@ void phy_write_mmd_indirect(struct phy_device *phydev, int prtad, | |||
1063 | struct phy_driver *phydrv = phydev->drv; | 1075 | struct phy_driver *phydrv = phydev->drv; |
1064 | 1076 | ||
1065 | if (phydrv->write_mmd_indirect == NULL) { | 1077 | if (phydrv->write_mmd_indirect == NULL) { |
1066 | mmd_phy_indirect(phydev->bus, prtad, devad, addr); | 1078 | struct mii_bus *bus = phydev->bus; |
1079 | |||
1080 | mutex_lock(&bus->mdio_lock); | ||
1081 | mmd_phy_indirect(bus, prtad, devad, addr); | ||
1067 | 1082 | ||
1068 | /* Write the data into MMD's selected register */ | 1083 | /* Write the data into MMD's selected register */ |
1069 | phydev->bus->write(phydev->bus, addr, MII_MMD_DATA, data); | 1084 | bus->write(bus, addr, MII_MMD_DATA, data); |
1085 | mutex_unlock(&bus->mdio_lock); | ||
1070 | } else { | 1086 | } else { |
1071 | phydrv->write_mmd_indirect(phydev, prtad, devad, addr, data); | 1087 | phydrv->write_mmd_indirect(phydev, prtad, devad, addr, data); |
1072 | } | 1088 | } |
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c index 0302483de240..55f01788df5e 100644 --- a/drivers/net/phy/phy_device.c +++ b/drivers/net/phy/phy_device.c | |||
@@ -176,7 +176,7 @@ struct phy_device *phy_device_create(struct mii_bus *bus, int addr, int phy_id, | |||
176 | if (c45_ids) | 176 | if (c45_ids) |
177 | dev->c45_ids = *c45_ids; | 177 | dev->c45_ids = *c45_ids; |
178 | dev->bus = bus; | 178 | dev->bus = bus; |
179 | dev->dev.parent = bus->parent; | 179 | dev->dev.parent = &bus->dev; |
180 | dev->dev.bus = &mdio_bus_type; | 180 | dev->dev.bus = &mdio_bus_type; |
181 | dev->irq = bus->irq != NULL ? bus->irq[addr] : PHY_POLL; | 181 | dev->irq = bus->irq != NULL ? bus->irq[addr] : PHY_POLL; |
182 | dev_set_name(&dev->dev, PHY_ID_FMT, bus->id, addr); | 182 | dev_set_name(&dev->dev, PHY_ID_FMT, bus->id, addr); |
diff --git a/drivers/net/phy/smsc.c b/drivers/net/phy/smsc.c index c0f6479e19d4..70b08958763a 100644 --- a/drivers/net/phy/smsc.c +++ b/drivers/net/phy/smsc.c | |||
@@ -91,19 +91,18 @@ static int lan911x_config_init(struct phy_device *phydev) | |||
91 | } | 91 | } |
92 | 92 | ||
93 | /* | 93 | /* |
94 | * The LAN8710/LAN8720 requires a minimum of 2 link pulses within 64ms of each | 94 | * The LAN87xx suffers from rare absence of the ENERGYON-bit when Ethernet cable |
95 | * other in order to set the ENERGYON bit and exit EDPD mode. If a link partner | 95 | * plugs in while LAN87xx is in Energy Detect Power-Down mode. This leads to |
96 | * does send the pulses within this interval, the PHY will remained powered | 96 | * unstable detection of plugging in Ethernet cable. |
97 | * down. | 97 | * This workaround disables Energy Detect Power-Down mode and waiting for |
98 | * | 98 | * response on link pulses to detect presence of plugged Ethernet cable. |
99 | * This workaround will manually toggle the PHY on/off upon calls to read_status | 99 | * The Energy Detect Power-Down mode is enabled again in the end of procedure to |
100 | * in order to generate link test pulses if the link is down. If a link partner | 100 | * save approximately 220 mW of power if cable is unplugged. |
101 | * is present, it will respond to the pulses, which will cause the ENERGYON bit | ||
102 | * to be set and will cause the EDPD mode to be exited. | ||
103 | */ | 101 | */ |
104 | static int lan87xx_read_status(struct phy_device *phydev) | 102 | static int lan87xx_read_status(struct phy_device *phydev) |
105 | { | 103 | { |
106 | int err = genphy_read_status(phydev); | 104 | int err = genphy_read_status(phydev); |
105 | int i; | ||
107 | 106 | ||
108 | if (!phydev->link) { | 107 | if (!phydev->link) { |
109 | /* Disable EDPD to wake up PHY */ | 108 | /* Disable EDPD to wake up PHY */ |
@@ -116,8 +115,16 @@ static int lan87xx_read_status(struct phy_device *phydev) | |||
116 | if (rc < 0) | 115 | if (rc < 0) |
117 | return rc; | 116 | return rc; |
118 | 117 | ||
119 | /* Sleep 64 ms to allow ~5 link test pulses to be sent */ | 118 | /* Wait max 640 ms to detect energy */ |
120 | msleep(64); | 119 | for (i = 0; i < 64; i++) { |
120 | /* Sleep to allow link test pulses to be sent */ | ||
121 | msleep(10); | ||
122 | rc = phy_read(phydev, MII_LAN83C185_CTRL_STATUS); | ||
123 | if (rc < 0) | ||
124 | return rc; | ||
125 | if (rc & MII_LAN83C185_ENERGYON) | ||
126 | break; | ||
127 | } | ||
121 | 128 | ||
122 | /* Re-enable EDPD */ | 129 | /* Re-enable EDPD */ |
123 | rc = phy_read(phydev, MII_LAN83C185_CTRL_STATUS); | 130 | rc = phy_read(phydev, MII_LAN83C185_CTRL_STATUS); |
@@ -191,7 +198,7 @@ static struct phy_driver smsc_phy_driver[] = { | |||
191 | 198 | ||
192 | /* basic functions */ | 199 | /* basic functions */ |
193 | .config_aneg = genphy_config_aneg, | 200 | .config_aneg = genphy_config_aneg, |
194 | .read_status = genphy_read_status, | 201 | .read_status = lan87xx_read_status, |
195 | .config_init = smsc_phy_config_init, | 202 | .config_init = smsc_phy_config_init, |
196 | .soft_reset = smsc_phy_reset, | 203 | .soft_reset = smsc_phy_reset, |
197 | 204 | ||
diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c index 9d15566521a7..fa8f5046afe9 100644 --- a/drivers/net/ppp/ppp_generic.c +++ b/drivers/net/ppp/ppp_generic.c | |||
@@ -269,9 +269,9 @@ static void ppp_ccp_peek(struct ppp *ppp, struct sk_buff *skb, int inbound); | |||
269 | static void ppp_ccp_closed(struct ppp *ppp); | 269 | static void ppp_ccp_closed(struct ppp *ppp); |
270 | static struct compressor *find_compressor(int type); | 270 | static struct compressor *find_compressor(int type); |
271 | static void ppp_get_stats(struct ppp *ppp, struct ppp_stats *st); | 271 | static void ppp_get_stats(struct ppp *ppp, struct ppp_stats *st); |
272 | static struct ppp *ppp_create_interface(struct net *net, int unit, int *retp); | 272 | static struct ppp *ppp_create_interface(struct net *net, int unit, |
273 | struct file *file, int *retp); | ||
273 | static void init_ppp_file(struct ppp_file *pf, int kind); | 274 | static void init_ppp_file(struct ppp_file *pf, int kind); |
274 | static void ppp_shutdown_interface(struct ppp *ppp); | ||
275 | static void ppp_destroy_interface(struct ppp *ppp); | 275 | static void ppp_destroy_interface(struct ppp *ppp); |
276 | static struct ppp *ppp_find_unit(struct ppp_net *pn, int unit); | 276 | static struct ppp *ppp_find_unit(struct ppp_net *pn, int unit); |
277 | static struct channel *ppp_find_channel(struct ppp_net *pn, int unit); | 277 | static struct channel *ppp_find_channel(struct ppp_net *pn, int unit); |
@@ -392,8 +392,10 @@ static int ppp_release(struct inode *unused, struct file *file) | |||
392 | file->private_data = NULL; | 392 | file->private_data = NULL; |
393 | if (pf->kind == INTERFACE) { | 393 | if (pf->kind == INTERFACE) { |
394 | ppp = PF_TO_PPP(pf); | 394 | ppp = PF_TO_PPP(pf); |
395 | rtnl_lock(); | ||
395 | if (file == ppp->owner) | 396 | if (file == ppp->owner) |
396 | ppp_shutdown_interface(ppp); | 397 | unregister_netdevice(ppp->dev); |
398 | rtnl_unlock(); | ||
397 | } | 399 | } |
398 | if (atomic_dec_and_test(&pf->refcnt)) { | 400 | if (atomic_dec_and_test(&pf->refcnt)) { |
399 | switch (pf->kind) { | 401 | switch (pf->kind) { |
@@ -593,8 +595,10 @@ static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg) | |||
593 | mutex_lock(&ppp_mutex); | 595 | mutex_lock(&ppp_mutex); |
594 | if (pf->kind == INTERFACE) { | 596 | if (pf->kind == INTERFACE) { |
595 | ppp = PF_TO_PPP(pf); | 597 | ppp = PF_TO_PPP(pf); |
598 | rtnl_lock(); | ||
596 | if (file == ppp->owner) | 599 | if (file == ppp->owner) |
597 | ppp_shutdown_interface(ppp); | 600 | unregister_netdevice(ppp->dev); |
601 | rtnl_unlock(); | ||
598 | } | 602 | } |
599 | if (atomic_long_read(&file->f_count) < 2) { | 603 | if (atomic_long_read(&file->f_count) < 2) { |
600 | ppp_release(NULL, file); | 604 | ppp_release(NULL, file); |
@@ -838,11 +842,10 @@ static int ppp_unattached_ioctl(struct net *net, struct ppp_file *pf, | |||
838 | /* Create a new ppp unit */ | 842 | /* Create a new ppp unit */ |
839 | if (get_user(unit, p)) | 843 | if (get_user(unit, p)) |
840 | break; | 844 | break; |
841 | ppp = ppp_create_interface(net, unit, &err); | 845 | ppp = ppp_create_interface(net, unit, file, &err); |
842 | if (!ppp) | 846 | if (!ppp) |
843 | break; | 847 | break; |
844 | file->private_data = &ppp->file; | 848 | file->private_data = &ppp->file; |
845 | ppp->owner = file; | ||
846 | err = -EFAULT; | 849 | err = -EFAULT; |
847 | if (put_user(ppp->file.index, p)) | 850 | if (put_user(ppp->file.index, p)) |
848 | break; | 851 | break; |
@@ -916,6 +919,16 @@ static __net_init int ppp_init_net(struct net *net) | |||
916 | static __net_exit void ppp_exit_net(struct net *net) | 919 | static __net_exit void ppp_exit_net(struct net *net) |
917 | { | 920 | { |
918 | struct ppp_net *pn = net_generic(net, ppp_net_id); | 921 | struct ppp_net *pn = net_generic(net, ppp_net_id); |
922 | struct ppp *ppp; | ||
923 | LIST_HEAD(list); | ||
924 | int id; | ||
925 | |||
926 | rtnl_lock(); | ||
927 | idr_for_each_entry(&pn->units_idr, ppp, id) | ||
928 | unregister_netdevice_queue(ppp->dev, &list); | ||
929 | |||
930 | unregister_netdevice_many(&list); | ||
931 | rtnl_unlock(); | ||
919 | 932 | ||
920 | idr_destroy(&pn->units_idr); | 933 | idr_destroy(&pn->units_idr); |
921 | } | 934 | } |
@@ -1088,8 +1101,28 @@ static int ppp_dev_init(struct net_device *dev) | |||
1088 | return 0; | 1101 | return 0; |
1089 | } | 1102 | } |
1090 | 1103 | ||
1104 | static void ppp_dev_uninit(struct net_device *dev) | ||
1105 | { | ||
1106 | struct ppp *ppp = netdev_priv(dev); | ||
1107 | struct ppp_net *pn = ppp_pernet(ppp->ppp_net); | ||
1108 | |||
1109 | ppp_lock(ppp); | ||
1110 | ppp->closing = 1; | ||
1111 | ppp_unlock(ppp); | ||
1112 | |||
1113 | mutex_lock(&pn->all_ppp_mutex); | ||
1114 | unit_put(&pn->units_idr, ppp->file.index); | ||
1115 | mutex_unlock(&pn->all_ppp_mutex); | ||
1116 | |||
1117 | ppp->owner = NULL; | ||
1118 | |||
1119 | ppp->file.dead = 1; | ||
1120 | wake_up_interruptible(&ppp->file.rwait); | ||
1121 | } | ||
1122 | |||
1091 | static const struct net_device_ops ppp_netdev_ops = { | 1123 | static const struct net_device_ops ppp_netdev_ops = { |
1092 | .ndo_init = ppp_dev_init, | 1124 | .ndo_init = ppp_dev_init, |
1125 | .ndo_uninit = ppp_dev_uninit, | ||
1093 | .ndo_start_xmit = ppp_start_xmit, | 1126 | .ndo_start_xmit = ppp_start_xmit, |
1094 | .ndo_do_ioctl = ppp_net_ioctl, | 1127 | .ndo_do_ioctl = ppp_net_ioctl, |
1095 | .ndo_get_stats64 = ppp_get_stats64, | 1128 | .ndo_get_stats64 = ppp_get_stats64, |
@@ -2667,8 +2700,8 @@ ppp_get_stats(struct ppp *ppp, struct ppp_stats *st) | |||
2667 | * or if there is already a unit with the requested number. | 2700 | * or if there is already a unit with the requested number. |
2668 | * unit == -1 means allocate a new number. | 2701 | * unit == -1 means allocate a new number. |
2669 | */ | 2702 | */ |
2670 | static struct ppp * | 2703 | static struct ppp *ppp_create_interface(struct net *net, int unit, |
2671 | ppp_create_interface(struct net *net, int unit, int *retp) | 2704 | struct file *file, int *retp) |
2672 | { | 2705 | { |
2673 | struct ppp *ppp; | 2706 | struct ppp *ppp; |
2674 | struct ppp_net *pn; | 2707 | struct ppp_net *pn; |
@@ -2688,6 +2721,7 @@ ppp_create_interface(struct net *net, int unit, int *retp) | |||
2688 | ppp->mru = PPP_MRU; | 2721 | ppp->mru = PPP_MRU; |
2689 | init_ppp_file(&ppp->file, INTERFACE); | 2722 | init_ppp_file(&ppp->file, INTERFACE); |
2690 | ppp->file.hdrlen = PPP_HDRLEN - 2; /* don't count proto bytes */ | 2723 | ppp->file.hdrlen = PPP_HDRLEN - 2; /* don't count proto bytes */ |
2724 | ppp->owner = file; | ||
2691 | for (i = 0; i < NUM_NP; ++i) | 2725 | for (i = 0; i < NUM_NP; ++i) |
2692 | ppp->npmode[i] = NPMODE_PASS; | 2726 | ppp->npmode[i] = NPMODE_PASS; |
2693 | INIT_LIST_HEAD(&ppp->channels); | 2727 | INIT_LIST_HEAD(&ppp->channels); |
@@ -2776,34 +2810,6 @@ init_ppp_file(struct ppp_file *pf, int kind) | |||
2776 | } | 2810 | } |
2777 | 2811 | ||
2778 | /* | 2812 | /* |
2779 | * Take down a ppp interface unit - called when the owning file | ||
2780 | * (the one that created the unit) is closed or detached. | ||
2781 | */ | ||
2782 | static void ppp_shutdown_interface(struct ppp *ppp) | ||
2783 | { | ||
2784 | struct ppp_net *pn; | ||
2785 | |||
2786 | pn = ppp_pernet(ppp->ppp_net); | ||
2787 | mutex_lock(&pn->all_ppp_mutex); | ||
2788 | |||
2789 | /* This will call dev_close() for us. */ | ||
2790 | ppp_lock(ppp); | ||
2791 | if (!ppp->closing) { | ||
2792 | ppp->closing = 1; | ||
2793 | ppp_unlock(ppp); | ||
2794 | unregister_netdev(ppp->dev); | ||
2795 | unit_put(&pn->units_idr, ppp->file.index); | ||
2796 | } else | ||
2797 | ppp_unlock(ppp); | ||
2798 | |||
2799 | ppp->file.dead = 1; | ||
2800 | ppp->owner = NULL; | ||
2801 | wake_up_interruptible(&ppp->file.rwait); | ||
2802 | |||
2803 | mutex_unlock(&pn->all_ppp_mutex); | ||
2804 | } | ||
2805 | |||
2806 | /* | ||
2807 | * Free the memory used by a ppp unit. This is only called once | 2813 | * Free the memory used by a ppp unit. This is only called once |
2808 | * there are no channels connected to the unit and no file structs | 2814 | * there are no channels connected to the unit and no file structs |
2809 | * that reference the unit. | 2815 | * that reference the unit. |
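Two things happen in the ppp_generic.c hunks above: per-unit teardown moves from ppp_shutdown_interface() into the netdev ->ndo_uninit() hook so that a plain unregister_netdevice() drives it, and ppp_exit_net() now batches every unit of a dying namespace through unregister_netdevice_queue()/unregister_netdevice_many() under a single rtnl_lock(). A minimal sketch of that batching pattern for a pernet exit handler; "my_pernet", "my_net_id" and the idr field are illustrative names, not the ppp ones.

static void my_pernet_exit(struct net *net)
{
	struct my_pernet *pn = net_generic(net, my_net_id);
	struct net_device *dev;
	LIST_HEAD(list);
	int id;

	rtnl_lock();
	/* queue every device of this namespace onto one list ... */
	idr_for_each_entry(&pn->dev_idr, dev, id)
		unregister_netdevice_queue(dev, &list);
	/* ... so the costly notifier/RCU synchronisation runs once */
	unregister_netdevice_many(&list);
	rtnl_unlock();
}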
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c index 9d43460ce3c7..64a60afbe50c 100644 --- a/drivers/net/usb/qmi_wwan.c +++ b/drivers/net/usb/qmi_wwan.c | |||
@@ -785,6 +785,7 @@ static const struct usb_device_id products[] = { | |||
785 | {QMI_FIXED_INTF(0x413c, 0x81a4, 8)}, /* Dell Wireless 5570e HSPA+ (42Mbps) Mobile Broadband Card */ | 785 | {QMI_FIXED_INTF(0x413c, 0x81a4, 8)}, /* Dell Wireless 5570e HSPA+ (42Mbps) Mobile Broadband Card */ |
786 | {QMI_FIXED_INTF(0x413c, 0x81a8, 8)}, /* Dell Wireless 5808 Gobi(TM) 4G LTE Mobile Broadband Card */ | 786 | {QMI_FIXED_INTF(0x413c, 0x81a8, 8)}, /* Dell Wireless 5808 Gobi(TM) 4G LTE Mobile Broadband Card */ |
787 | {QMI_FIXED_INTF(0x413c, 0x81a9, 8)}, /* Dell Wireless 5808e Gobi(TM) 4G LTE Mobile Broadband Card */ | 787 | {QMI_FIXED_INTF(0x413c, 0x81a9, 8)}, /* Dell Wireless 5808e Gobi(TM) 4G LTE Mobile Broadband Card */ |
788 | {QMI_FIXED_INTF(0x03f0, 0x4e1d, 8)}, /* HP lt4111 LTE/EV-DO/HSPA+ Gobi 4G Module */ | ||
788 | {QMI_FIXED_INTF(0x03f0, 0x581d, 4)}, /* HP lt4112 LTE/HSPA+ Gobi 4G Module (Huawei me906e) */ | 789 | {QMI_FIXED_INTF(0x03f0, 0x581d, 4)}, /* HP lt4112 LTE/HSPA+ Gobi 4G Module (Huawei me906e) */ |
789 | 790 | ||
790 | /* 4. Gobi 1000 devices */ | 791 | /* 4. Gobi 1000 devices */ |
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c index 3c86b107275a..e0498571ae26 100644 --- a/drivers/net/usb/usbnet.c +++ b/drivers/net/usb/usbnet.c | |||
@@ -778,7 +778,7 @@ int usbnet_stop (struct net_device *net) | |||
778 | { | 778 | { |
779 | struct usbnet *dev = netdev_priv(net); | 779 | struct usbnet *dev = netdev_priv(net); |
780 | struct driver_info *info = dev->driver_info; | 780 | struct driver_info *info = dev->driver_info; |
781 | int retval, pm; | 781 | int retval, pm, mpn; |
782 | 782 | ||
783 | clear_bit(EVENT_DEV_OPEN, &dev->flags); | 783 | clear_bit(EVENT_DEV_OPEN, &dev->flags); |
784 | netif_stop_queue (net); | 784 | netif_stop_queue (net); |
@@ -809,6 +809,8 @@ int usbnet_stop (struct net_device *net) | |||
809 | 809 | ||
810 | usbnet_purge_paused_rxq(dev); | 810 | usbnet_purge_paused_rxq(dev); |
811 | 811 | ||
812 | mpn = !test_and_clear_bit(EVENT_NO_RUNTIME_PM, &dev->flags); | ||
813 | |||
812 | /* deferred work (task, timer, softirq) must also stop. | 814 | /* deferred work (task, timer, softirq) must also stop. |
813 | * can't flush_scheduled_work() until we drop rtnl (later), | 815 | * can't flush_scheduled_work() until we drop rtnl (later), |
814 | * else workers could deadlock; so make workers a NOP. | 816 | * else workers could deadlock; so make workers a NOP. |
@@ -819,8 +821,7 @@ int usbnet_stop (struct net_device *net) | |||
819 | if (!pm) | 821 | if (!pm) |
820 | usb_autopm_put_interface(dev->intf); | 822 | usb_autopm_put_interface(dev->intf); |
821 | 823 | ||
822 | if (info->manage_power && | 824 | if (info->manage_power && mpn) |
823 | !test_and_clear_bit(EVENT_NO_RUNTIME_PM, &dev->flags)) | ||
824 | info->manage_power(dev, 0); | 825 | info->manage_power(dev, 0); |
825 | else | 826 | else |
826 | usb_autopm_put_interface(dev->intf); | 827 | usb_autopm_put_interface(dev->intf); |
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index 34c519eb1db5..5bc4b1ed67b3 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c | |||
@@ -2216,6 +2216,8 @@ static int vxlan_open(struct net_device *dev) | |||
2216 | 2216 | ||
2217 | if (vxlan_addr_multicast(&vxlan->default_dst.remote_ip)) { | 2217 | if (vxlan_addr_multicast(&vxlan->default_dst.remote_ip)) { |
2218 | ret = vxlan_igmp_join(vxlan); | 2218 | ret = vxlan_igmp_join(vxlan); |
2219 | if (ret == -EADDRINUSE) | ||
2220 | ret = 0; | ||
2219 | if (ret) { | 2221 | if (ret) { |
2220 | vxlan_sock_release(vs); | 2222 | vxlan_sock_release(vs); |
2221 | return ret; | 2223 | return ret; |
diff --git a/drivers/pci/Kconfig b/drivers/pci/Kconfig index 73de4efcbe6e..944f50015ed0 100644 --- a/drivers/pci/Kconfig +++ b/drivers/pci/Kconfig | |||
@@ -2,7 +2,7 @@ | |||
2 | # PCI configuration | 2 | # PCI configuration |
3 | # | 3 | # |
4 | config PCI_BUS_ADDR_T_64BIT | 4 | config PCI_BUS_ADDR_T_64BIT |
5 | def_bool y if (ARCH_DMA_ADDR_T_64BIT || 64BIT) | 5 | def_bool y if (ARCH_DMA_ADDR_T_64BIT || (64BIT && !PARISC)) |
6 | depends on PCI | 6 | depends on PCI |
7 | 7 | ||
8 | config PCI_MSI | 8 | config PCI_MSI |
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c index cefd636681b6..f6ae0d0052eb 100644 --- a/drivers/pci/probe.c +++ b/drivers/pci/probe.c | |||
@@ -997,7 +997,12 @@ void set_pcie_port_type(struct pci_dev *pdev) | |||
997 | else if (type == PCI_EXP_TYPE_UPSTREAM || | 997 | else if (type == PCI_EXP_TYPE_UPSTREAM || |
998 | type == PCI_EXP_TYPE_DOWNSTREAM) { | 998 | type == PCI_EXP_TYPE_DOWNSTREAM) { |
999 | parent = pci_upstream_bridge(pdev); | 999 | parent = pci_upstream_bridge(pdev); |
1000 | if (!parent->has_secondary_link) | 1000 | |
1001 | /* | ||
1002 | * Usually there's an upstream device (Root Port or Switch | ||
1003 | * Downstream Port), but we can't assume one exists. | ||
1004 | */ | ||
1005 | if (parent && !parent->has_secondary_link) | ||
1001 | pdev->has_secondary_link = 1; | 1006 | pdev->has_secondary_link = 1; |
1002 | } | 1007 | } |
1003 | } | 1008 | } |
@@ -1103,7 +1108,7 @@ int pci_cfg_space_size(struct pci_dev *dev) | |||
1103 | 1108 | ||
1104 | #define LEGACY_IO_RESOURCE (IORESOURCE_IO | IORESOURCE_PCI_FIXED) | 1109 | #define LEGACY_IO_RESOURCE (IORESOURCE_IO | IORESOURCE_PCI_FIXED) |
1105 | 1110 | ||
1106 | static void pci_msi_setup_pci_dev(struct pci_dev *dev) | 1111 | void pci_msi_setup_pci_dev(struct pci_dev *dev) |
1107 | { | 1112 | { |
1108 | /* | 1113 | /* |
1109 | * Disable the MSI hardware to avoid screaming interrupts | 1114 | * Disable the MSI hardware to avoid screaming interrupts |
diff --git a/drivers/scsi/fnic/fnic.h b/drivers/scsi/fnic/fnic.h index 26270c351624..ce129e595b55 100644 --- a/drivers/scsi/fnic/fnic.h +++ b/drivers/scsi/fnic/fnic.h | |||
@@ -39,7 +39,7 @@ | |||
39 | 39 | ||
40 | #define DRV_NAME "fnic" | 40 | #define DRV_NAME "fnic" |
41 | #define DRV_DESCRIPTION "Cisco FCoE HBA Driver" | 41 | #define DRV_DESCRIPTION "Cisco FCoE HBA Driver" |
42 | #define DRV_VERSION "1.6.0.17" | 42 | #define DRV_VERSION "1.6.0.17a" |
43 | #define PFX DRV_NAME ": " | 43 | #define PFX DRV_NAME ": " |
44 | #define DFX DRV_NAME "%d: " | 44 | #define DFX DRV_NAME "%d: " |
45 | 45 | ||
diff --git a/drivers/scsi/fnic/fnic_scsi.c b/drivers/scsi/fnic/fnic_scsi.c index 155b286f1a9d..25436cd2860c 100644 --- a/drivers/scsi/fnic/fnic_scsi.c +++ b/drivers/scsi/fnic/fnic_scsi.c | |||
@@ -425,6 +425,7 @@ static int fnic_queuecommand_lck(struct scsi_cmnd *sc, void (*done)(struct scsi_ | |||
425 | unsigned long ptr; | 425 | unsigned long ptr; |
426 | struct fc_rport_priv *rdata; | 426 | struct fc_rport_priv *rdata; |
427 | spinlock_t *io_lock = NULL; | 427 | spinlock_t *io_lock = NULL; |
428 | int io_lock_acquired = 0; | ||
428 | 429 | ||
429 | if (unlikely(fnic_chk_state_flags_locked(fnic, FNIC_FLAGS_IO_BLOCKED))) | 430 | if (unlikely(fnic_chk_state_flags_locked(fnic, FNIC_FLAGS_IO_BLOCKED))) |
430 | return SCSI_MLQUEUE_HOST_BUSY; | 431 | return SCSI_MLQUEUE_HOST_BUSY; |
@@ -518,6 +519,7 @@ static int fnic_queuecommand_lck(struct scsi_cmnd *sc, void (*done)(struct scsi_ | |||
518 | spin_lock_irqsave(io_lock, flags); | 519 | spin_lock_irqsave(io_lock, flags); |
519 | 520 | ||
520 | /* initialize rest of io_req */ | 521 | /* initialize rest of io_req */ |
522 | io_lock_acquired = 1; | ||
521 | io_req->port_id = rport->port_id; | 523 | io_req->port_id = rport->port_id; |
522 | io_req->start_time = jiffies; | 524 | io_req->start_time = jiffies; |
523 | CMD_STATE(sc) = FNIC_IOREQ_CMD_PENDING; | 525 | CMD_STATE(sc) = FNIC_IOREQ_CMD_PENDING; |
@@ -571,7 +573,7 @@ out: | |||
571 | (((u64)CMD_FLAGS(sc) >> 32) | CMD_STATE(sc))); | 573 | (((u64)CMD_FLAGS(sc) >> 32) | CMD_STATE(sc))); |
572 | 574 | ||
573 | /* if only we issued IO, will we have the io lock */ | 575 | /* if only we issued IO, will we have the io lock */ |
574 | if (CMD_FLAGS(sc) & FNIC_IO_INITIALIZED) | 576 | if (io_lock_acquired) |
575 | spin_unlock_irqrestore(io_lock, flags); | 577 | spin_unlock_irqrestore(io_lock, flags); |
576 | 578 | ||
577 | atomic_dec(&fnic->in_flight); | 579 | atomic_dec(&fnic->in_flight); |
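The fnic change above stops keying the error-path unlock on CMD_FLAGS(sc), which other contexts can modify, and instead records in a local variable whether this call actually took io_lock. A sketch of that pattern, with hypothetical helpers (reserve_resources, fire_request) standing in for the fnic-specific steps:

static int queue_io(spinlock_t *io_lock)
{
	unsigned long flags;
	int io_lock_acquired = 0;
	int ret;

	ret = reserve_resources();		/* hypothetical early step */
	if (ret)
		goto out;			/* lock was never taken */

	spin_lock_irqsave(io_lock, flags);
	io_lock_acquired = 1;			/* only this thread sets it */
	ret = fire_request();			/* hypothetical */
out:
	if (io_lock_acquired)			/* unlock only what we locked */
		spin_unlock_irqrestore(io_lock, flags);
	return ret;
}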
diff --git a/drivers/scsi/scsi_pm.c b/drivers/scsi/scsi_pm.c index 9e43ae1d2163..e4b799837948 100644 --- a/drivers/scsi/scsi_pm.c +++ b/drivers/scsi/scsi_pm.c | |||
@@ -217,15 +217,15 @@ static int sdev_runtime_suspend(struct device *dev) | |||
217 | { | 217 | { |
218 | const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; | 218 | const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; |
219 | struct scsi_device *sdev = to_scsi_device(dev); | 219 | struct scsi_device *sdev = to_scsi_device(dev); |
220 | int err; | 220 | int err = 0; |
221 | 221 | ||
222 | err = blk_pre_runtime_suspend(sdev->request_queue); | 222 | if (pm && pm->runtime_suspend) { |
223 | if (err) | 223 | err = blk_pre_runtime_suspend(sdev->request_queue); |
224 | return err; | 224 | if (err) |
225 | if (pm && pm->runtime_suspend) | 225 | return err; |
226 | err = pm->runtime_suspend(dev); | 226 | err = pm->runtime_suspend(dev); |
227 | blk_post_runtime_suspend(sdev->request_queue, err); | 227 | blk_post_runtime_suspend(sdev->request_queue, err); |
228 | 228 | } | |
229 | return err; | 229 | return err; |
230 | } | 230 | } |
231 | 231 | ||
@@ -248,11 +248,11 @@ static int sdev_runtime_resume(struct device *dev) | |||
248 | const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; | 248 | const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; |
249 | int err = 0; | 249 | int err = 0; |
250 | 250 | ||
251 | blk_pre_runtime_resume(sdev->request_queue); | 251 | if (pm && pm->runtime_resume) { |
252 | if (pm && pm->runtime_resume) | 252 | blk_pre_runtime_resume(sdev->request_queue); |
253 | err = pm->runtime_resume(dev); | 253 | err = pm->runtime_resume(dev); |
254 | blk_post_runtime_resume(sdev->request_queue, err); | 254 | blk_post_runtime_resume(sdev->request_queue, err); |
255 | 255 | } | |
256 | return err; | 256 | return err; |
257 | } | 257 | } |
258 | 258 | ||
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c index cd77a064c772..fd092909a457 100644 --- a/drivers/target/iscsi/iscsi_target.c +++ b/drivers/target/iscsi/iscsi_target.c | |||
@@ -968,9 +968,9 @@ int iscsit_setup_scsi_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd, | |||
968 | cmd->cmd_flags |= ICF_NON_IMMEDIATE_UNSOLICITED_DATA; | 968 | cmd->cmd_flags |= ICF_NON_IMMEDIATE_UNSOLICITED_DATA; |
969 | 969 | ||
970 | conn->sess->init_task_tag = cmd->init_task_tag = hdr->itt; | 970 | conn->sess->init_task_tag = cmd->init_task_tag = hdr->itt; |
971 | if (hdr->flags & ISCSI_FLAG_CMD_READ) { | 971 | if (hdr->flags & ISCSI_FLAG_CMD_READ) |
972 | cmd->targ_xfer_tag = session_get_next_ttt(conn->sess); | 972 | cmd->targ_xfer_tag = session_get_next_ttt(conn->sess); |
973 | } else if (hdr->flags & ISCSI_FLAG_CMD_WRITE) | 973 | else |
974 | cmd->targ_xfer_tag = 0xFFFFFFFF; | 974 | cmd->targ_xfer_tag = 0xFFFFFFFF; |
975 | cmd->cmd_sn = be32_to_cpu(hdr->cmdsn); | 975 | cmd->cmd_sn = be32_to_cpu(hdr->cmdsn); |
976 | cmd->exp_stat_sn = be32_to_cpu(hdr->exp_statsn); | 976 | cmd->exp_stat_sn = be32_to_cpu(hdr->exp_statsn); |
diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c index c2e9fea90b4a..860e84046177 100644 --- a/drivers/target/target_core_configfs.c +++ b/drivers/target/target_core_configfs.c | |||
@@ -457,8 +457,15 @@ void target_unregister_template(const struct target_core_fabric_ops *fo) | |||
457 | if (!strcmp(t->tf_ops->name, fo->name)) { | 457 | if (!strcmp(t->tf_ops->name, fo->name)) { |
458 | BUG_ON(atomic_read(&t->tf_access_cnt)); | 458 | BUG_ON(atomic_read(&t->tf_access_cnt)); |
459 | list_del(&t->tf_list); | 459 | list_del(&t->tf_list); |
460 | mutex_unlock(&g_tf_lock); | ||
461 | /* | ||
462 | * Wait for any outstanding fabric se_deve_entry->rcu_head | ||
463 | * callbacks to complete post kfree_rcu(), before allowing | ||
464 | * fabric driver unload of TFO->module to proceed. | ||
465 | */ | ||
466 | rcu_barrier(); | ||
460 | kfree(t); | 467 | kfree(t); |
461 | break; | 468 | return; |
462 | } | 469 | } |
463 | } | 470 | } |
464 | mutex_unlock(&g_tf_lock); | 471 | mutex_unlock(&g_tf_lock); |
diff --git a/drivers/target/target_core_hba.c b/drivers/target/target_core_hba.c index 62ea4e8e70a8..be9cefc07407 100644 --- a/drivers/target/target_core_hba.c +++ b/drivers/target/target_core_hba.c | |||
@@ -84,8 +84,16 @@ void target_backend_unregister(const struct target_backend_ops *ops) | |||
84 | list_for_each_entry(tb, &backend_list, list) { | 84 | list_for_each_entry(tb, &backend_list, list) { |
85 | if (tb->ops == ops) { | 85 | if (tb->ops == ops) { |
86 | list_del(&tb->list); | 86 | list_del(&tb->list); |
87 | mutex_unlock(&backend_mutex); | ||
88 | /* | ||
89 | * Wait for any outstanding backend driver ->rcu_head | ||
90 | * callbacks to complete post TBO->free_device() -> | ||
91 | * call_rcu(), before allowing backend driver module | ||
92 | * unload of target_backend_ops->owner to proceed. | ||
93 | */ | ||
94 | rcu_barrier(); | ||
87 | kfree(tb); | 95 | kfree(tb); |
88 | break; | 96 | return; |
89 | } | 97 | } |
90 | } | 98 | } |
91 | mutex_unlock(&backend_mutex); | 99 | mutex_unlock(&backend_mutex); |
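Both target_core hunks above follow the same unregister discipline: unlink the entry while holding the mutex, drop the mutex, then call rcu_barrier() so that every call_rcu()/kfree_rcu() callback which may still reference the fabric or backend module has finished before the memory is freed and the module is allowed to go away. A condensed sketch of the pattern; the names are illustrative, the ordering is the point.

static void my_backend_unregister(const struct my_ops *ops)
{
	struct my_entry *e, *found = NULL;

	mutex_lock(&my_mutex);
	list_for_each_entry(e, &my_list, list) {
		if (e->ops == ops) {
			list_del(&e->list);
			found = e;
			break;
		}
	}
	mutex_unlock(&my_mutex);

	if (!found)
		return;

	rcu_barrier();		/* wait out pending RCU callbacks */
	kfree(found);		/* now nothing can still dereference it */
}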
diff --git a/drivers/target/target_core_spc.c b/drivers/target/target_core_spc.c index b5ba1ec3c354..f87d4cef6d39 100644 --- a/drivers/target/target_core_spc.c +++ b/drivers/target/target_core_spc.c | |||
@@ -1203,17 +1203,13 @@ sense_reason_t spc_emulate_report_luns(struct se_cmd *cmd) | |||
1203 | struct se_dev_entry *deve; | 1203 | struct se_dev_entry *deve; |
1204 | struct se_session *sess = cmd->se_sess; | 1204 | struct se_session *sess = cmd->se_sess; |
1205 | struct se_node_acl *nacl; | 1205 | struct se_node_acl *nacl; |
1206 | struct scsi_lun slun; | ||
1206 | unsigned char *buf; | 1207 | unsigned char *buf; |
1207 | u32 lun_count = 0, offset = 8; | 1208 | u32 lun_count = 0, offset = 8; |
1208 | 1209 | __be32 len; | |
1209 | if (cmd->data_length < 16) { | ||
1210 | pr_warn("REPORT LUNS allocation length %u too small\n", | ||
1211 | cmd->data_length); | ||
1212 | return TCM_INVALID_CDB_FIELD; | ||
1213 | } | ||
1214 | 1210 | ||
1215 | buf = transport_kmap_data_sg(cmd); | 1211 | buf = transport_kmap_data_sg(cmd); |
1216 | if (!buf) | 1212 | if (cmd->data_length && !buf) |
1217 | return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; | 1213 | return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; |
1218 | 1214 | ||
1219 | /* | 1215 | /* |
@@ -1221,11 +1217,9 @@ sense_reason_t spc_emulate_report_luns(struct se_cmd *cmd) | |||
1221 | * coming via a target_core_mod PASSTHROUGH op, and not through | 1217 | * coming via a target_core_mod PASSTHROUGH op, and not through |
1222 | * a $FABRIC_MOD. In that case, report LUN=0 only. | 1218 | * a $FABRIC_MOD. In that case, report LUN=0 only. |
1223 | */ | 1219 | */ |
1224 | if (!sess) { | 1220 | if (!sess) |
1225 | int_to_scsilun(0, (struct scsi_lun *)&buf[offset]); | ||
1226 | lun_count = 1; | ||
1227 | goto done; | 1221 | goto done; |
1228 | } | 1222 | |
1229 | nacl = sess->se_node_acl; | 1223 | nacl = sess->se_node_acl; |
1230 | 1224 | ||
1231 | rcu_read_lock(); | 1225 | rcu_read_lock(); |
@@ -1236,10 +1230,12 @@ sense_reason_t spc_emulate_report_luns(struct se_cmd *cmd) | |||
1236 | * See SPC2-R20 7.19. | 1230 | * See SPC2-R20 7.19. |
1237 | */ | 1231 | */ |
1238 | lun_count++; | 1232 | lun_count++; |
1239 | if ((offset + 8) > cmd->data_length) | 1233 | if (offset >= cmd->data_length) |
1240 | continue; | 1234 | continue; |
1241 | 1235 | ||
1242 | int_to_scsilun(deve->mapped_lun, (struct scsi_lun *)&buf[offset]); | 1236 | int_to_scsilun(deve->mapped_lun, &slun); |
1237 | memcpy(buf + offset, &slun, | ||
1238 | min(8u, cmd->data_length - offset)); | ||
1243 | offset += 8; | 1239 | offset += 8; |
1244 | } | 1240 | } |
1245 | rcu_read_unlock(); | 1241 | rcu_read_unlock(); |
@@ -1248,12 +1244,22 @@ sense_reason_t spc_emulate_report_luns(struct se_cmd *cmd) | |||
1248 | * See SPC3 r07, page 159. | 1244 | * See SPC3 r07, page 159. |
1249 | */ | 1245 | */ |
1250 | done: | 1246 | done: |
1251 | lun_count *= 8; | 1247 | /* |
1252 | buf[0] = ((lun_count >> 24) & 0xff); | 1248 | * If no LUNs are accessible, report virtual LUN 0. |
1253 | buf[1] = ((lun_count >> 16) & 0xff); | 1249 | */ |
1254 | buf[2] = ((lun_count >> 8) & 0xff); | 1250 | if (lun_count == 0) { |
1255 | buf[3] = (lun_count & 0xff); | 1251 | int_to_scsilun(0, &slun); |
1256 | transport_kunmap_data_sg(cmd); | 1252 | if (cmd->data_length > 8) |
1253 | memcpy(buf + offset, &slun, | ||
1254 | min(8u, cmd->data_length - offset)); | ||
1255 | lun_count = 1; | ||
1256 | } | ||
1257 | |||
1258 | if (buf) { | ||
1259 | len = cpu_to_be32(lun_count * 8); | ||
1260 | memcpy(buf, &len, min_t(int, sizeof len, cmd->data_length)); | ||
1261 | transport_kunmap_data_sg(cmd); | ||
1262 | } | ||
1257 | 1263 | ||
1258 | target_complete_cmd_with_length(cmd, GOOD, 8 + lun_count * 8); | 1264 | target_complete_cmd_with_length(cmd, GOOD, 8 + lun_count * 8); |
1259 | return 0; | 1265 | return 0; |
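The reworked spc_emulate_report_luns() above no longer rejects small allocation lengths; it always counts every LUN, clamps each 8-byte entry and the length field to whatever buffer space the initiator provided, and still reports the full LUN LIST LENGTH. A hedged sketch of the per-entry copy rule, assuming kernel context and using illustrative names for the buffer arguments:

static u32 emit_report_luns_entry(unsigned char *buf, u32 buf_len,
				  u32 offset, u64 lun)
{
	struct scsi_lun slun;

	int_to_scsilun(lun, &slun);
	if (buf && offset < buf_len)
		memcpy(buf + offset, &slun, min(8u, buf_len - offset));
	return offset + 8;	/* the count advances even when truncated */
}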
diff --git a/drivers/thermal/cpu_cooling.c b/drivers/thermal/cpu_cooling.c index 6509c61b9648..620dcd405ff6 100644 --- a/drivers/thermal/cpu_cooling.c +++ b/drivers/thermal/cpu_cooling.c | |||
@@ -68,7 +68,7 @@ struct power_table { | |||
68 | * registered cooling device. | 68 | * registered cooling device. |
69 | * @cpufreq_state: integer value representing the current state of cpufreq | 69 | * @cpufreq_state: integer value representing the current state of cpufreq |
70 | * cooling devices. | 70 | * cooling devices. |
71 | * @cpufreq_val: integer value representing the absolute value of the clipped | 71 | * @clipped_freq: integer value representing the absolute value of the clipped |
72 | * frequency. | 72 | * frequency. |
73 | * @max_level: maximum cooling level. One less than total number of valid | 73 | * @max_level: maximum cooling level. One less than total number of valid |
74 | * cpufreq frequencies. | 74 | * cpufreq frequencies. |
@@ -91,7 +91,7 @@ struct cpufreq_cooling_device { | |||
91 | int id; | 91 | int id; |
92 | struct thermal_cooling_device *cool_dev; | 92 | struct thermal_cooling_device *cool_dev; |
93 | unsigned int cpufreq_state; | 93 | unsigned int cpufreq_state; |
94 | unsigned int cpufreq_val; | 94 | unsigned int clipped_freq; |
95 | unsigned int max_level; | 95 | unsigned int max_level; |
96 | unsigned int *freq_table; /* In descending order */ | 96 | unsigned int *freq_table; /* In descending order */ |
97 | struct cpumask allowed_cpus; | 97 | struct cpumask allowed_cpus; |
@@ -107,6 +107,9 @@ struct cpufreq_cooling_device { | |||
107 | static DEFINE_IDR(cpufreq_idr); | 107 | static DEFINE_IDR(cpufreq_idr); |
108 | static DEFINE_MUTEX(cooling_cpufreq_lock); | 108 | static DEFINE_MUTEX(cooling_cpufreq_lock); |
109 | 109 | ||
110 | static unsigned int cpufreq_dev_count; | ||
111 | |||
112 | static DEFINE_MUTEX(cooling_list_lock); | ||
110 | static LIST_HEAD(cpufreq_dev_list); | 113 | static LIST_HEAD(cpufreq_dev_list); |
111 | 114 | ||
112 | /** | 115 | /** |
@@ -185,14 +188,14 @@ unsigned long cpufreq_cooling_get_level(unsigned int cpu, unsigned int freq) | |||
185 | { | 188 | { |
186 | struct cpufreq_cooling_device *cpufreq_dev; | 189 | struct cpufreq_cooling_device *cpufreq_dev; |
187 | 190 | ||
188 | mutex_lock(&cooling_cpufreq_lock); | 191 | mutex_lock(&cooling_list_lock); |
189 | list_for_each_entry(cpufreq_dev, &cpufreq_dev_list, node) { | 192 | list_for_each_entry(cpufreq_dev, &cpufreq_dev_list, node) { |
190 | if (cpumask_test_cpu(cpu, &cpufreq_dev->allowed_cpus)) { | 193 | if (cpumask_test_cpu(cpu, &cpufreq_dev->allowed_cpus)) { |
191 | mutex_unlock(&cooling_cpufreq_lock); | 194 | mutex_unlock(&cooling_list_lock); |
192 | return get_level(cpufreq_dev, freq); | 195 | return get_level(cpufreq_dev, freq); |
193 | } | 196 | } |
194 | } | 197 | } |
195 | mutex_unlock(&cooling_cpufreq_lock); | 198 | mutex_unlock(&cooling_list_lock); |
196 | 199 | ||
197 | pr_err("%s: cpu:%d not part of any cooling device\n", __func__, cpu); | 200 | pr_err("%s: cpu:%d not part of any cooling device\n", __func__, cpu); |
198 | return THERMAL_CSTATE_INVALID; | 201 | return THERMAL_CSTATE_INVALID; |
@@ -215,29 +218,35 @@ static int cpufreq_thermal_notifier(struct notifier_block *nb, | |||
215 | unsigned long event, void *data) | 218 | unsigned long event, void *data) |
216 | { | 219 | { |
217 | struct cpufreq_policy *policy = data; | 220 | struct cpufreq_policy *policy = data; |
218 | unsigned long max_freq = 0; | 221 | unsigned long clipped_freq; |
219 | struct cpufreq_cooling_device *cpufreq_dev; | 222 | struct cpufreq_cooling_device *cpufreq_dev; |
220 | 223 | ||
221 | switch (event) { | 224 | if (event != CPUFREQ_ADJUST) |
225 | return NOTIFY_DONE; | ||
222 | 226 | ||
223 | case CPUFREQ_ADJUST: | 227 | mutex_lock(&cooling_list_lock); |
224 | mutex_lock(&cooling_cpufreq_lock); | 228 | list_for_each_entry(cpufreq_dev, &cpufreq_dev_list, node) { |
225 | list_for_each_entry(cpufreq_dev, &cpufreq_dev_list, node) { | 229 | if (!cpumask_test_cpu(policy->cpu, &cpufreq_dev->allowed_cpus)) |
226 | if (!cpumask_test_cpu(policy->cpu, | 230 | continue; |
227 | &cpufreq_dev->allowed_cpus)) | ||
228 | continue; | ||
229 | 231 | ||
230 | max_freq = cpufreq_dev->cpufreq_val; | 232 | /* |
233 | * policy->max is the maximum allowed frequency defined by user | ||
234 | * and clipped_freq is the maximum that thermal constraints | ||
235 | * allow. | ||
236 | * | ||
237 | * If clipped_freq is lower than policy->max, then we need to | ||
238 | * readjust policy->max. | ||
239 | * | ||
240 | * But, if clipped_freq is greater than policy->max, we don't | ||
241 | * need to do anything. | ||
242 | */ | ||
243 | clipped_freq = cpufreq_dev->clipped_freq; | ||
231 | 244 | ||
232 | if (policy->max != max_freq) | 245 | if (policy->max > clipped_freq) |
233 | cpufreq_verify_within_limits(policy, 0, | 246 | cpufreq_verify_within_limits(policy, 0, clipped_freq); |
234 | max_freq); | ||
235 | } | ||
236 | mutex_unlock(&cooling_cpufreq_lock); | ||
237 | break; | 247 | break; |
238 | default: | ||
239 | return NOTIFY_DONE; | ||
240 | } | 248 | } |
249 | mutex_unlock(&cooling_list_lock); | ||
241 | 250 | ||
242 | return NOTIFY_OK; | 251 | return NOTIFY_OK; |
243 | } | 252 | } |
@@ -519,7 +528,7 @@ static int cpufreq_set_cur_state(struct thermal_cooling_device *cdev, | |||
519 | 528 | ||
520 | clip_freq = cpufreq_device->freq_table[state]; | 529 | clip_freq = cpufreq_device->freq_table[state]; |
521 | cpufreq_device->cpufreq_state = state; | 530 | cpufreq_device->cpufreq_state = state; |
522 | cpufreq_device->cpufreq_val = clip_freq; | 531 | cpufreq_device->clipped_freq = clip_freq; |
523 | 532 | ||
524 | cpufreq_update_policy(cpu); | 533 | cpufreq_update_policy(cpu); |
525 | 534 | ||
@@ -861,17 +870,19 @@ __cpufreq_cooling_register(struct device_node *np, | |||
861 | pr_debug("%s: freq:%u KHz\n", __func__, freq); | 870 | pr_debug("%s: freq:%u KHz\n", __func__, freq); |
862 | } | 871 | } |
863 | 872 | ||
864 | cpufreq_dev->cpufreq_val = cpufreq_dev->freq_table[0]; | 873 | cpufreq_dev->clipped_freq = cpufreq_dev->freq_table[0]; |
865 | cpufreq_dev->cool_dev = cool_dev; | 874 | cpufreq_dev->cool_dev = cool_dev; |
866 | 875 | ||
867 | mutex_lock(&cooling_cpufreq_lock); | 876 | mutex_lock(&cooling_cpufreq_lock); |
868 | 877 | ||
878 | mutex_lock(&cooling_list_lock); | ||
879 | list_add(&cpufreq_dev->node, &cpufreq_dev_list); | ||
880 | mutex_unlock(&cooling_list_lock); | ||
881 | |||
869 | /* Register the notifier for first cpufreq cooling device */ | 882 | /* Register the notifier for first cpufreq cooling device */ |
870 | if (list_empty(&cpufreq_dev_list)) | 883 | if (!cpufreq_dev_count++) |
871 | cpufreq_register_notifier(&thermal_cpufreq_notifier_block, | 884 | cpufreq_register_notifier(&thermal_cpufreq_notifier_block, |
872 | CPUFREQ_POLICY_NOTIFIER); | 885 | CPUFREQ_POLICY_NOTIFIER); |
873 | list_add(&cpufreq_dev->node, &cpufreq_dev_list); | ||
874 | |||
875 | mutex_unlock(&cooling_cpufreq_lock); | 886 | mutex_unlock(&cooling_cpufreq_lock); |
876 | 887 | ||
877 | return cool_dev; | 888 | return cool_dev; |
@@ -1013,13 +1024,17 @@ void cpufreq_cooling_unregister(struct thermal_cooling_device *cdev) | |||
1013 | return; | 1024 | return; |
1014 | 1025 | ||
1015 | cpufreq_dev = cdev->devdata; | 1026 | cpufreq_dev = cdev->devdata; |
1016 | mutex_lock(&cooling_cpufreq_lock); | ||
1017 | list_del(&cpufreq_dev->node); | ||
1018 | 1027 | ||
1019 | /* Unregister the notifier for the last cpufreq cooling device */ | 1028 | /* Unregister the notifier for the last cpufreq cooling device */ |
1020 | if (list_empty(&cpufreq_dev_list)) | 1029 | mutex_lock(&cooling_cpufreq_lock); |
1030 | if (!--cpufreq_dev_count) | ||
1021 | cpufreq_unregister_notifier(&thermal_cpufreq_notifier_block, | 1031 | cpufreq_unregister_notifier(&thermal_cpufreq_notifier_block, |
1022 | CPUFREQ_POLICY_NOTIFIER); | 1032 | CPUFREQ_POLICY_NOTIFIER); |
1033 | |||
1034 | mutex_lock(&cooling_list_lock); | ||
1035 | list_del(&cpufreq_dev->node); | ||
1036 | mutex_unlock(&cooling_list_lock); | ||
1037 | |||
1023 | mutex_unlock(&cooling_cpufreq_lock); | 1038 | mutex_unlock(&cooling_cpufreq_lock); |
1024 | 1039 | ||
1025 | thermal_cooling_device_unregister(cpufreq_dev->cool_dev); | 1040 | thermal_cooling_device_unregister(cpufreq_dev->cool_dev); |
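The cpu_cooling.c rework above gives the device list its own cooling_list_lock, counts registered devices instead of testing list_empty() under the notifier lock, and reduces the CPUFREQ_ADJUST notifier to a single rule: if thermal's clipped_freq is below the user's policy->max, clamp the policy, otherwise do nothing. A minimal sketch of such a clamp-only policy notifier; "thermal_max_khz" is an illustrative variable, not part of the driver.

static unsigned int thermal_max_khz = UINT_MAX;	/* updated elsewhere */

static int my_policy_notifier(struct notifier_block *nb,
			      unsigned long event, void *data)
{
	struct cpufreq_policy *policy = data;

	if (event != CPUFREQ_ADJUST)
		return NOTIFY_DONE;

	if (policy->max > thermal_max_khz)
		cpufreq_verify_within_limits(policy, 0, thermal_max_khz);

	return NOTIFY_OK;
}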
diff --git a/drivers/thermal/power_allocator.c b/drivers/thermal/power_allocator.c index 63a448f9d93b..251676902869 100644 --- a/drivers/thermal/power_allocator.c +++ b/drivers/thermal/power_allocator.c | |||
@@ -258,8 +258,7 @@ static int allocate_power(struct thermal_zone_device *tz, | |||
258 | BUILD_BUG_ON(sizeof(*req_power) != sizeof(*granted_power)); | 258 | BUILD_BUG_ON(sizeof(*req_power) != sizeof(*granted_power)); |
259 | BUILD_BUG_ON(sizeof(*req_power) != sizeof(*extra_actor_power)); | 259 | BUILD_BUG_ON(sizeof(*req_power) != sizeof(*extra_actor_power)); |
260 | BUILD_BUG_ON(sizeof(*req_power) != sizeof(*weighted_req_power)); | 260 | BUILD_BUG_ON(sizeof(*req_power) != sizeof(*weighted_req_power)); |
261 | req_power = devm_kcalloc(&tz->device, num_actors * 5, | 261 | req_power = kcalloc(num_actors * 5, sizeof(*req_power), GFP_KERNEL); |
262 | sizeof(*req_power), GFP_KERNEL); | ||
263 | if (!req_power) { | 262 | if (!req_power) { |
264 | ret = -ENOMEM; | 263 | ret = -ENOMEM; |
265 | goto unlock; | 264 | goto unlock; |
@@ -334,7 +333,7 @@ static int allocate_power(struct thermal_zone_device *tz, | |||
334 | max_allocatable_power, current_temp, | 333 | max_allocatable_power, current_temp, |
335 | (s32)control_temp - (s32)current_temp); | 334 | (s32)control_temp - (s32)current_temp); |
336 | 335 | ||
337 | devm_kfree(&tz->device, req_power); | 336 | kfree(req_power); |
338 | unlock: | 337 | unlock: |
339 | mutex_unlock(&tz->lock); | 338 | mutex_unlock(&tz->lock); |
340 | 339 | ||
@@ -426,7 +425,7 @@ static int power_allocator_bind(struct thermal_zone_device *tz) | |||
426 | return -EINVAL; | 425 | return -EINVAL; |
427 | } | 426 | } |
428 | 427 | ||
429 | params = devm_kzalloc(&tz->device, sizeof(*params), GFP_KERNEL); | 428 | params = kzalloc(sizeof(*params), GFP_KERNEL); |
430 | if (!params) | 429 | if (!params) |
431 | return -ENOMEM; | 430 | return -ENOMEM; |
432 | 431 | ||
@@ -468,14 +467,14 @@ static int power_allocator_bind(struct thermal_zone_device *tz) | |||
468 | return 0; | 467 | return 0; |
469 | 468 | ||
470 | free: | 469 | free: |
471 | devm_kfree(&tz->device, params); | 470 | kfree(params); |
472 | return ret; | 471 | return ret; |
473 | } | 472 | } |
474 | 473 | ||
475 | static void power_allocator_unbind(struct thermal_zone_device *tz) | 474 | static void power_allocator_unbind(struct thermal_zone_device *tz) |
476 | { | 475 | { |
477 | dev_dbg(&tz->device, "Unbinding from thermal zone %d\n", tz->id); | 476 | dev_dbg(&tz->device, "Unbinding from thermal zone %d\n", tz->id); |
478 | devm_kfree(&tz->device, tz->governor_data); | 477 | kfree(tz->governor_data); |
479 | tz->governor_data = NULL; | 478 | tz->governor_data = NULL; |
480 | } | 479 | } |
481 | 480 | ||
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c index 518c6294bf6c..5fa588e933d5 100644 --- a/fs/fs-writeback.c +++ b/fs/fs-writeback.c | |||
@@ -844,14 +844,15 @@ static void bdi_split_work_to_wbs(struct backing_dev_info *bdi, | |||
844 | struct wb_iter iter; | 844 | struct wb_iter iter; |
845 | 845 | ||
846 | might_sleep(); | 846 | might_sleep(); |
847 | |||
848 | if (!bdi_has_dirty_io(bdi)) | ||
849 | return; | ||
850 | restart: | 847 | restart: |
851 | rcu_read_lock(); | 848 | rcu_read_lock(); |
852 | bdi_for_each_wb(wb, bdi, &iter, next_blkcg_id) { | 849 | bdi_for_each_wb(wb, bdi, &iter, next_blkcg_id) { |
853 | if (!wb_has_dirty_io(wb) || | 850 | /* SYNC_ALL writes out I_DIRTY_TIME too */ |
854 | (skip_if_busy && writeback_in_progress(wb))) | 851 | if (!wb_has_dirty_io(wb) && |
852 | (base_work->sync_mode == WB_SYNC_NONE || | ||
853 | list_empty(&wb->b_dirty_time))) | ||
854 | continue; | ||
855 | if (skip_if_busy && writeback_in_progress(wb)) | ||
855 | continue; | 856 | continue; |
856 | 857 | ||
857 | base_work->nr_pages = wb_split_bdi_pages(wb, nr_pages); | 858 | base_work->nr_pages = wb_split_bdi_pages(wb, nr_pages); |
@@ -899,8 +900,7 @@ static void bdi_split_work_to_wbs(struct backing_dev_info *bdi, | |||
899 | { | 900 | { |
900 | might_sleep(); | 901 | might_sleep(); |
901 | 902 | ||
902 | if (bdi_has_dirty_io(bdi) && | 903 | if (!skip_if_busy || !writeback_in_progress(&bdi->wb)) { |
903 | (!skip_if_busy || !writeback_in_progress(&bdi->wb))) { | ||
904 | base_work->auto_free = 0; | 904 | base_work->auto_free = 0; |
905 | base_work->single_wait = 0; | 905 | base_work->single_wait = 0; |
906 | base_work->single_done = 0; | 906 | base_work->single_done = 0; |
@@ -2275,8 +2275,12 @@ void sync_inodes_sb(struct super_block *sb) | |||
2275 | }; | 2275 | }; |
2276 | struct backing_dev_info *bdi = sb->s_bdi; | 2276 | struct backing_dev_info *bdi = sb->s_bdi; |
2277 | 2277 | ||
2278 | /* Nothing to do? */ | 2278 | /* |
2279 | if (!bdi_has_dirty_io(bdi) || bdi == &noop_backing_dev_info) | 2279 | * Can't skip on !bdi_has_dirty() because we should wait for !dirty |
2280 | * inodes under writeback and I_DIRTY_TIME inodes ignored by | ||
2281 | * bdi_has_dirty() need to be written out too. | ||
2282 | */ | ||
2283 | if (bdi == &noop_backing_dev_info) | ||
2280 | return; | 2284 | return; |
2281 | WARN_ON(!rwsem_is_locked(&sb->s_umount)); | 2285 | WARN_ON(!rwsem_is_locked(&sb->s_umount)); |
2282 | 2286 | ||
diff --git a/include/drm/drm_edid.h b/include/drm/drm_edid.h index 799050198323..53c53c459b15 100644 --- a/include/drm/drm_edid.h +++ b/include/drm/drm_edid.h | |||
@@ -348,6 +348,25 @@ static inline int drm_eld_mnl(const uint8_t *eld) | |||
348 | } | 348 | } |
349 | 349 | ||
350 | /** | 350 | /** |
351 | * drm_eld_sad - Get ELD SAD structures. | ||
352 | * @eld: pointer to an eld memory structure with sad_count set | ||
353 | */ | ||
354 | static inline const uint8_t *drm_eld_sad(const uint8_t *eld) | ||
355 | { | ||
356 | unsigned int ver, mnl; | ||
357 | |||
358 | ver = (eld[DRM_ELD_VER] & DRM_ELD_VER_MASK) >> DRM_ELD_VER_SHIFT; | ||
359 | if (ver != 2 && ver != 31) | ||
360 | return NULL; | ||
361 | |||
362 | mnl = drm_eld_mnl(eld); | ||
363 | if (mnl > 16) | ||
364 | return NULL; | ||
365 | |||
366 | return eld + DRM_ELD_CEA_SAD(mnl, 0); | ||
367 | } | ||
368 | |||
369 | /** | ||
351 | * drm_eld_sad_count - Get ELD SAD count. | 370 | * drm_eld_sad_count - Get ELD SAD count. |
352 | * @eld: pointer to an eld memory structure with sad_count set | 371 | * @eld: pointer to an eld memory structure with sad_count set |
353 | */ | 372 | */ |
diff --git a/include/linux/irq.h b/include/linux/irq.h index 92188b0225bb..51744bcf74ee 100644 --- a/include/linux/irq.h +++ b/include/linux/irq.h | |||
@@ -484,6 +484,7 @@ extern int irq_chip_set_affinity_parent(struct irq_data *data, | |||
484 | extern int irq_chip_set_wake_parent(struct irq_data *data, unsigned int on); | 484 | extern int irq_chip_set_wake_parent(struct irq_data *data, unsigned int on); |
485 | extern int irq_chip_set_vcpu_affinity_parent(struct irq_data *data, | 485 | extern int irq_chip_set_vcpu_affinity_parent(struct irq_data *data, |
486 | void *vcpu_info); | 486 | void *vcpu_info); |
487 | extern int irq_chip_set_type_parent(struct irq_data *data, unsigned int type); | ||
487 | #endif | 488 | #endif |
488 | 489 | ||
489 | /* Handling of unhandled and spurious interrupts: */ | 490 | /* Handling of unhandled and spurious interrupts: */ |
diff --git a/include/linux/mm.h b/include/linux/mm.h index 2e872f92dbac..bf6f117fcf4d 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h | |||
@@ -1003,6 +1003,34 @@ static inline int page_mapped(struct page *page) | |||
1003 | } | 1003 | } |
1004 | 1004 | ||
1005 | /* | 1005 | /* |
1006 | * Return true only if the page has been allocated with | ||
1007 | * ALLOC_NO_WATERMARKS and the low watermark was not | ||
1008 | * met implying that the system is under some pressure. | ||
1009 | */ | ||
1010 | static inline bool page_is_pfmemalloc(struct page *page) | ||
1011 | { | ||
1012 | /* | ||
1013 | * Page index cannot be this large so this must be | ||
1014 | * a pfmemalloc page. | ||
1015 | */ | ||
1016 | return page->index == -1UL; | ||
1017 | } | ||
1018 | |||
1019 | /* | ||
1020 | * Only to be called by the page allocator on a freshly allocated | ||
1021 | * page. | ||
1022 | */ | ||
1023 | static inline void set_page_pfmemalloc(struct page *page) | ||
1024 | { | ||
1025 | page->index = -1UL; | ||
1026 | } | ||
1027 | |||
1028 | static inline void clear_page_pfmemalloc(struct page *page) | ||
1029 | { | ||
1030 | page->index = 0; | ||
1031 | } | ||
1032 | |||
1033 | /* | ||
1006 | * Different kinds of faults, as returned by handle_mm_fault(). | 1034 | * Different kinds of faults, as returned by handle_mm_fault(). |
1007 | * Used to decide whether a process gets delivered SIGBUS or | 1035 | * Used to decide whether a process gets delivered SIGBUS or |
1008 | * just gets major/minor fault counters bumped up. | 1036 | * just gets major/minor fault counters bumped up. |
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h index 0038ac7466fd..15549578d559 100644 --- a/include/linux/mm_types.h +++ b/include/linux/mm_types.h | |||
@@ -63,15 +63,6 @@ struct page { | |||
63 | union { | 63 | union { |
64 | pgoff_t index; /* Our offset within mapping. */ | 64 | pgoff_t index; /* Our offset within mapping. */ |
65 | void *freelist; /* sl[aou]b first free object */ | 65 | void *freelist; /* sl[aou]b first free object */ |
66 | bool pfmemalloc; /* If set by the page allocator, | ||
67 | * ALLOC_NO_WATERMARKS was set | ||
68 | * and the low watermark was not | ||
69 | * met implying that the system | ||
70 | * is under some pressure. The | ||
71 | * caller should try ensure | ||
72 | * this page is only used to | ||
73 | * free other pages. | ||
74 | */ | ||
75 | }; | 66 | }; |
76 | 67 | ||
77 | union { | 68 | union { |
diff --git a/include/linux/pci.h b/include/linux/pci.h index 8a0321a8fb59..860c751810fc 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h | |||
@@ -1202,6 +1202,7 @@ struct msix_entry { | |||
1202 | u16 entry; /* driver uses to specify entry, OS writes */ | 1202 | u16 entry; /* driver uses to specify entry, OS writes */ |
1203 | }; | 1203 | }; |
1204 | 1204 | ||
1205 | void pci_msi_setup_pci_dev(struct pci_dev *dev); | ||
1205 | 1206 | ||
1206 | #ifdef CONFIG_PCI_MSI | 1207 | #ifdef CONFIG_PCI_MSI |
1207 | int pci_msi_vec_count(struct pci_dev *dev); | 1208 | int pci_msi_vec_count(struct pci_dev *dev); |
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 22b6d9ca1654..9b88536487e6 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h | |||
@@ -1602,20 +1602,16 @@ static inline void __skb_fill_page_desc(struct sk_buff *skb, int i, | |||
1602 | skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; | 1602 | skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; |
1603 | 1603 | ||
1604 | /* | 1604 | /* |
1605 | * Propagate page->pfmemalloc to the skb if we can. The problem is | 1605 | * Propagate page pfmemalloc to the skb if we can. The problem is |
1606 | * that not all callers have unique ownership of the page. If | 1606 | * that not all callers have unique ownership of the page but rely |
1607 | * pfmemalloc is set, we check the mapping as a mapping implies | 1607 | * on page_is_pfmemalloc doing the right thing(tm). |
1608 | * page->index is set (index and pfmemalloc share space). | ||
1609 | * If it's a valid mapping, we cannot use page->pfmemalloc but we | ||
1610 | * do not lose pfmemalloc information as the pages would not be | ||
1611 | * allocated using __GFP_MEMALLOC. | ||
1612 | */ | 1608 | */ |
1613 | frag->page.p = page; | 1609 | frag->page.p = page; |
1614 | frag->page_offset = off; | 1610 | frag->page_offset = off; |
1615 | skb_frag_size_set(frag, size); | 1611 | skb_frag_size_set(frag, size); |
1616 | 1612 | ||
1617 | page = compound_head(page); | 1613 | page = compound_head(page); |
1618 | if (page->pfmemalloc && !page->mapping) | 1614 | if (page_is_pfmemalloc(page)) |
1619 | skb->pfmemalloc = true; | 1615 | skb->pfmemalloc = true; |
1620 | } | 1616 | } |
1621 | 1617 | ||
@@ -2263,7 +2259,7 @@ static inline struct page *dev_alloc_page(void) | |||
2263 | static inline void skb_propagate_pfmemalloc(struct page *page, | 2259 | static inline void skb_propagate_pfmemalloc(struct page *page, |
2264 | struct sk_buff *skb) | 2260 | struct sk_buff *skb) |
2265 | { | 2261 | { |
2266 | if (page && page->pfmemalloc) | 2262 | if (page_is_pfmemalloc(page)) |
2267 | skb->pfmemalloc = true; | 2263 | skb->pfmemalloc = true; |
2268 | } | 2264 | } |
2269 | 2265 | ||
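With page->pfmemalloc removed from struct page (see the mm.h and mm_types.h hunks earlier in this series), callers are expected to go through page_is_pfmemalloc(), which the two skbuff helpers above now do. A sketch of how a driver RX path would propagate the hint when it builds an skb straight from a page fragment; everything other than the two helpers is illustrative:

static struct sk_buff *my_build_rx_skb(struct page *page, void *va,
				       unsigned int frag_size)
{
	struct sk_buff *skb = build_skb(va, frag_size);

	if (skb)
		/* mark the skb if the page came from emergency reserves */
		skb_propagate_pfmemalloc(page, skb);
	return skb;
}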
diff --git a/include/media/rc-core.h b/include/media/rc-core.h index 45534da57759..644bdc61c387 100644 --- a/include/media/rc-core.h +++ b/include/media/rc-core.h | |||
@@ -74,8 +74,6 @@ enum rc_filter_type { | |||
74 | * @input_dev: the input child device used to communicate events to userspace | 74 | * @input_dev: the input child device used to communicate events to userspace |
75 | * @driver_type: specifies if protocol decoding is done in hardware or software | 75 | * @driver_type: specifies if protocol decoding is done in hardware or software |
76 | * @idle: used to keep track of RX state | 76 | * @idle: used to keep track of RX state |
77 | * @encode_wakeup: wakeup filtering uses IR encode API, therefore the allowed | ||
78 | * wakeup protocols is the set of all raw encoders | ||
79 | * @allowed_protocols: bitmask with the supported RC_BIT_* protocols | 77 | * @allowed_protocols: bitmask with the supported RC_BIT_* protocols |
80 | * @enabled_protocols: bitmask with the enabled RC_BIT_* protocols | 78 | * @enabled_protocols: bitmask with the enabled RC_BIT_* protocols |
81 | * @allowed_wakeup_protocols: bitmask with the supported RC_BIT_* wakeup protocols | 79 | * @allowed_wakeup_protocols: bitmask with the supported RC_BIT_* wakeup protocols |
@@ -136,7 +134,6 @@ struct rc_dev { | |||
136 | struct input_dev *input_dev; | 134 | struct input_dev *input_dev; |
137 | enum rc_driver_type driver_type; | 135 | enum rc_driver_type driver_type; |
138 | bool idle; | 136 | bool idle; |
139 | bool encode_wakeup; | ||
140 | u64 allowed_protocols; | 137 | u64 allowed_protocols; |
141 | u64 enabled_protocols; | 138 | u64 enabled_protocols; |
142 | u64 allowed_wakeup_protocols; | 139 | u64 allowed_wakeup_protocols; |
@@ -246,7 +243,6 @@ static inline void init_ir_raw_event(struct ir_raw_event *ev) | |||
246 | #define US_TO_NS(usec) ((usec) * 1000) | 243 | #define US_TO_NS(usec) ((usec) * 1000) |
247 | #define MS_TO_US(msec) ((msec) * 1000) | 244 | #define MS_TO_US(msec) ((msec) * 1000) |
248 | #define MS_TO_NS(msec) ((msec) * 1000 * 1000) | 245 | #define MS_TO_NS(msec) ((msec) * 1000 * 1000) |
249 | #define NS_TO_US(nsec) DIV_ROUND_UP(nsec, 1000L) | ||
250 | 246 | ||
251 | void ir_raw_event_handle(struct rc_dev *dev); | 247 | void ir_raw_event_handle(struct rc_dev *dev); |
252 | int ir_raw_event_store(struct rc_dev *dev, struct ir_raw_event *ev); | 248 | int ir_raw_event_store(struct rc_dev *dev, struct ir_raw_event *ev); |
@@ -254,9 +250,6 @@ int ir_raw_event_store_edge(struct rc_dev *dev, enum raw_event_type type); | |||
254 | int ir_raw_event_store_with_filter(struct rc_dev *dev, | 250 | int ir_raw_event_store_with_filter(struct rc_dev *dev, |
255 | struct ir_raw_event *ev); | 251 | struct ir_raw_event *ev); |
256 | void ir_raw_event_set_idle(struct rc_dev *dev, bool idle); | 252 | void ir_raw_event_set_idle(struct rc_dev *dev, bool idle); |
257 | int ir_raw_encode_scancode(u64 protocols, | ||
258 | const struct rc_scancode_filter *scancode, | ||
259 | struct ir_raw_event *events, unsigned int max); | ||
260 | 253 | ||
261 | static inline void ir_raw_event_reset(struct rc_dev *dev) | 254 | static inline void ir_raw_event_reset(struct rc_dev *dev) |
262 | { | 255 | { |
diff --git a/include/media/videobuf2-core.h b/include/media/videobuf2-core.h index 22a44c2f5963..c192e1b46cdc 100644 --- a/include/media/videobuf2-core.h +++ b/include/media/videobuf2-core.h | |||
@@ -139,6 +139,7 @@ enum vb2_io_modes { | |||
139 | * @VB2_BUF_STATE_PREPARING: buffer is being prepared in videobuf | 139 | * @VB2_BUF_STATE_PREPARING: buffer is being prepared in videobuf |
140 | * @VB2_BUF_STATE_PREPARED: buffer prepared in videobuf and by the driver | 140 | * @VB2_BUF_STATE_PREPARED: buffer prepared in videobuf and by the driver |
141 | * @VB2_BUF_STATE_QUEUED: buffer queued in videobuf, but not in driver | 141 | * @VB2_BUF_STATE_QUEUED: buffer queued in videobuf, but not in driver |
142 | * @VB2_BUF_STATE_REQUEUEING: re-queue a buffer to the driver | ||
142 | * @VB2_BUF_STATE_ACTIVE: buffer queued in driver and possibly used | 143 | * @VB2_BUF_STATE_ACTIVE: buffer queued in driver and possibly used |
143 | * in a hardware operation | 144 | * in a hardware operation |
144 | * @VB2_BUF_STATE_DONE: buffer returned from driver to videobuf, but | 145 | * @VB2_BUF_STATE_DONE: buffer returned from driver to videobuf, but |
@@ -152,6 +153,7 @@ enum vb2_buffer_state { | |||
152 | VB2_BUF_STATE_PREPARING, | 153 | VB2_BUF_STATE_PREPARING, |
153 | VB2_BUF_STATE_PREPARED, | 154 | VB2_BUF_STATE_PREPARED, |
154 | VB2_BUF_STATE_QUEUED, | 155 | VB2_BUF_STATE_QUEUED, |
156 | VB2_BUF_STATE_REQUEUEING, | ||
155 | VB2_BUF_STATE_ACTIVE, | 157 | VB2_BUF_STATE_ACTIVE, |
156 | VB2_BUF_STATE_DONE, | 158 | VB2_BUF_STATE_DONE, |
157 | VB2_BUF_STATE_ERROR, | 159 | VB2_BUF_STATE_ERROR, |
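If I read the new state right, VB2_BUF_STATE_REQUEUEING lets a driver hand a buffer back to videobuf2 with the request that it be queued to the driver again rather than returned to userspace. A hypothetical completion path using it; the driver structure and the retry decision are made up:

static void my_cam_buffer_complete(struct vb2_buffer *vb, bool hw_glitch)
{
	/* on a transient hardware glitch, recycle the buffer instead of
	 * reporting DONE or ERROR to userspace */
	vb2_buffer_done(vb, hw_glitch ? VB2_BUF_STATE_REQUEUEING
				      : VB2_BUF_STATE_DONE);
}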
diff --git a/include/sound/soc-topology.h b/include/sound/soc-topology.h index 865a141b118b..427bc41df3ae 100644 --- a/include/sound/soc-topology.h +++ b/include/sound/soc-topology.h | |||
@@ -141,6 +141,8 @@ struct snd_soc_tplg_ops { | |||
141 | int io_ops_count; | 141 | int io_ops_count; |
142 | }; | 142 | }; |
143 | 143 | ||
144 | #ifdef CONFIG_SND_SOC_TOPOLOGY | ||
145 | |||
144 | /* gets a pointer to data from the firmware block header */ | 146 | /* gets a pointer to data from the firmware block header */ |
145 | static inline const void *snd_soc_tplg_get_data(struct snd_soc_tplg_hdr *hdr) | 147 | static inline const void *snd_soc_tplg_get_data(struct snd_soc_tplg_hdr *hdr) |
146 | { | 148 | { |
@@ -165,4 +167,14 @@ int snd_soc_tplg_widget_bind_event(struct snd_soc_dapm_widget *w, | |||
165 | const struct snd_soc_tplg_widget_events *events, int num_events, | 167 | const struct snd_soc_tplg_widget_events *events, int num_events, |
166 | u16 event_type); | 168 | u16 event_type); |
167 | 169 | ||
170 | #else | ||
171 | |||
172 | static inline int snd_soc_tplg_component_remove(struct snd_soc_component *comp, | ||
173 | u32 index) | ||
174 | { | ||
175 | return 0; | ||
176 | } | ||
177 | |||
178 | #endif | ||
179 | |||
168 | #endif | 180 | #endif |
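The soc-topology.h change is the standard Kconfig stub idiom: when CONFIG_SND_SOC_TOPOLOGY is off, the header supplies a static inline no-op for snd_soc_tplg_component_remove() so that callers build without sprinkling #ifdefs. The same pattern in miniature, with made-up names (CONFIG_MY_FEATURE, my_feature_remove):

#ifdef CONFIG_MY_FEATURE
int my_feature_remove(struct device *dev, u32 index);
#else
static inline int my_feature_remove(struct device *dev, u32 index)
{
	return 0;	/* feature compiled out: pretend success */
}
#endif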
diff --git a/include/uapi/sound/asoc.h b/include/uapi/sound/asoc.h index 51b8066a223b..247c50bd60f0 100644 --- a/include/uapi/sound/asoc.h +++ b/include/uapi/sound/asoc.h | |||
@@ -18,6 +18,12 @@ | |||
18 | #include <linux/types.h> | 18 | #include <linux/types.h> |
19 | #include <sound/asound.h> | 19 | #include <sound/asound.h> |
20 | 20 | ||
21 | #ifndef __KERNEL__ | ||
22 | #error This API is an early revision and not enabled in the current | ||
23 | #error kernel release, it will be enabled in a future kernel version | ||
24 | #error with incompatible changes to what is here. | ||
25 | #endif | ||
26 | |||
21 | /* | 27 | /* |
22 | * Maximum number of channels topology kcontrol can represent. | 28 | * Maximum number of channels topology kcontrol can represent. |
23 | */ | 29 | */ |
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c index 27f4332c7f84..ae216824e8ca 100644 --- a/kernel/irq/chip.c +++ b/kernel/irq/chip.c | |||
@@ -985,6 +985,23 @@ int irq_chip_set_affinity_parent(struct irq_data *data, | |||
985 | } | 985 | } |
986 | 986 | ||
987 | /** | 987 | /** |
988 | * irq_chip_set_type_parent - Set IRQ type on the parent interrupt | ||
989 | * @data: Pointer to interrupt specific data | ||
990 | * @type: IRQ_TYPE_{LEVEL,EDGE}_* value - see include/linux/irq.h | ||
991 | * | ||
992 | * Conditional, as the underlying parent chip might not implement it. | ||
993 | */ | ||
994 | int irq_chip_set_type_parent(struct irq_data *data, unsigned int type) | ||
995 | { | ||
996 | data = data->parent_data; | ||
997 | |||
998 | if (data->chip->irq_set_type) | ||
999 | return data->chip->irq_set_type(data, type); | ||
1000 | |||
1001 | return -ENOSYS; | ||
1002 | } | ||
1003 | |||
1004 | /** | ||
988 | * irq_chip_retrigger_hierarchy - Retrigger an interrupt in hardware | 1005 | * irq_chip_retrigger_hierarchy - Retrigger an interrupt in hardware |
989 | * @data: Pointer to interrupt specific data | 1006 | * @data: Pointer to interrupt specific data |
990 | * | 1007 | * |
@@ -997,7 +1014,7 @@ int irq_chip_retrigger_hierarchy(struct irq_data *data) | |||
997 | if (data->chip && data->chip->irq_retrigger) | 1014 | if (data->chip && data->chip->irq_retrigger) |
998 | return data->chip->irq_retrigger(data); | 1015 | return data->chip->irq_retrigger(data); |
999 | 1016 | ||
1000 | return -ENOSYS; | 1017 | return 0; |
1001 | } | 1018 | } |
1002 | 1019 | ||
1003 | /** | 1020 | /** |
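irq_chip_set_type_parent() above is meant for stacked irqchips whose local chip has nothing to configure for trigger type and simply defers to the parent; irq_chip_retrigger_hierarchy() also now returns 0 instead of -ENOSYS when no parent can retrigger, so callers treat that as a soft no-op. A sketch of a stacked chip wired up with the parent helpers; the chip itself is illustrative:

static struct irq_chip my_stacked_chip = {
	.name		= "MY-STACKED",
	.irq_mask	= irq_chip_mask_parent,
	.irq_unmask	= irq_chip_unmask_parent,
	.irq_retrigger	= irq_chip_retrigger_hierarchy,
	.irq_set_type	= irq_chip_set_type_parent,	/* new helper */
};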
diff --git a/kernel/time/timer.c b/kernel/time/timer.c index 5e097fa9faf7..84190f02b521 100644 --- a/kernel/time/timer.c +++ b/kernel/time/timer.c | |||
@@ -807,8 +807,8 @@ __mod_timer(struct timer_list *timer, unsigned long expires, | |||
807 | spin_unlock(&base->lock); | 807 | spin_unlock(&base->lock); |
808 | base = new_base; | 808 | base = new_base; |
809 | spin_lock(&base->lock); | 809 | spin_lock(&base->lock); |
810 | timer->flags &= ~TIMER_BASEMASK; | 810 | WRITE_ONCE(timer->flags, |
811 | timer->flags |= base->cpu; | 811 | (timer->flags & ~TIMER_BASEMASK) | base->cpu); |
812 | } | 812 | } |
813 | } | 813 | } |
814 | 814 | ||
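The __mod_timer() hunk above publishes the base switch as one WRITE_ONCE() because timer->flags mixes the CPU/base bits with flag bits and is also read without base->lock held; clearing TIMER_BASEMASK first and OR-ing in the new CPU as a second store would let such a reader observe a transient, bogus base. A lockless reader would pair it with READ_ONCE(); minimal sketch, helper name illustrative and assuming the 4.2 TIMER_CPUMASK layout:

static inline unsigned int my_timer_cpu(struct timer_list *timer)
{
	/* single snapshot of flags; never sees a half-updated value */
	return READ_ONCE(timer->flags) & TIMER_CPUMASK;
}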
diff --git a/mm/page_alloc.c b/mm/page_alloc.c index df959b7d6085..5b5240b7f642 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c | |||
@@ -1343,12 +1343,15 @@ static int prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags, | |||
1343 | set_page_owner(page, order, gfp_flags); | 1343 | set_page_owner(page, order, gfp_flags); |
1344 | 1344 | ||
1345 | /* | 1345 | /* |
1346 | * page->pfmemalloc is set when ALLOC_NO_WATERMARKS was necessary to | 1346 | * page is set pfmemalloc when ALLOC_NO_WATERMARKS was necessary to |
1347 | * allocate the page. The expectation is that the caller is taking | 1347 | * allocate the page. The expectation is that the caller is taking |
1348 | * steps that will free more memory. The caller should avoid the page | 1348 | * steps that will free more memory. The caller should avoid the page |
1349 | * being used for !PFMEMALLOC purposes. | 1349 | * being used for !PFMEMALLOC purposes. |
1350 | */ | 1350 | */ |
1351 | page->pfmemalloc = !!(alloc_flags & ALLOC_NO_WATERMARKS); | 1351 | if (alloc_flags & ALLOC_NO_WATERMARKS) |
1352 | set_page_pfmemalloc(page); | ||
1353 | else | ||
1354 | clear_page_pfmemalloc(page); | ||
1352 | 1355 | ||
1353 | return 0; | 1356 | return 0; |
1354 | } | 1357 | } |
@@ -3345,7 +3348,7 @@ refill: | |||
3345 | atomic_add(size - 1, &page->_count); | 3348 | atomic_add(size - 1, &page->_count); |
3346 | 3349 | ||
3347 | /* reset page count bias and offset to start of new frag */ | 3350 | /* reset page count bias and offset to start of new frag */ |
3348 | nc->pfmemalloc = page->pfmemalloc; | 3351 | nc->pfmemalloc = page_is_pfmemalloc(page); |
3349 | nc->pagecnt_bias = size; | 3352 | nc->pagecnt_bias = size; |
3350 | nc->offset = size; | 3353 | nc->offset = size; |
3351 | } | 3354 | } |
@@ -1603,7 +1603,7 @@ static struct page *kmem_getpages(struct kmem_cache *cachep, gfp_t flags, | |||
1603 | } | 1603 | } |
1604 | 1604 | ||
1605 | /* Record if ALLOC_NO_WATERMARKS was set when allocating the slab */ | 1605 | /* Record if ALLOC_NO_WATERMARKS was set when allocating the slab */ |
1606 | if (unlikely(page->pfmemalloc)) | 1606 | if (page_is_pfmemalloc(page)) |
1607 | pfmemalloc_active = true; | 1607 | pfmemalloc_active = true; |
1608 | 1608 | ||
1609 | nr_pages = (1 << cachep->gfporder); | 1609 | nr_pages = (1 << cachep->gfporder); |
@@ -1614,7 +1614,7 @@ static struct page *kmem_getpages(struct kmem_cache *cachep, gfp_t flags, | |||
1614 | add_zone_page_state(page_zone(page), | 1614 | add_zone_page_state(page_zone(page), |
1615 | NR_SLAB_UNRECLAIMABLE, nr_pages); | 1615 | NR_SLAB_UNRECLAIMABLE, nr_pages); |
1616 | __SetPageSlab(page); | 1616 | __SetPageSlab(page); |
1617 | if (page->pfmemalloc) | 1617 | if (page_is_pfmemalloc(page)) |
1618 | SetPageSlabPfmemalloc(page); | 1618 | SetPageSlabPfmemalloc(page); |
1619 | 1619 | ||
1620 | if (kmemcheck_enabled && !(cachep->flags & SLAB_NOTRACK)) { | 1620 | if (kmemcheck_enabled && !(cachep->flags & SLAB_NOTRACK)) { |
diff --git a/mm/slub.c b/mm/slub.c --- a/mm/slub.c +++ b/mm/slub.c | |||
@@ -1427,7 +1427,7 @@ static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node) | |||
1427 | inc_slabs_node(s, page_to_nid(page), page->objects); | 1427 | inc_slabs_node(s, page_to_nid(page), page->objects); |
1428 | page->slab_cache = s; | 1428 | page->slab_cache = s; |
1429 | __SetPageSlab(page); | 1429 | __SetPageSlab(page); |
1430 | if (page->pfmemalloc) | 1430 | if (page_is_pfmemalloc(page)) |
1431 | SetPageSlabPfmemalloc(page); | 1431 | SetPageSlabPfmemalloc(page); |
1432 | 1432 | ||
1433 | start = page_address(page); | 1433 | start = page_address(page); |
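The mm hunks above (page_alloc.c and the slab/slub allocators) stop reading and writing page->pfmemalloc directly and route everything through set/clear/query helpers, which lets the flag be stored without a dedicated struct page field. A rough userspace sketch of that accessor pattern, folding the flag into an existing field (struct and helper names are stand-ins, not the kernel's definitions):

#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-in for struct page: the flag is folded into an
 * existing field instead of occupying a field of its own. */
struct fake_page {
        long index;                     /* reused to carry the pfmemalloc mark */
};

static void set_page_pfmemalloc_demo(struct fake_page *p)   { p->index = -1; }
static void clear_page_pfmemalloc_demo(struct fake_page *p) { p->index = 0; }
static bool page_is_pfmemalloc_demo(const struct fake_page *p)
{
        return p->index == -1;
}

int main(void)
{
        struct fake_page page = { .index = 0 };

        set_page_pfmemalloc_demo(&page);
        printf("pfmemalloc? %s\n", page_is_pfmemalloc_demo(&page) ? "yes" : "no");
        clear_page_pfmemalloc_demo(&page);
        printf("pfmemalloc? %s\n", page_is_pfmemalloc_demo(&page) ? "yes" : "no");
        return 0;
}
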
diff --git a/net/9p/client.c b/net/9p/client.c index 498454b3c06c..ea79ee9a7348 100644 --- a/net/9p/client.c +++ b/net/9p/client.c | |||
@@ -1541,6 +1541,7 @@ p9_client_read(struct p9_fid *fid, u64 offset, struct iov_iter *to, int *err) | |||
1541 | struct p9_client *clnt = fid->clnt; | 1541 | struct p9_client *clnt = fid->clnt; |
1542 | struct p9_req_t *req; | 1542 | struct p9_req_t *req; |
1543 | int total = 0; | 1543 | int total = 0; |
1544 | *err = 0; | ||
1544 | 1545 | ||
1545 | p9_debug(P9_DEBUG_9P, ">>> TREAD fid %d offset %llu %d\n", | 1546 | p9_debug(P9_DEBUG_9P, ">>> TREAD fid %d offset %llu %d\n", |
1546 | fid->fid, (unsigned long long) offset, (int)iov_iter_count(to)); | 1547 | fid->fid, (unsigned long long) offset, (int)iov_iter_count(to)); |
@@ -1620,6 +1621,7 @@ p9_client_write(struct p9_fid *fid, u64 offset, struct iov_iter *from, int *err) | |||
1620 | struct p9_client *clnt = fid->clnt; | 1621 | struct p9_client *clnt = fid->clnt; |
1621 | struct p9_req_t *req; | 1622 | struct p9_req_t *req; |
1622 | int total = 0; | 1623 | int total = 0; |
1624 | *err = 0; | ||
1623 | 1625 | ||
1624 | p9_debug(P9_DEBUG_9P, ">>> TWRITE fid %d offset %llu count %zd\n", | 1626 | p9_debug(P9_DEBUG_9P, ">>> TWRITE fid %d offset %llu count %zd\n", |
1625 | fid->fid, (unsigned long long) offset, | 1627 | fid->fid, (unsigned long long) offset, |
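The 9p change above only adds *err = 0; at the top of p9_client_read() and p9_client_write(), so a caller that checks *err after a zero-byte transfer never acts on a stale error left over from a previous call. A tiny illustration of that out-parameter convention (the helper below is invented for the example, it is not the 9p API):

#include <stdio.h>

/* Clear the out-parameter up front, as the added lines do, so callers
 * never act on a stale *err value. */
static int read_bytes(int want, int *err)
{
        *err = 0;
        if (want < 0) {
                *err = -22;             /* stand-in for -EINVAL */
                return 0;
        }
        return want;
}

int main(void)
{
        int err = -99;                  /* stale value from an earlier call */
        int n = read_bytes(16, &err);

        printf("read %d, err %d\n", n, err);
        return 0;
}
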
diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c index 5e953297d3b2..5809b39c1922 100644 --- a/net/batman-adv/translation-table.c +++ b/net/batman-adv/translation-table.c | |||
@@ -595,8 +595,11 @@ bool batadv_tt_local_add(struct net_device *soft_iface, const uint8_t *addr, | |||
595 | /* increase the refcounter of the related vlan */ | 595 | /* increase the refcounter of the related vlan */ |
596 | vlan = batadv_softif_vlan_get(bat_priv, vid); | 596 | vlan = batadv_softif_vlan_get(bat_priv, vid); |
597 | if (WARN(!vlan, "adding TT local entry %pM to non-existent VLAN %d", | 597 | if (WARN(!vlan, "adding TT local entry %pM to non-existent VLAN %d", |
598 | addr, BATADV_PRINT_VID(vid))) | 598 | addr, BATADV_PRINT_VID(vid))) { |
599 | kfree(tt_local); | ||
600 | tt_local = NULL; | ||
599 | goto out; | 601 | goto out; |
602 | } | ||
600 | 603 | ||
601 | batadv_dbg(BATADV_DBG_TT, bat_priv, | 604 | batadv_dbg(BATADV_DBG_TT, bat_priv, |
602 | "Creating new local tt entry: %pM (vid: %d, ttvn: %d)\n", | 605 | "Creating new local tt entry: %pM (vid: %d, ttvn: %d)\n", |
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c index 0b39dcc65b94..1285eaf5dc22 100644 --- a/net/bridge/br_multicast.c +++ b/net/bridge/br_multicast.c | |||
@@ -1591,7 +1591,7 @@ static int br_multicast_ipv4_rcv(struct net_bridge *br, | |||
1591 | break; | 1591 | break; |
1592 | } | 1592 | } |
1593 | 1593 | ||
1594 | if (skb_trimmed) | 1594 | if (skb_trimmed && skb_trimmed != skb) |
1595 | kfree_skb(skb_trimmed); | 1595 | kfree_skb(skb_trimmed); |
1596 | 1596 | ||
1597 | return err; | 1597 | return err; |
@@ -1636,7 +1636,7 @@ static int br_multicast_ipv6_rcv(struct net_bridge *br, | |||
1636 | break; | 1636 | break; |
1637 | } | 1637 | } |
1638 | 1638 | ||
1639 | if (skb_trimmed) | 1639 | if (skb_trimmed && skb_trimmed != skb) |
1640 | kfree_skb(skb_trimmed); | 1640 | kfree_skb(skb_trimmed); |
1641 | 1641 | ||
1642 | return err; | 1642 | return err; |
diff --git a/net/core/skbuff.c b/net/core/skbuff.c index b6a19ca0f99e..7b84330e5d30 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c | |||
@@ -340,7 +340,7 @@ struct sk_buff *build_skb(void *data, unsigned int frag_size) | |||
340 | 340 | ||
341 | if (skb && frag_size) { | 341 | if (skb && frag_size) { |
342 | skb->head_frag = 1; | 342 | skb->head_frag = 1; |
343 | if (virt_to_head_page(data)->pfmemalloc) | 343 | if (page_is_pfmemalloc(virt_to_head_page(data))) |
344 | skb->pfmemalloc = 1; | 344 | skb->pfmemalloc = 1; |
345 | } | 345 | } |
346 | return skb; | 346 | return skb; |
@@ -4022,8 +4022,8 @@ EXPORT_SYMBOL(skb_checksum_setup); | |||
4022 | * Otherwise returns the provided skb. Returns NULL in error cases | 4022 | * Otherwise returns the provided skb. Returns NULL in error cases |
4023 | * (e.g. transport_len exceeds skb length or out-of-memory). | 4023 | * (e.g. transport_len exceeds skb length or out-of-memory). |
4024 | * | 4024 | * |
4025 | * Caller needs to set the skb transport header and release the returned skb. | 4025 | * Caller needs to set the skb transport header and free any returned skb if it |
4026 | * Provided skb is consumed. | 4026 | * differs from the provided skb. |
4027 | */ | 4027 | */ |
4028 | static struct sk_buff *skb_checksum_maybe_trim(struct sk_buff *skb, | 4028 | static struct sk_buff *skb_checksum_maybe_trim(struct sk_buff *skb, |
4029 | unsigned int transport_len) | 4029 | unsigned int transport_len) |
@@ -4032,16 +4032,12 @@ static struct sk_buff *skb_checksum_maybe_trim(struct sk_buff *skb, | |||
4032 | unsigned int len = skb_transport_offset(skb) + transport_len; | 4032 | unsigned int len = skb_transport_offset(skb) + transport_len; |
4033 | int ret; | 4033 | int ret; |
4034 | 4034 | ||
4035 | if (skb->len < len) { | 4035 | if (skb->len < len) |
4036 | kfree_skb(skb); | ||
4037 | return NULL; | 4036 | return NULL; |
4038 | } else if (skb->len == len) { | 4037 | else if (skb->len == len) |
4039 | return skb; | 4038 | return skb; |
4040 | } | ||
4041 | 4039 | ||
4042 | skb_chk = skb_clone(skb, GFP_ATOMIC); | 4040 | skb_chk = skb_clone(skb, GFP_ATOMIC); |
4043 | kfree_skb(skb); | ||
4044 | |||
4045 | if (!skb_chk) | 4041 | if (!skb_chk) |
4046 | return NULL; | 4042 | return NULL; |
4047 | 4043 | ||
@@ -4066,8 +4062,8 @@ static struct sk_buff *skb_checksum_maybe_trim(struct sk_buff *skb, | |||
4066 | * If the skb has data beyond the given transport length, then a | 4062 | * If the skb has data beyond the given transport length, then a |
4067 | * trimmed & cloned skb is checked and returned. | 4063 | * trimmed & cloned skb is checked and returned. |
4068 | * | 4064 | * |
4069 | * Caller needs to set the skb transport header and release the returned skb. | 4065 | * Caller needs to set the skb transport header and free any returned skb if it |
4070 | * Provided skb is consumed. | 4066 | * differs from the provided skb. |
4071 | */ | 4067 | */ |
4072 | struct sk_buff *skb_checksum_trimmed(struct sk_buff *skb, | 4068 | struct sk_buff *skb_checksum_trimmed(struct sk_buff *skb, |
4073 | unsigned int transport_len, | 4069 | unsigned int transport_len, |
@@ -4079,23 +4075,26 @@ struct sk_buff *skb_checksum_trimmed(struct sk_buff *skb, | |||
4079 | 4075 | ||
4080 | skb_chk = skb_checksum_maybe_trim(skb, transport_len); | 4076 | skb_chk = skb_checksum_maybe_trim(skb, transport_len); |
4081 | if (!skb_chk) | 4077 | if (!skb_chk) |
4082 | return NULL; | 4078 | goto err; |
4083 | 4079 | ||
4084 | if (!pskb_may_pull(skb_chk, offset)) { | 4080 | if (!pskb_may_pull(skb_chk, offset)) |
4085 | kfree_skb(skb_chk); | 4081 | goto err; |
4086 | return NULL; | ||
4087 | } | ||
4088 | 4082 | ||
4089 | __skb_pull(skb_chk, offset); | 4083 | __skb_pull(skb_chk, offset); |
4090 | ret = skb_chkf(skb_chk); | 4084 | ret = skb_chkf(skb_chk); |
4091 | __skb_push(skb_chk, offset); | 4085 | __skb_push(skb_chk, offset); |
4092 | 4086 | ||
4093 | if (ret) { | 4087 | if (ret) |
4094 | kfree_skb(skb_chk); | 4088 | goto err; |
4095 | return NULL; | ||
4096 | } | ||
4097 | 4089 | ||
4098 | return skb_chk; | 4090 | return skb_chk; |
4091 | |||
4092 | err: | ||
4093 | if (skb_chk && skb_chk != skb) | ||
4094 | kfree_skb(skb_chk); | ||
4095 | |||
4096 | return NULL; | ||
4097 | |||
4099 | } | 4098 | } |
4100 | EXPORT_SYMBOL(skb_checksum_trimmed); | 4099 | EXPORT_SYMBOL(skb_checksum_trimmed); |
4101 | 4100 | ||
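With the skbuff.c change, skb_checksum_trimmed() no longer consumes the skb it is given: it returns either the original skb or a trimmed clone, and only a clone is freed on error, so callers free the result only when it differs from the skb they passed in (the skb_chk != skb tests above). The same ownership rule in a small standalone form, with malloc standing in for skb allocation:

#include <stdlib.h>
#include <string.h>
#include <stdio.h>

/* Returns either buf itself (nothing to trim) or a shorter copy.
 * Caller frees the result only if it differs from buf. */
static char *maybe_trim(char *buf, size_t want)
{
        size_t len = strlen(buf);
        char *copy;

        if (len <= want)
                return buf;             /* original handed back, caller keeps ownership */

        copy = malloc(want + 1);
        if (!copy)
                return NULL;
        memcpy(copy, buf, want);
        copy[want] = '\0';
        return copy;                    /* clone, caller must free it */
}

int main(void)
{
        char original[] = "payload-with-trailer";
        char *trimmed = maybe_trim(original, 7);

        if (trimmed)
                printf("%s\n", trimmed);
        if (trimmed && trimmed != original)     /* mirrors skb_chk != skb */
                free(trimmed);
        return 0;
}
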
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c index 37c4bb89a708..b0c6258ffb79 100644 --- a/net/ipv4/fib_trie.c +++ b/net/ipv4/fib_trie.c | |||
@@ -2465,7 +2465,7 @@ static struct key_vector *fib_route_get_idx(struct fib_route_iter *iter, | |||
2465 | key = l->key + 1; | 2465 | key = l->key + 1; |
2466 | iter->pos++; | 2466 | iter->pos++; |
2467 | 2467 | ||
2468 | if (pos-- <= 0) | 2468 | if (--pos <= 0) |
2469 | break; | 2469 | break; |
2470 | 2470 | ||
2471 | l = NULL; | 2471 | l = NULL; |
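The fib_trie.c fix swaps a post-decrement for a pre-decrement: pos-- <= 0 tests the old value and walks one route past the requested position, while --pos <= 0 stops where intended. A quick standalone demonstration of that off-by-one (the counts are placeholders):

#include <stdio.h>

int main(void)
{
        int pos, steps;

        /* post-decrement: the old value is tested, so one extra step is taken */
        pos = 2;
        steps = 0;
        do {
                steps++;
        } while (!(pos-- <= 0));
        printf("pos-- <= 0 : %d steps\n", steps);       /* prints 3 */

        /* pre-decrement: the new value is tested, one fewer iteration */
        pos = 2;
        steps = 0;
        do {
                steps++;
        } while (!(--pos <= 0));
        printf("--pos <= 0 : %d steps\n", steps);       /* prints 2 */

        return 0;
}
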
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c index 651cdf648ec4..9fdfd9deac11 100644 --- a/net/ipv4/igmp.c +++ b/net/ipv4/igmp.c | |||
@@ -1435,33 +1435,35 @@ static int __ip_mc_check_igmp(struct sk_buff *skb, struct sk_buff **skb_trimmed) | |||
1435 | struct sk_buff *skb_chk; | 1435 | struct sk_buff *skb_chk; |
1436 | unsigned int transport_len; | 1436 | unsigned int transport_len; |
1437 | unsigned int len = skb_transport_offset(skb) + sizeof(struct igmphdr); | 1437 | unsigned int len = skb_transport_offset(skb) + sizeof(struct igmphdr); |
1438 | int ret; | 1438 | int ret = -EINVAL; |
1439 | 1439 | ||
1440 | transport_len = ntohs(ip_hdr(skb)->tot_len) - ip_hdrlen(skb); | 1440 | transport_len = ntohs(ip_hdr(skb)->tot_len) - ip_hdrlen(skb); |
1441 | 1441 | ||
1442 | skb_get(skb); | ||
1443 | skb_chk = skb_checksum_trimmed(skb, transport_len, | 1442 | skb_chk = skb_checksum_trimmed(skb, transport_len, |
1444 | ip_mc_validate_checksum); | 1443 | ip_mc_validate_checksum); |
1445 | if (!skb_chk) | 1444 | if (!skb_chk) |
1446 | return -EINVAL; | 1445 | goto err; |
1447 | 1446 | ||
1448 | if (!pskb_may_pull(skb_chk, len)) { | 1447 | if (!pskb_may_pull(skb_chk, len)) |
1449 | kfree_skb(skb_chk); | 1448 | goto err; |
1450 | return -EINVAL; | ||
1451 | } | ||
1452 | 1449 | ||
1453 | ret = ip_mc_check_igmp_msg(skb_chk); | 1450 | ret = ip_mc_check_igmp_msg(skb_chk); |
1454 | if (ret) { | 1451 | if (ret) |
1455 | kfree_skb(skb_chk); | 1452 | goto err; |
1456 | return ret; | ||
1457 | } | ||
1458 | 1453 | ||
1459 | if (skb_trimmed) | 1454 | if (skb_trimmed) |
1460 | *skb_trimmed = skb_chk; | 1455 | *skb_trimmed = skb_chk; |
1461 | else | 1456 | /* free now unneeded clone */ |
1457 | else if (skb_chk != skb) | ||
1462 | kfree_skb(skb_chk); | 1458 | kfree_skb(skb_chk); |
1463 | 1459 | ||
1464 | return 0; | 1460 | ret = 0; |
1461 | |||
1462 | err: | ||
1463 | if (ret && skb_chk && skb_chk != skb) | ||
1464 | kfree_skb(skb_chk); | ||
1465 | |||
1466 | return ret; | ||
1465 | } | 1467 | } |
1466 | 1468 | ||
1467 | /** | 1469 | /** |
@@ -1470,7 +1472,7 @@ static int __ip_mc_check_igmp(struct sk_buff *skb, struct sk_buff **skb_trimmed) | |||
1470 | * @skb_trimmed: to store an skb pointer trimmed to IPv4 packet tail (optional) | 1472 | * @skb_trimmed: to store an skb pointer trimmed to IPv4 packet tail (optional) |
1471 | * | 1473 | * |
1472 | * Checks whether an IPv4 packet is a valid IGMP packet. If so sets | 1474 | * Checks whether an IPv4 packet is a valid IGMP packet. If so sets |
1473 | * skb network and transport headers accordingly and returns zero. | 1475 | * skb transport header accordingly and returns zero. |
1474 | * | 1476 | * |
1475 | * -EINVAL: A broken packet was detected, i.e. it violates some internet | 1477 | * -EINVAL: A broken packet was detected, i.e. it violates some internet |
1476 | * standard | 1478 | * standard |
@@ -1485,7 +1487,8 @@ static int __ip_mc_check_igmp(struct sk_buff *skb, struct sk_buff **skb_trimmed) | |||
1485 | * to leave the original skb and its full frame unchanged (which might be | 1487 | * to leave the original skb and its full frame unchanged (which might be |
1486 | * desirable for layer 2 frame jugglers). | 1488 | * desirable for layer 2 frame jugglers). |
1487 | * | 1489 | * |
1488 | * The caller needs to release a reference count from any returned skb_trimmed. | 1490 | * Caller needs to set the skb network header and free any returned skb if it |
1491 | * differs from the provided skb. | ||
1489 | */ | 1492 | */ |
1490 | int ip_mc_check_igmp(struct sk_buff *skb, struct sk_buff **skb_trimmed) | 1493 | int ip_mc_check_igmp(struct sk_buff *skb, struct sk_buff **skb_trimmed) |
1491 | { | 1494 | { |
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c index 05e3145f7dc3..134957159c27 100644 --- a/net/ipv4/inet_connection_sock.c +++ b/net/ipv4/inet_connection_sock.c | |||
@@ -593,7 +593,7 @@ static bool reqsk_queue_unlink(struct request_sock_queue *queue, | |||
593 | } | 593 | } |
594 | 594 | ||
595 | spin_unlock(&queue->syn_wait_lock); | 595 | spin_unlock(&queue->syn_wait_lock); |
596 | if (del_timer_sync(&req->rsk_timer)) | 596 | if (timer_pending(&req->rsk_timer) && del_timer_sync(&req->rsk_timer)) |
597 | reqsk_put(req); | 597 | reqsk_put(req); |
598 | return found; | 598 | return found; |
599 | } | 599 | } |
diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c index 433231ccfb17..0330ab2e2b63 100644 --- a/net/ipv4/sysctl_net_ipv4.c +++ b/net/ipv4/sysctl_net_ipv4.c | |||
@@ -41,8 +41,6 @@ static int tcp_syn_retries_min = 1; | |||
41 | static int tcp_syn_retries_max = MAX_TCP_SYNCNT; | 41 | static int tcp_syn_retries_max = MAX_TCP_SYNCNT; |
42 | static int ip_ping_group_range_min[] = { 0, 0 }; | 42 | static int ip_ping_group_range_min[] = { 0, 0 }; |
43 | static int ip_ping_group_range_max[] = { GID_T_MAX, GID_T_MAX }; | 43 | static int ip_ping_group_range_max[] = { GID_T_MAX, GID_T_MAX }; |
44 | static int min_sndbuf = SOCK_MIN_SNDBUF; | ||
45 | static int min_rcvbuf = SOCK_MIN_RCVBUF; | ||
46 | 44 | ||
47 | /* Update system visible IP port range */ | 45 | /* Update system visible IP port range */ |
48 | static void set_local_port_range(struct net *net, int range[2]) | 46 | static void set_local_port_range(struct net *net, int range[2]) |
@@ -530,7 +528,7 @@ static struct ctl_table ipv4_table[] = { | |||
530 | .maxlen = sizeof(sysctl_tcp_wmem), | 528 | .maxlen = sizeof(sysctl_tcp_wmem), |
531 | .mode = 0644, | 529 | .mode = 0644, |
532 | .proc_handler = proc_dointvec_minmax, | 530 | .proc_handler = proc_dointvec_minmax, |
533 | .extra1 = &min_sndbuf, | 531 | .extra1 = &one, |
534 | }, | 532 | }, |
535 | { | 533 | { |
536 | .procname = "tcp_notsent_lowat", | 534 | .procname = "tcp_notsent_lowat", |
@@ -545,7 +543,7 @@ static struct ctl_table ipv4_table[] = { | |||
545 | .maxlen = sizeof(sysctl_tcp_rmem), | 543 | .maxlen = sizeof(sysctl_tcp_rmem), |
546 | .mode = 0644, | 544 | .mode = 0644, |
547 | .proc_handler = proc_dointvec_minmax, | 545 | .proc_handler = proc_dointvec_minmax, |
548 | .extra1 = &min_rcvbuf, | 546 | .extra1 = &one, |
549 | }, | 547 | }, |
550 | { | 548 | { |
551 | .procname = "tcp_app_win", | 549 | .procname = "tcp_app_win", |
@@ -758,7 +756,7 @@ static struct ctl_table ipv4_table[] = { | |||
758 | .maxlen = sizeof(sysctl_udp_rmem_min), | 756 | .maxlen = sizeof(sysctl_udp_rmem_min), |
759 | .mode = 0644, | 757 | .mode = 0644, |
760 | .proc_handler = proc_dointvec_minmax, | 758 | .proc_handler = proc_dointvec_minmax, |
761 | .extra1 = &min_rcvbuf, | 759 | .extra1 = &one |
762 | }, | 760 | }, |
763 | { | 761 | { |
764 | .procname = "udp_wmem_min", | 762 | .procname = "udp_wmem_min", |
@@ -766,7 +764,7 @@ static struct ctl_table ipv4_table[] = { | |||
766 | .maxlen = sizeof(sysctl_udp_wmem_min), | 764 | .maxlen = sizeof(sysctl_udp_wmem_min), |
767 | .mode = 0644, | 765 | .mode = 0644, |
768 | .proc_handler = proc_dointvec_minmax, | 766 | .proc_handler = proc_dointvec_minmax, |
769 | .extra1 = &min_sndbuf, | 767 | .extra1 = &one |
770 | }, | 768 | }, |
771 | { } | 769 | { } |
772 | }; | 770 | }; |
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c index 55d19861ab20..548c6237b1e7 100644 --- a/net/ipv6/ip6_fib.c +++ b/net/ipv6/ip6_fib.c | |||
@@ -172,6 +172,8 @@ static void rt6_free_pcpu(struct rt6_info *non_pcpu_rt) | |||
172 | *ppcpu_rt = NULL; | 172 | *ppcpu_rt = NULL; |
173 | } | 173 | } |
174 | } | 174 | } |
175 | |||
176 | non_pcpu_rt->rt6i_pcpu = NULL; | ||
175 | } | 177 | } |
176 | 178 | ||
177 | static void rt6_release(struct rt6_info *rt) | 179 | static void rt6_release(struct rt6_info *rt) |
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c index a38d3ac0f18f..69f4f689f06a 100644 --- a/net/ipv6/ip6_gre.c +++ b/net/ipv6/ip6_gre.c | |||
@@ -361,6 +361,7 @@ static void ip6gre_tunnel_uninit(struct net_device *dev) | |||
361 | struct ip6gre_net *ign = net_generic(t->net, ip6gre_net_id); | 361 | struct ip6gre_net *ign = net_generic(t->net, ip6gre_net_id); |
362 | 362 | ||
363 | ip6gre_tunnel_unlink(ign, t); | 363 | ip6gre_tunnel_unlink(ign, t); |
364 | ip6_tnl_dst_reset(t); | ||
364 | dev_put(dev); | 365 | dev_put(dev); |
365 | } | 366 | } |
366 | 367 | ||
diff --git a/net/ipv6/mcast_snoop.c b/net/ipv6/mcast_snoop.c index df8afe5ab31e..9405b04eecc6 100644 --- a/net/ipv6/mcast_snoop.c +++ b/net/ipv6/mcast_snoop.c | |||
@@ -143,34 +143,36 @@ static int __ipv6_mc_check_mld(struct sk_buff *skb, | |||
143 | struct sk_buff *skb_chk = NULL; | 143 | struct sk_buff *skb_chk = NULL; |
144 | unsigned int transport_len; | 144 | unsigned int transport_len; |
145 | unsigned int len = skb_transport_offset(skb) + sizeof(struct mld_msg); | 145 | unsigned int len = skb_transport_offset(skb) + sizeof(struct mld_msg); |
146 | int ret; | 146 | int ret = -EINVAL; |
147 | 147 | ||
148 | transport_len = ntohs(ipv6_hdr(skb)->payload_len); | 148 | transport_len = ntohs(ipv6_hdr(skb)->payload_len); |
149 | transport_len -= skb_transport_offset(skb) - sizeof(struct ipv6hdr); | 149 | transport_len -= skb_transport_offset(skb) - sizeof(struct ipv6hdr); |
150 | 150 | ||
151 | skb_get(skb); | ||
152 | skb_chk = skb_checksum_trimmed(skb, transport_len, | 151 | skb_chk = skb_checksum_trimmed(skb, transport_len, |
153 | ipv6_mc_validate_checksum); | 152 | ipv6_mc_validate_checksum); |
154 | if (!skb_chk) | 153 | if (!skb_chk) |
155 | return -EINVAL; | 154 | goto err; |
156 | 155 | ||
157 | if (!pskb_may_pull(skb_chk, len)) { | 156 | if (!pskb_may_pull(skb_chk, len)) |
158 | kfree_skb(skb_chk); | 157 | goto err; |
159 | return -EINVAL; | ||
160 | } | ||
161 | 158 | ||
162 | ret = ipv6_mc_check_mld_msg(skb_chk); | 159 | ret = ipv6_mc_check_mld_msg(skb_chk); |
163 | if (ret) { | 160 | if (ret) |
164 | kfree_skb(skb_chk); | 161 | goto err; |
165 | return ret; | ||
166 | } | ||
167 | 162 | ||
168 | if (skb_trimmed) | 163 | if (skb_trimmed) |
169 | *skb_trimmed = skb_chk; | 164 | *skb_trimmed = skb_chk; |
170 | else | 165 | /* free now unneeded clone */ |
166 | else if (skb_chk != skb) | ||
171 | kfree_skb(skb_chk); | 167 | kfree_skb(skb_chk); |
172 | 168 | ||
173 | return 0; | 169 | ret = 0; |
170 | |||
171 | err: | ||
172 | if (ret && skb_chk && skb_chk != skb) | ||
173 | kfree_skb(skb_chk); | ||
174 | |||
175 | return ret; | ||
174 | } | 176 | } |
175 | 177 | ||
176 | /** | 178 | /** |
@@ -179,7 +181,7 @@ static int __ipv6_mc_check_mld(struct sk_buff *skb, | |||
179 | * @skb_trimmed: to store an skb pointer trimmed to IPv6 packet tail (optional) | 181 | * @skb_trimmed: to store an skb pointer trimmed to IPv6 packet tail (optional) |
180 | * | 182 | * |
181 | * Checks whether an IPv6 packet is a valid MLD packet. If so sets | 183 | * Checks whether an IPv6 packet is a valid MLD packet. If so sets |
182 | * skb network and transport headers accordingly and returns zero. | 184 | * skb transport header accordingly and returns zero. |
183 | * | 185 | * |
184 | * -EINVAL: A broken packet was detected, i.e. it violates some internet | 186 | * -EINVAL: A broken packet was detected, i.e. it violates some internet |
185 | * standard | 187 | * standard |
@@ -194,7 +196,8 @@ static int __ipv6_mc_check_mld(struct sk_buff *skb, | |||
194 | * to leave the original skb and its full frame unchanged (which might be | 196 | * to leave the original skb and its full frame unchanged (which might be |
195 | * desirable for layer 2 frame jugglers). | 197 | * desirable for layer 2 frame jugglers). |
196 | * | 198 | * |
197 | * The caller needs to release a reference count from any returned skb_trimmed. | 199 | * Caller needs to set the skb network header and free any returned skb if it |
200 | * differs from the provided skb. | ||
198 | */ | 201 | */ |
199 | int ipv6_mc_check_mld(struct sk_buff *skb, struct sk_buff **skb_trimmed) | 202 | int ipv6_mc_check_mld(struct sk_buff *skb, struct sk_buff **skb_trimmed) |
200 | { | 203 | { |
diff --git a/net/ipv6/route.c b/net/ipv6/route.c index 9de4d2bcd916..d15586490cec 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c | |||
@@ -318,8 +318,7 @@ static const struct rt6_info ip6_blk_hole_entry_template = { | |||
318 | /* allocate dst with ip6_dst_ops */ | 318 | /* allocate dst with ip6_dst_ops */ |
319 | static struct rt6_info *__ip6_dst_alloc(struct net *net, | 319 | static struct rt6_info *__ip6_dst_alloc(struct net *net, |
320 | struct net_device *dev, | 320 | struct net_device *dev, |
321 | int flags, | 321 | int flags) |
322 | struct fib6_table *table) | ||
323 | { | 322 | { |
324 | struct rt6_info *rt = dst_alloc(&net->ipv6.ip6_dst_ops, dev, | 323 | struct rt6_info *rt = dst_alloc(&net->ipv6.ip6_dst_ops, dev, |
325 | 0, DST_OBSOLETE_FORCE_CHK, flags); | 324 | 0, DST_OBSOLETE_FORCE_CHK, flags); |
@@ -336,10 +335,9 @@ static struct rt6_info *__ip6_dst_alloc(struct net *net, | |||
336 | 335 | ||
337 | static struct rt6_info *ip6_dst_alloc(struct net *net, | 336 | static struct rt6_info *ip6_dst_alloc(struct net *net, |
338 | struct net_device *dev, | 337 | struct net_device *dev, |
339 | int flags, | 338 | int flags) |
340 | struct fib6_table *table) | ||
341 | { | 339 | { |
342 | struct rt6_info *rt = __ip6_dst_alloc(net, dev, flags, table); | 340 | struct rt6_info *rt = __ip6_dst_alloc(net, dev, flags); |
343 | 341 | ||
344 | if (rt) { | 342 | if (rt) { |
345 | rt->rt6i_pcpu = alloc_percpu_gfp(struct rt6_info *, GFP_ATOMIC); | 343 | rt->rt6i_pcpu = alloc_percpu_gfp(struct rt6_info *, GFP_ATOMIC); |
@@ -950,8 +948,7 @@ static struct rt6_info *ip6_rt_cache_alloc(struct rt6_info *ort, | |||
950 | if (ort->rt6i_flags & (RTF_CACHE | RTF_PCPU)) | 948 | if (ort->rt6i_flags & (RTF_CACHE | RTF_PCPU)) |
951 | ort = (struct rt6_info *)ort->dst.from; | 949 | ort = (struct rt6_info *)ort->dst.from; |
952 | 950 | ||
953 | rt = __ip6_dst_alloc(dev_net(ort->dst.dev), ort->dst.dev, | 951 | rt = __ip6_dst_alloc(dev_net(ort->dst.dev), ort->dst.dev, 0); |
954 | 0, ort->rt6i_table); | ||
955 | 952 | ||
956 | if (!rt) | 953 | if (!rt) |
957 | return NULL; | 954 | return NULL; |
@@ -983,8 +980,7 @@ static struct rt6_info *ip6_rt_pcpu_alloc(struct rt6_info *rt) | |||
983 | struct rt6_info *pcpu_rt; | 980 | struct rt6_info *pcpu_rt; |
984 | 981 | ||
985 | pcpu_rt = __ip6_dst_alloc(dev_net(rt->dst.dev), | 982 | pcpu_rt = __ip6_dst_alloc(dev_net(rt->dst.dev), |
986 | rt->dst.dev, rt->dst.flags, | 983 | rt->dst.dev, rt->dst.flags); |
987 | rt->rt6i_table); | ||
988 | 984 | ||
989 | if (!pcpu_rt) | 985 | if (!pcpu_rt) |
990 | return NULL; | 986 | return NULL; |
@@ -997,32 +993,53 @@ static struct rt6_info *ip6_rt_pcpu_alloc(struct rt6_info *rt) | |||
997 | /* It should be called with read_lock_bh(&tb6_lock) acquired */ | 993 | /* It should be called with read_lock_bh(&tb6_lock) acquired */ |
998 | static struct rt6_info *rt6_get_pcpu_route(struct rt6_info *rt) | 994 | static struct rt6_info *rt6_get_pcpu_route(struct rt6_info *rt) |
999 | { | 995 | { |
1000 | struct rt6_info *pcpu_rt, *prev, **p; | 996 | struct rt6_info *pcpu_rt, **p; |
1001 | 997 | ||
1002 | p = this_cpu_ptr(rt->rt6i_pcpu); | 998 | p = this_cpu_ptr(rt->rt6i_pcpu); |
1003 | pcpu_rt = *p; | 999 | pcpu_rt = *p; |
1004 | 1000 | ||
1005 | if (pcpu_rt) | 1001 | if (pcpu_rt) { |
1006 | goto done; | 1002 | dst_hold(&pcpu_rt->dst); |
1003 | rt6_dst_from_metrics_check(pcpu_rt); | ||
1004 | } | ||
1005 | return pcpu_rt; | ||
1006 | } | ||
1007 | |||
1008 | static struct rt6_info *rt6_make_pcpu_route(struct rt6_info *rt) | ||
1009 | { | ||
1010 | struct fib6_table *table = rt->rt6i_table; | ||
1011 | struct rt6_info *pcpu_rt, *prev, **p; | ||
1007 | 1012 | ||
1008 | pcpu_rt = ip6_rt_pcpu_alloc(rt); | 1013 | pcpu_rt = ip6_rt_pcpu_alloc(rt); |
1009 | if (!pcpu_rt) { | 1014 | if (!pcpu_rt) { |
1010 | struct net *net = dev_net(rt->dst.dev); | 1015 | struct net *net = dev_net(rt->dst.dev); |
1011 | 1016 | ||
1012 | pcpu_rt = net->ipv6.ip6_null_entry; | 1017 | dst_hold(&net->ipv6.ip6_null_entry->dst); |
1013 | goto done; | 1018 | return net->ipv6.ip6_null_entry; |
1014 | } | 1019 | } |
1015 | 1020 | ||
1016 | prev = cmpxchg(p, NULL, pcpu_rt); | 1021 | read_lock_bh(&table->tb6_lock); |
1017 | if (prev) { | 1022 | if (rt->rt6i_pcpu) { |
1018 | /* If someone did it before us, return prev instead */ | 1023 | p = this_cpu_ptr(rt->rt6i_pcpu); |
1024 | prev = cmpxchg(p, NULL, pcpu_rt); | ||
1025 | if (prev) { | ||
1026 | /* If someone did it before us, return prev instead */ | ||
1027 | dst_destroy(&pcpu_rt->dst); | ||
1028 | pcpu_rt = prev; | ||
1029 | } | ||
1030 | } else { | ||
1031 | /* rt has been removed from the fib6 tree | ||
1032 | * before we have a chance to acquire the read_lock. | ||
1033 | * In this case, don't brother to create a pcpu rt | ||
1034 | * since rt is going away anyway. The next | ||
1035 | * dst_check() will trigger a re-lookup. | ||
1036 | */ | ||
1019 | dst_destroy(&pcpu_rt->dst); | 1037 | dst_destroy(&pcpu_rt->dst); |
1020 | pcpu_rt = prev; | 1038 | pcpu_rt = rt; |
1021 | } | 1039 | } |
1022 | |||
1023 | done: | ||
1024 | dst_hold(&pcpu_rt->dst); | 1040 | dst_hold(&pcpu_rt->dst); |
1025 | rt6_dst_from_metrics_check(pcpu_rt); | 1041 | rt6_dst_from_metrics_check(pcpu_rt); |
1042 | read_unlock_bh(&table->tb6_lock); | ||
1026 | return pcpu_rt; | 1043 | return pcpu_rt; |
1027 | } | 1044 | } |
1028 | 1045 | ||
@@ -1097,9 +1114,22 @@ redo_rt6_select: | |||
1097 | rt->dst.lastuse = jiffies; | 1114 | rt->dst.lastuse = jiffies; |
1098 | rt->dst.__use++; | 1115 | rt->dst.__use++; |
1099 | pcpu_rt = rt6_get_pcpu_route(rt); | 1116 | pcpu_rt = rt6_get_pcpu_route(rt); |
1100 | read_unlock_bh(&table->tb6_lock); | 1117 | |
1118 | if (pcpu_rt) { | ||
1119 | read_unlock_bh(&table->tb6_lock); | ||
1120 | } else { | ||
1121 | /* We have to do the read_unlock first | ||
1122 | * because rt6_make_pcpu_route() may trigger | ||
1123 | * ip6_dst_gc() which will take the write_lock. | ||
1124 | */ | ||
1125 | dst_hold(&rt->dst); | ||
1126 | read_unlock_bh(&table->tb6_lock); | ||
1127 | pcpu_rt = rt6_make_pcpu_route(rt); | ||
1128 | dst_release(&rt->dst); | ||
1129 | } | ||
1101 | 1130 | ||
1102 | return pcpu_rt; | 1131 | return pcpu_rt; |
1132 | |||
1103 | } | 1133 | } |
1104 | } | 1134 | } |
1105 | 1135 | ||
@@ -1555,7 +1585,7 @@ struct dst_entry *icmp6_dst_alloc(struct net_device *dev, | |||
1555 | if (unlikely(!idev)) | 1585 | if (unlikely(!idev)) |
1556 | return ERR_PTR(-ENODEV); | 1586 | return ERR_PTR(-ENODEV); |
1557 | 1587 | ||
1558 | rt = ip6_dst_alloc(net, dev, 0, NULL); | 1588 | rt = ip6_dst_alloc(net, dev, 0); |
1559 | if (unlikely(!rt)) { | 1589 | if (unlikely(!rt)) { |
1560 | in6_dev_put(idev); | 1590 | in6_dev_put(idev); |
1561 | dst = ERR_PTR(-ENOMEM); | 1591 | dst = ERR_PTR(-ENOMEM); |
@@ -1742,7 +1772,8 @@ int ip6_route_add(struct fib6_config *cfg) | |||
1742 | if (!table) | 1772 | if (!table) |
1743 | goto out; | 1773 | goto out; |
1744 | 1774 | ||
1745 | rt = ip6_dst_alloc(net, NULL, (cfg->fc_flags & RTF_ADDRCONF) ? 0 : DST_NOCOUNT, table); | 1775 | rt = ip6_dst_alloc(net, NULL, |
1776 | (cfg->fc_flags & RTF_ADDRCONF) ? 0 : DST_NOCOUNT); | ||
1746 | 1777 | ||
1747 | if (!rt) { | 1778 | if (!rt) { |
1748 | err = -ENOMEM; | 1779 | err = -ENOMEM; |
@@ -2399,7 +2430,7 @@ struct rt6_info *addrconf_dst_alloc(struct inet6_dev *idev, | |||
2399 | { | 2430 | { |
2400 | struct net *net = dev_net(idev->dev); | 2431 | struct net *net = dev_net(idev->dev); |
2401 | struct rt6_info *rt = ip6_dst_alloc(net, net->loopback_dev, | 2432 | struct rt6_info *rt = ip6_dst_alloc(net, net->loopback_dev, |
2402 | DST_NOCOUNT, NULL); | 2433 | DST_NOCOUNT); |
2403 | if (!rt) | 2434 | if (!rt) |
2404 | return ERR_PTR(-ENOMEM); | 2435 | return ERR_PTR(-ENOMEM); |
2405 | 2436 | ||
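The route.c rework above splits the per-CPU route cache into a read-only lookup (rt6_get_pcpu_route) and a creation path (rt6_make_pcpu_route) that allocates before re-taking the table read lock, installs the entry with cmpxchg(), and destroys its own copy if another CPU won the race or the route was already unlinked. A compact sketch of that allocate-then-cmpxchg idiom using C11 atomics (types and names are illustrative):

#include <stdatomic.h>
#include <stdlib.h>
#include <stdio.h>

struct route { int id; };

static _Atomic(struct route *) pcpu_slot;   /* one slot standing in for the per-CPU cache */

/* Install a freshly built entry unless someone else beat us to it;
 * the loser's copy is freed and the winner's entry is returned. */
static struct route *get_or_make(int id)
{
        struct route *cur = atomic_load(&pcpu_slot);

        if (cur)
                return cur;                      /* fast path: cache hit */

        struct route *fresh = malloc(sizeof(*fresh));
        if (!fresh)
                return NULL;
        fresh->id = id;

        struct route *expected = NULL;
        if (!atomic_compare_exchange_strong(&pcpu_slot, &expected, fresh)) {
                free(fresh);                     /* lost the race: drop our copy */
                return expected;                 /* and use the winner's entry */
        }
        return fresh;
}

int main(void)
{
        struct route *r = get_or_make(42);

        printf("route id %d\n", r ? r->id : -1);
        free(atomic_load(&pcpu_slot));
        return 0;
}
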
diff --git a/net/key/af_key.c b/net/key/af_key.c index b397f0aa9005..83a70688784b 100644 --- a/net/key/af_key.c +++ b/net/key/af_key.c | |||
@@ -219,7 +219,7 @@ static int pfkey_broadcast_one(struct sk_buff *skb, struct sk_buff **skb2, | |||
219 | #define BROADCAST_ONE 1 | 219 | #define BROADCAST_ONE 1 |
220 | #define BROADCAST_REGISTERED 2 | 220 | #define BROADCAST_REGISTERED 2 |
221 | #define BROADCAST_PROMISC_ONLY 4 | 221 | #define BROADCAST_PROMISC_ONLY 4 |
222 | static int pfkey_broadcast(struct sk_buff *skb, gfp_t allocation, | 222 | static int pfkey_broadcast(struct sk_buff *skb, |
223 | int broadcast_flags, struct sock *one_sk, | 223 | int broadcast_flags, struct sock *one_sk, |
224 | struct net *net) | 224 | struct net *net) |
225 | { | 225 | { |
@@ -244,7 +244,7 @@ static int pfkey_broadcast(struct sk_buff *skb, gfp_t allocation, | |||
244 | * socket. | 244 | * socket. |
245 | */ | 245 | */ |
246 | if (pfk->promisc) | 246 | if (pfk->promisc) |
247 | pfkey_broadcast_one(skb, &skb2, allocation, sk); | 247 | pfkey_broadcast_one(skb, &skb2, GFP_ATOMIC, sk); |
248 | 248 | ||
249 | /* the exact target will be processed later */ | 249 | /* the exact target will be processed later */ |
250 | if (sk == one_sk) | 250 | if (sk == one_sk) |
@@ -259,7 +259,7 @@ static int pfkey_broadcast(struct sk_buff *skb, gfp_t allocation, | |||
259 | continue; | 259 | continue; |
260 | } | 260 | } |
261 | 261 | ||
262 | err2 = pfkey_broadcast_one(skb, &skb2, allocation, sk); | 262 | err2 = pfkey_broadcast_one(skb, &skb2, GFP_ATOMIC, sk); |
263 | 263 | ||
264 | /* Error is cleare after succecful sending to at least one | 264 | /* Error is cleare after succecful sending to at least one |
265 | * registered KM */ | 265 | * registered KM */ |
@@ -269,7 +269,7 @@ static int pfkey_broadcast(struct sk_buff *skb, gfp_t allocation, | |||
269 | rcu_read_unlock(); | 269 | rcu_read_unlock(); |
270 | 270 | ||
271 | if (one_sk != NULL) | 271 | if (one_sk != NULL) |
272 | err = pfkey_broadcast_one(skb, &skb2, allocation, one_sk); | 272 | err = pfkey_broadcast_one(skb, &skb2, GFP_KERNEL, one_sk); |
273 | 273 | ||
274 | kfree_skb(skb2); | 274 | kfree_skb(skb2); |
275 | kfree_skb(skb); | 275 | kfree_skb(skb); |
@@ -292,7 +292,7 @@ static int pfkey_do_dump(struct pfkey_sock *pfk) | |||
292 | hdr = (struct sadb_msg *) pfk->dump.skb->data; | 292 | hdr = (struct sadb_msg *) pfk->dump.skb->data; |
293 | hdr->sadb_msg_seq = 0; | 293 | hdr->sadb_msg_seq = 0; |
294 | hdr->sadb_msg_errno = rc; | 294 | hdr->sadb_msg_errno = rc; |
295 | pfkey_broadcast(pfk->dump.skb, GFP_ATOMIC, BROADCAST_ONE, | 295 | pfkey_broadcast(pfk->dump.skb, BROADCAST_ONE, |
296 | &pfk->sk, sock_net(&pfk->sk)); | 296 | &pfk->sk, sock_net(&pfk->sk)); |
297 | pfk->dump.skb = NULL; | 297 | pfk->dump.skb = NULL; |
298 | } | 298 | } |
@@ -333,7 +333,7 @@ static int pfkey_error(const struct sadb_msg *orig, int err, struct sock *sk) | |||
333 | hdr->sadb_msg_len = (sizeof(struct sadb_msg) / | 333 | hdr->sadb_msg_len = (sizeof(struct sadb_msg) / |
334 | sizeof(uint64_t)); | 334 | sizeof(uint64_t)); |
335 | 335 | ||
336 | pfkey_broadcast(skb, GFP_KERNEL, BROADCAST_ONE, sk, sock_net(sk)); | 336 | pfkey_broadcast(skb, BROADCAST_ONE, sk, sock_net(sk)); |
337 | 337 | ||
338 | return 0; | 338 | return 0; |
339 | } | 339 | } |
@@ -1365,7 +1365,7 @@ static int pfkey_getspi(struct sock *sk, struct sk_buff *skb, const struct sadb_ | |||
1365 | 1365 | ||
1366 | xfrm_state_put(x); | 1366 | xfrm_state_put(x); |
1367 | 1367 | ||
1368 | pfkey_broadcast(resp_skb, GFP_KERNEL, BROADCAST_ONE, sk, net); | 1368 | pfkey_broadcast(resp_skb, BROADCAST_ONE, sk, net); |
1369 | 1369 | ||
1370 | return 0; | 1370 | return 0; |
1371 | } | 1371 | } |
@@ -1452,7 +1452,7 @@ static int key_notify_sa(struct xfrm_state *x, const struct km_event *c) | |||
1452 | hdr->sadb_msg_seq = c->seq; | 1452 | hdr->sadb_msg_seq = c->seq; |
1453 | hdr->sadb_msg_pid = c->portid; | 1453 | hdr->sadb_msg_pid = c->portid; |
1454 | 1454 | ||
1455 | pfkey_broadcast(skb, GFP_ATOMIC, BROADCAST_ALL, NULL, xs_net(x)); | 1455 | pfkey_broadcast(skb, BROADCAST_ALL, NULL, xs_net(x)); |
1456 | 1456 | ||
1457 | return 0; | 1457 | return 0; |
1458 | } | 1458 | } |
@@ -1565,7 +1565,7 @@ static int pfkey_get(struct sock *sk, struct sk_buff *skb, const struct sadb_msg | |||
1565 | out_hdr->sadb_msg_reserved = 0; | 1565 | out_hdr->sadb_msg_reserved = 0; |
1566 | out_hdr->sadb_msg_seq = hdr->sadb_msg_seq; | 1566 | out_hdr->sadb_msg_seq = hdr->sadb_msg_seq; |
1567 | out_hdr->sadb_msg_pid = hdr->sadb_msg_pid; | 1567 | out_hdr->sadb_msg_pid = hdr->sadb_msg_pid; |
1568 | pfkey_broadcast(out_skb, GFP_ATOMIC, BROADCAST_ONE, sk, sock_net(sk)); | 1568 | pfkey_broadcast(out_skb, BROADCAST_ONE, sk, sock_net(sk)); |
1569 | 1569 | ||
1570 | return 0; | 1570 | return 0; |
1571 | } | 1571 | } |
@@ -1670,7 +1670,7 @@ static int pfkey_register(struct sock *sk, struct sk_buff *skb, const struct sad | |||
1670 | return -ENOBUFS; | 1670 | return -ENOBUFS; |
1671 | } | 1671 | } |
1672 | 1672 | ||
1673 | pfkey_broadcast(supp_skb, GFP_KERNEL, BROADCAST_REGISTERED, sk, sock_net(sk)); | 1673 | pfkey_broadcast(supp_skb, BROADCAST_REGISTERED, sk, sock_net(sk)); |
1674 | 1674 | ||
1675 | return 0; | 1675 | return 0; |
1676 | } | 1676 | } |
@@ -1689,7 +1689,7 @@ static int unicast_flush_resp(struct sock *sk, const struct sadb_msg *ihdr) | |||
1689 | hdr->sadb_msg_errno = (uint8_t) 0; | 1689 | hdr->sadb_msg_errno = (uint8_t) 0; |
1690 | hdr->sadb_msg_len = (sizeof(struct sadb_msg) / sizeof(uint64_t)); | 1690 | hdr->sadb_msg_len = (sizeof(struct sadb_msg) / sizeof(uint64_t)); |
1691 | 1691 | ||
1692 | return pfkey_broadcast(skb, GFP_ATOMIC, BROADCAST_ONE, sk, sock_net(sk)); | 1692 | return pfkey_broadcast(skb, BROADCAST_ONE, sk, sock_net(sk)); |
1693 | } | 1693 | } |
1694 | 1694 | ||
1695 | static int key_notify_sa_flush(const struct km_event *c) | 1695 | static int key_notify_sa_flush(const struct km_event *c) |
@@ -1710,7 +1710,7 @@ static int key_notify_sa_flush(const struct km_event *c) | |||
1710 | hdr->sadb_msg_len = (sizeof(struct sadb_msg) / sizeof(uint64_t)); | 1710 | hdr->sadb_msg_len = (sizeof(struct sadb_msg) / sizeof(uint64_t)); |
1711 | hdr->sadb_msg_reserved = 0; | 1711 | hdr->sadb_msg_reserved = 0; |
1712 | 1712 | ||
1713 | pfkey_broadcast(skb, GFP_ATOMIC, BROADCAST_ALL, NULL, c->net); | 1713 | pfkey_broadcast(skb, BROADCAST_ALL, NULL, c->net); |
1714 | 1714 | ||
1715 | return 0; | 1715 | return 0; |
1716 | } | 1716 | } |
@@ -1767,7 +1767,7 @@ static int dump_sa(struct xfrm_state *x, int count, void *ptr) | |||
1767 | out_hdr->sadb_msg_pid = pfk->dump.msg_portid; | 1767 | out_hdr->sadb_msg_pid = pfk->dump.msg_portid; |
1768 | 1768 | ||
1769 | if (pfk->dump.skb) | 1769 | if (pfk->dump.skb) |
1770 | pfkey_broadcast(pfk->dump.skb, GFP_ATOMIC, BROADCAST_ONE, | 1770 | pfkey_broadcast(pfk->dump.skb, BROADCAST_ONE, |
1771 | &pfk->sk, sock_net(&pfk->sk)); | 1771 | &pfk->sk, sock_net(&pfk->sk)); |
1772 | pfk->dump.skb = out_skb; | 1772 | pfk->dump.skb = out_skb; |
1773 | 1773 | ||
@@ -1847,7 +1847,7 @@ static int pfkey_promisc(struct sock *sk, struct sk_buff *skb, const struct sadb | |||
1847 | new_hdr->sadb_msg_errno = 0; | 1847 | new_hdr->sadb_msg_errno = 0; |
1848 | } | 1848 | } |
1849 | 1849 | ||
1850 | pfkey_broadcast(skb, GFP_KERNEL, BROADCAST_ALL, NULL, sock_net(sk)); | 1850 | pfkey_broadcast(skb, BROADCAST_ALL, NULL, sock_net(sk)); |
1851 | return 0; | 1851 | return 0; |
1852 | } | 1852 | } |
1853 | 1853 | ||
@@ -2181,7 +2181,7 @@ static int key_notify_policy(struct xfrm_policy *xp, int dir, const struct km_ev | |||
2181 | out_hdr->sadb_msg_errno = 0; | 2181 | out_hdr->sadb_msg_errno = 0; |
2182 | out_hdr->sadb_msg_seq = c->seq; | 2182 | out_hdr->sadb_msg_seq = c->seq; |
2183 | out_hdr->sadb_msg_pid = c->portid; | 2183 | out_hdr->sadb_msg_pid = c->portid; |
2184 | pfkey_broadcast(out_skb, GFP_ATOMIC, BROADCAST_ALL, NULL, xp_net(xp)); | 2184 | pfkey_broadcast(out_skb, BROADCAST_ALL, NULL, xp_net(xp)); |
2185 | return 0; | 2185 | return 0; |
2186 | 2186 | ||
2187 | } | 2187 | } |
@@ -2401,7 +2401,7 @@ static int key_pol_get_resp(struct sock *sk, struct xfrm_policy *xp, const struc | |||
2401 | out_hdr->sadb_msg_errno = 0; | 2401 | out_hdr->sadb_msg_errno = 0; |
2402 | out_hdr->sadb_msg_seq = hdr->sadb_msg_seq; | 2402 | out_hdr->sadb_msg_seq = hdr->sadb_msg_seq; |
2403 | out_hdr->sadb_msg_pid = hdr->sadb_msg_pid; | 2403 | out_hdr->sadb_msg_pid = hdr->sadb_msg_pid; |
2404 | pfkey_broadcast(out_skb, GFP_ATOMIC, BROADCAST_ONE, sk, xp_net(xp)); | 2404 | pfkey_broadcast(out_skb, BROADCAST_ONE, sk, xp_net(xp)); |
2405 | err = 0; | 2405 | err = 0; |
2406 | 2406 | ||
2407 | out: | 2407 | out: |
@@ -2655,7 +2655,7 @@ static int dump_sp(struct xfrm_policy *xp, int dir, int count, void *ptr) | |||
2655 | out_hdr->sadb_msg_pid = pfk->dump.msg_portid; | 2655 | out_hdr->sadb_msg_pid = pfk->dump.msg_portid; |
2656 | 2656 | ||
2657 | if (pfk->dump.skb) | 2657 | if (pfk->dump.skb) |
2658 | pfkey_broadcast(pfk->dump.skb, GFP_ATOMIC, BROADCAST_ONE, | 2658 | pfkey_broadcast(pfk->dump.skb, BROADCAST_ONE, |
2659 | &pfk->sk, sock_net(&pfk->sk)); | 2659 | &pfk->sk, sock_net(&pfk->sk)); |
2660 | pfk->dump.skb = out_skb; | 2660 | pfk->dump.skb = out_skb; |
2661 | 2661 | ||
@@ -2708,7 +2708,7 @@ static int key_notify_policy_flush(const struct km_event *c) | |||
2708 | hdr->sadb_msg_satype = SADB_SATYPE_UNSPEC; | 2708 | hdr->sadb_msg_satype = SADB_SATYPE_UNSPEC; |
2709 | hdr->sadb_msg_len = (sizeof(struct sadb_msg) / sizeof(uint64_t)); | 2709 | hdr->sadb_msg_len = (sizeof(struct sadb_msg) / sizeof(uint64_t)); |
2710 | hdr->sadb_msg_reserved = 0; | 2710 | hdr->sadb_msg_reserved = 0; |
2711 | pfkey_broadcast(skb_out, GFP_ATOMIC, BROADCAST_ALL, NULL, c->net); | 2711 | pfkey_broadcast(skb_out, BROADCAST_ALL, NULL, c->net); |
2712 | return 0; | 2712 | return 0; |
2713 | 2713 | ||
2714 | } | 2714 | } |
@@ -2770,7 +2770,7 @@ static int pfkey_process(struct sock *sk, struct sk_buff *skb, const struct sadb | |||
2770 | void *ext_hdrs[SADB_EXT_MAX]; | 2770 | void *ext_hdrs[SADB_EXT_MAX]; |
2771 | int err; | 2771 | int err; |
2772 | 2772 | ||
2773 | pfkey_broadcast(skb_clone(skb, GFP_KERNEL), GFP_KERNEL, | 2773 | pfkey_broadcast(skb_clone(skb, GFP_KERNEL), |
2774 | BROADCAST_PROMISC_ONLY, NULL, sock_net(sk)); | 2774 | BROADCAST_PROMISC_ONLY, NULL, sock_net(sk)); |
2775 | 2775 | ||
2776 | memset(ext_hdrs, 0, sizeof(ext_hdrs)); | 2776 | memset(ext_hdrs, 0, sizeof(ext_hdrs)); |
@@ -2992,7 +2992,7 @@ static int key_notify_sa_expire(struct xfrm_state *x, const struct km_event *c) | |||
2992 | out_hdr->sadb_msg_seq = 0; | 2992 | out_hdr->sadb_msg_seq = 0; |
2993 | out_hdr->sadb_msg_pid = 0; | 2993 | out_hdr->sadb_msg_pid = 0; |
2994 | 2994 | ||
2995 | pfkey_broadcast(out_skb, GFP_ATOMIC, BROADCAST_REGISTERED, NULL, xs_net(x)); | 2995 | pfkey_broadcast(out_skb, BROADCAST_REGISTERED, NULL, xs_net(x)); |
2996 | return 0; | 2996 | return 0; |
2997 | } | 2997 | } |
2998 | 2998 | ||
@@ -3182,7 +3182,7 @@ static int pfkey_send_acquire(struct xfrm_state *x, struct xfrm_tmpl *t, struct | |||
3182 | xfrm_ctx->ctx_len); | 3182 | xfrm_ctx->ctx_len); |
3183 | } | 3183 | } |
3184 | 3184 | ||
3185 | return pfkey_broadcast(skb, GFP_ATOMIC, BROADCAST_REGISTERED, NULL, xs_net(x)); | 3185 | return pfkey_broadcast(skb, BROADCAST_REGISTERED, NULL, xs_net(x)); |
3186 | } | 3186 | } |
3187 | 3187 | ||
3188 | static struct xfrm_policy *pfkey_compile_policy(struct sock *sk, int opt, | 3188 | static struct xfrm_policy *pfkey_compile_policy(struct sock *sk, int opt, |
@@ -3380,7 +3380,7 @@ static int pfkey_send_new_mapping(struct xfrm_state *x, xfrm_address_t *ipaddr, | |||
3380 | n_port->sadb_x_nat_t_port_port = sport; | 3380 | n_port->sadb_x_nat_t_port_port = sport; |
3381 | n_port->sadb_x_nat_t_port_reserved = 0; | 3381 | n_port->sadb_x_nat_t_port_reserved = 0; |
3382 | 3382 | ||
3383 | return pfkey_broadcast(skb, GFP_ATOMIC, BROADCAST_REGISTERED, NULL, xs_net(x)); | 3383 | return pfkey_broadcast(skb, BROADCAST_REGISTERED, NULL, xs_net(x)); |
3384 | } | 3384 | } |
3385 | 3385 | ||
3386 | #ifdef CONFIG_NET_KEY_MIGRATE | 3386 | #ifdef CONFIG_NET_KEY_MIGRATE |
@@ -3572,7 +3572,7 @@ static int pfkey_send_migrate(const struct xfrm_selector *sel, u8 dir, u8 type, | |||
3572 | } | 3572 | } |
3573 | 3573 | ||
3574 | /* broadcast migrate message to sockets */ | 3574 | /* broadcast migrate message to sockets */ |
3575 | pfkey_broadcast(skb, GFP_ATOMIC, BROADCAST_ALL, NULL, &init_net); | 3575 | pfkey_broadcast(skb, BROADCAST_ALL, NULL, &init_net); |
3576 | 3576 | ||
3577 | return 0; | 3577 | return 0; |
3578 | 3578 | ||
diff --git a/net/mac80211/rc80211_minstrel.c b/net/mac80211/rc80211_minstrel.c index 247552a7f6c2..3ece7d1034c8 100644 --- a/net/mac80211/rc80211_minstrel.c +++ b/net/mac80211/rc80211_minstrel.c | |||
@@ -92,14 +92,15 @@ int minstrel_get_tp_avg(struct minstrel_rate *mr, int prob_ewma) | |||
92 | static inline void | 92 | static inline void |
93 | minstrel_sort_best_tp_rates(struct minstrel_sta_info *mi, int i, u8 *tp_list) | 93 | minstrel_sort_best_tp_rates(struct minstrel_sta_info *mi, int i, u8 *tp_list) |
94 | { | 94 | { |
95 | int j = MAX_THR_RATES; | 95 | int j; |
96 | struct minstrel_rate_stats *tmp_mrs = &mi->r[j - 1].stats; | 96 | struct minstrel_rate_stats *tmp_mrs; |
97 | struct minstrel_rate_stats *cur_mrs = &mi->r[i].stats; | 97 | struct minstrel_rate_stats *cur_mrs = &mi->r[i].stats; |
98 | 98 | ||
99 | while (j > 0 && (minstrel_get_tp_avg(&mi->r[i], cur_mrs->prob_ewma) > | 99 | for (j = MAX_THR_RATES; j > 0; --j) { |
100 | minstrel_get_tp_avg(&mi->r[tp_list[j - 1]], tmp_mrs->prob_ewma))) { | ||
101 | j--; | ||
102 | tmp_mrs = &mi->r[tp_list[j - 1]].stats; | 100 | tmp_mrs = &mi->r[tp_list[j - 1]].stats; |
101 | if (minstrel_get_tp_avg(&mi->r[i], cur_mrs->prob_ewma) <= | ||
102 | minstrel_get_tp_avg(&mi->r[tp_list[j - 1]], tmp_mrs->prob_ewma)) | ||
103 | break; | ||
103 | } | 104 | } |
104 | 105 | ||
105 | if (j < MAX_THR_RATES - 1) | 106 | if (j < MAX_THR_RATES - 1) |
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c index 67d210477863..a774985489e2 100644 --- a/net/netlink/af_netlink.c +++ b/net/netlink/af_netlink.c | |||
@@ -2401,7 +2401,7 @@ static int netlink_sendmsg(struct socket *sock, struct msghdr *msg, size_t len) | |||
2401 | * sendmsg(), but that's what we've got... | 2401 | * sendmsg(), but that's what we've got... |
2402 | */ | 2402 | */ |
2403 | if (netlink_tx_is_mmaped(sk) && | 2403 | if (netlink_tx_is_mmaped(sk) && |
2404 | msg->msg_iter.type == ITER_IOVEC && | 2404 | iter_is_iovec(&msg->msg_iter) && |
2405 | msg->msg_iter.nr_segs == 1 && | 2405 | msg->msg_iter.nr_segs == 1 && |
2406 | msg->msg_iter.iov->iov_base == NULL) { | 2406 | msg->msg_iter.iov->iov_base == NULL) { |
2407 | err = netlink_mmap_sendmsg(sk, msg, dst_portid, dst_group, | 2407 | err = netlink_mmap_sendmsg(sk, msg, dst_portid, dst_group, |
diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c index cab9e9b43967..4fbb67430ce4 100644 --- a/net/sched/cls_u32.c +++ b/net/sched/cls_u32.c | |||
@@ -490,6 +490,19 @@ static bool u32_destroy(struct tcf_proto *tp, bool force) | |||
490 | return false; | 490 | return false; |
491 | } | 491 | } |
492 | } | 492 | } |
493 | |||
494 | if (tp_c->refcnt > 1) | ||
495 | return false; | ||
496 | |||
497 | if (tp_c->refcnt == 1) { | ||
498 | struct tc_u_hnode *ht; | ||
499 | |||
500 | for (ht = rtnl_dereference(tp_c->hlist); | ||
501 | ht; | ||
502 | ht = rtnl_dereference(ht->next)) | ||
503 | if (!ht_empty(ht)) | ||
504 | return false; | ||
505 | } | ||
493 | } | 506 | } |
494 | 507 | ||
495 | if (root_ht && --root_ht->refcnt == 0) | 508 | if (root_ht && --root_ht->refcnt == 0) |
diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c index 06320c8c1c86..a655ddc3f353 100644 --- a/net/sctp/sm_make_chunk.c +++ b/net/sctp/sm_make_chunk.c | |||
@@ -3132,11 +3132,18 @@ bool sctp_verify_asconf(const struct sctp_association *asoc, | |||
3132 | case SCTP_PARAM_IPV4_ADDRESS: | 3132 | case SCTP_PARAM_IPV4_ADDRESS: |
3133 | if (length != sizeof(sctp_ipv4addr_param_t)) | 3133 | if (length != sizeof(sctp_ipv4addr_param_t)) |
3134 | return false; | 3134 | return false; |
3135 | /* ensure there is only one addr param and it's in the | ||
3136 | * beginning of addip_hdr params, or we reject it. | ||
3137 | */ | ||
3138 | if (param.v != addip->addip_hdr.params) | ||
3139 | return false; | ||
3135 | addr_param_seen = true; | 3140 | addr_param_seen = true; |
3136 | break; | 3141 | break; |
3137 | case SCTP_PARAM_IPV6_ADDRESS: | 3142 | case SCTP_PARAM_IPV6_ADDRESS: |
3138 | if (length != sizeof(sctp_ipv6addr_param_t)) | 3143 | if (length != sizeof(sctp_ipv6addr_param_t)) |
3139 | return false; | 3144 | return false; |
3145 | if (param.v != addip->addip_hdr.params) | ||
3146 | return false; | ||
3140 | addr_param_seen = true; | 3147 | addr_param_seen = true; |
3141 | break; | 3148 | break; |
3142 | case SCTP_PARAM_ADD_IP: | 3149 | case SCTP_PARAM_ADD_IP: |
diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c index fef2acdf4a2e..85e6f03aeb70 100644 --- a/net/sctp/sm_sideeffect.c +++ b/net/sctp/sm_sideeffect.c | |||
@@ -702,7 +702,7 @@ static void sctp_cmd_transport_on(sctp_cmd_seq_t *cmds, | |||
702 | * outstanding data and rely on the retransmission limit be reached | 702 | * outstanding data and rely on the retransmission limit be reached |
703 | * to shutdown the association. | 703 | * to shutdown the association. |
704 | */ | 704 | */ |
705 | if (t->asoc->state != SCTP_STATE_SHUTDOWN_PENDING) | 705 | if (t->asoc->state < SCTP_STATE_SHUTDOWN_PENDING) |
706 | t->asoc->overall_error_count = 0; | 706 | t->asoc->overall_error_count = 0; |
707 | 707 | ||
708 | /* Clear the hb_sent flag to signal that we had a good | 708 | /* Clear the hb_sent flag to signal that we had a good |
diff --git a/security/security.c b/security/security.c index 595fffab48b0..994283624bdb 100644 --- a/security/security.c +++ b/security/security.c | |||
@@ -380,8 +380,8 @@ int security_inode_init_security(struct inode *inode, struct inode *dir, | |||
380 | return 0; | 380 | return 0; |
381 | 381 | ||
382 | if (!initxattrs) | 382 | if (!initxattrs) |
383 | return call_int_hook(inode_init_security, 0, inode, dir, qstr, | 383 | return call_int_hook(inode_init_security, -EOPNOTSUPP, inode, |
384 | NULL, NULL, NULL); | 384 | dir, qstr, NULL, NULL, NULL); |
385 | memset(new_xattrs, 0, sizeof(new_xattrs)); | 385 | memset(new_xattrs, 0, sizeof(new_xattrs)); |
386 | lsm_xattr = new_xattrs; | 386 | lsm_xattr = new_xattrs; |
387 | ret = call_int_hook(inode_init_security, -EOPNOTSUPP, inode, dir, qstr, | 387 | ret = call_int_hook(inode_init_security, -EOPNOTSUPP, inode, dir, qstr, |
@@ -409,8 +409,8 @@ int security_old_inode_init_security(struct inode *inode, struct inode *dir, | |||
409 | { | 409 | { |
410 | if (unlikely(IS_PRIVATE(inode))) | 410 | if (unlikely(IS_PRIVATE(inode))) |
411 | return -EOPNOTSUPP; | 411 | return -EOPNOTSUPP; |
412 | return call_int_hook(inode_init_security, 0, inode, dir, qstr, | 412 | return call_int_hook(inode_init_security, -EOPNOTSUPP, inode, dir, |
413 | name, value, len); | 413 | qstr, name, value, len); |
414 | } | 414 | } |
415 | EXPORT_SYMBOL(security_old_inode_init_security); | 415 | EXPORT_SYMBOL(security_old_inode_init_security); |
416 | 416 | ||
@@ -1281,7 +1281,8 @@ int security_socket_getpeersec_stream(struct socket *sock, char __user *optval, | |||
1281 | 1281 | ||
1282 | int security_socket_getpeersec_dgram(struct socket *sock, struct sk_buff *skb, u32 *secid) | 1282 | int security_socket_getpeersec_dgram(struct socket *sock, struct sk_buff *skb, u32 *secid) |
1283 | { | 1283 | { |
1284 | return call_int_hook(socket_getpeersec_dgram, 0, sock, skb, secid); | 1284 | return call_int_hook(socket_getpeersec_dgram, -ENOPROTOOPT, sock, |
1285 | skb, secid); | ||
1285 | } | 1286 | } |
1286 | EXPORT_SYMBOL(security_socket_getpeersec_dgram); | 1287 | EXPORT_SYMBOL(security_socket_getpeersec_dgram); |
1287 | 1288 | ||
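The security.c hunks change the dispatcher's fallback from 0 to -EOPNOTSUPP or -ENOPROTOOPT, so when no security module implements a hook the caller sees "not supported" instead of silent success. A toy dispatcher showing why that default matters (the hook table and names are invented for the example):

#include <stdio.h>
#include <errno.h>

/* Invented hook type: when no module registers an implementation, the
 * dispatcher returns the caller-supplied default instead of 0. */
typedef int (*hook_fn)(int arg);

static hook_fn registered_hook;     /* NULL: no security module registered */

static int call_int_hook_demo(hook_fn hook, int default_ret, int arg)
{
        return hook ? hook(arg) : default_ret;
}

int main(void)
{
        /* With a default of 0 the caller would see success here;
         * -EOPNOTSUPP lets it fall back explicitly. */
        printf("%d\n", call_int_hook_demo(registered_hook, -EOPNOTSUPP, 1));
        return 0;
}
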
diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c index 5de3c5d8c2c0..d1a2cb65e27c 100644 --- a/sound/pci/hda/hda_codec.c +++ b/sound/pci/hda/hda_codec.c | |||
@@ -3172,7 +3172,7 @@ static int add_std_chmaps(struct hda_codec *codec) | |||
3172 | struct snd_pcm_chmap *chmap; | 3172 | struct snd_pcm_chmap *chmap; |
3173 | const struct snd_pcm_chmap_elem *elem; | 3173 | const struct snd_pcm_chmap_elem *elem; |
3174 | 3174 | ||
3175 | if (!pcm || pcm->own_chmap || | 3175 | if (!pcm || !pcm->pcm || pcm->own_chmap || |
3176 | !hinfo->substreams) | 3176 | !hinfo->substreams) |
3177 | continue; | 3177 | continue; |
3178 | elem = hinfo->chmap ? hinfo->chmap : snd_pcm_std_chmaps; | 3178 | elem = hinfo->chmap ? hinfo->chmap : snd_pcm_std_chmaps; |
diff --git a/sound/pci/hda/hda_generic.c b/sound/pci/hda/hda_generic.c index b077bb644434..24f91114a32c 100644 --- a/sound/pci/hda/hda_generic.c +++ b/sound/pci/hda/hda_generic.c | |||
@@ -671,7 +671,8 @@ static bool is_active_nid(struct hda_codec *codec, hda_nid_t nid, | |||
671 | } | 671 | } |
672 | for (i = 0; i < path->depth; i++) { | 672 | for (i = 0; i < path->depth; i++) { |
673 | if (path->path[i] == nid) { | 673 | if (path->path[i] == nid) { |
674 | if (dir == HDA_OUTPUT || path->idx[i] == idx) | 674 | if (dir == HDA_OUTPUT || idx == -1 || |
675 | path->idx[i] == idx) | ||
675 | return true; | 676 | return true; |
676 | break; | 677 | break; |
677 | } | 678 | } |
@@ -682,7 +683,7 @@ static bool is_active_nid(struct hda_codec *codec, hda_nid_t nid, | |||
682 | 683 | ||
683 | /* check whether the NID is referred by any active paths */ | 684 | /* check whether the NID is referred by any active paths */ |
684 | #define is_active_nid_for_any(codec, nid) \ | 685 | #define is_active_nid_for_any(codec, nid) \ |
685 | is_active_nid(codec, nid, HDA_OUTPUT, 0) | 686 | is_active_nid(codec, nid, HDA_OUTPUT, -1) |
686 | 687 | ||
687 | /* get the default amp value for the target state */ | 688 | /* get the default amp value for the target state */ |
688 | static int get_amp_val_to_activate(struct hda_codec *codec, hda_nid_t nid, | 689 | static int get_amp_val_to_activate(struct hda_codec *codec, hda_nid_t nid, |
@@ -883,8 +884,7 @@ void snd_hda_activate_path(struct hda_codec *codec, struct nid_path *path, | |||
883 | struct hda_gen_spec *spec = codec->spec; | 884 | struct hda_gen_spec *spec = codec->spec; |
884 | int i; | 885 | int i; |
885 | 886 | ||
886 | if (!enable) | 887 | path->active = enable; |
887 | path->active = false; | ||
888 | 888 | ||
889 | /* make sure the widget is powered up */ | 889 | /* make sure the widget is powered up */ |
890 | if (enable && (spec->power_down_unused || codec->power_save_node)) | 890 | if (enable && (spec->power_down_unused || codec->power_save_node)) |
@@ -902,9 +902,6 @@ void snd_hda_activate_path(struct hda_codec *codec, struct nid_path *path, | |||
902 | if (has_amp_out(codec, path, i)) | 902 | if (has_amp_out(codec, path, i)) |
903 | activate_amp_out(codec, path, i, enable); | 903 | activate_amp_out(codec, path, i, enable); |
904 | } | 904 | } |
905 | |||
906 | if (enable) | ||
907 | path->active = true; | ||
908 | } | 905 | } |
909 | EXPORT_SYMBOL_GPL(snd_hda_activate_path); | 906 | EXPORT_SYMBOL_GPL(snd_hda_activate_path); |
910 | 907 | ||
diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c index f788a91b544a..ca03c40609fc 100644 --- a/sound/pci/hda/patch_conexant.c +++ b/sound/pci/hda/patch_conexant.c | |||
@@ -200,12 +200,33 @@ static int cx_auto_init(struct hda_codec *codec) | |||
200 | return 0; | 200 | return 0; |
201 | } | 201 | } |
202 | 202 | ||
203 | #define cx_auto_free snd_hda_gen_free | 203 | static void cx_auto_reboot_notify(struct hda_codec *codec) |
204 | { | ||
205 | struct conexant_spec *spec = codec->spec; | ||
206 | |||
207 | if (codec->core.vendor_id != 0x14f150f2) | ||
208 | return; | ||
209 | |||
210 | /* Turn the CX20722 codec into D3 to avoid spurious noises | ||
211 | from the internal speaker during (and after) reboot */ | ||
212 | cx_auto_turn_eapd(codec, spec->num_eapds, spec->eapds, false); | ||
213 | |||
214 | snd_hda_codec_set_power_to_all(codec, codec->core.afg, AC_PWRST_D3); | ||
215 | snd_hda_codec_write(codec, codec->core.afg, 0, | ||
216 | AC_VERB_SET_POWER_STATE, AC_PWRST_D3); | ||
217 | } | ||
218 | |||
219 | static void cx_auto_free(struct hda_codec *codec) | ||
220 | { | ||
221 | cx_auto_reboot_notify(codec); | ||
222 | snd_hda_gen_free(codec); | ||
223 | } | ||
204 | 224 | ||
205 | static const struct hda_codec_ops cx_auto_patch_ops = { | 225 | static const struct hda_codec_ops cx_auto_patch_ops = { |
206 | .build_controls = cx_auto_build_controls, | 226 | .build_controls = cx_auto_build_controls, |
207 | .build_pcms = snd_hda_gen_build_pcms, | 227 | .build_pcms = snd_hda_gen_build_pcms, |
208 | .init = cx_auto_init, | 228 | .init = cx_auto_init, |
229 | .reboot_notify = cx_auto_reboot_notify, | ||
209 | .free = cx_auto_free, | 230 | .free = cx_auto_free, |
210 | .unsol_event = snd_hda_jack_unsol_event, | 231 | .unsol_event = snd_hda_jack_unsol_event, |
211 | #ifdef CONFIG_PM | 232 | #ifdef CONFIG_PM |
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index 0b9847affbec..374ea53288ca 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c | |||
@@ -5190,6 +5190,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { | |||
5190 | SND_PCI_QUIRK(0x1028, 0x06d9, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE), | 5190 | SND_PCI_QUIRK(0x1028, 0x06d9, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE), |
5191 | SND_PCI_QUIRK(0x1028, 0x06da, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE), | 5191 | SND_PCI_QUIRK(0x1028, 0x06da, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE), |
5192 | SND_PCI_QUIRK(0x1028, 0x06de, "Dell", ALC292_FIXUP_DISABLE_AAMIX), | 5192 | SND_PCI_QUIRK(0x1028, 0x06de, "Dell", ALC292_FIXUP_DISABLE_AAMIX), |
5193 | SND_PCI_QUIRK(0x1028, 0x06db, "Dell", ALC292_FIXUP_DISABLE_AAMIX), | ||
5193 | SND_PCI_QUIRK(0x1028, 0x164a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE), | 5194 | SND_PCI_QUIRK(0x1028, 0x164a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE), |
5194 | SND_PCI_QUIRK(0x1028, 0x164b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE), | 5195 | SND_PCI_QUIRK(0x1028, 0x164b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE), |
5195 | SND_PCI_QUIRK(0x103c, 0x1586, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC2), | 5196 | SND_PCI_QUIRK(0x103c, 0x1586, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC2), |
@@ -5291,6 +5292,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { | |||
5291 | SND_PCI_QUIRK(0x17aa, 0x220c, "Thinkpad T440s", ALC292_FIXUP_TPT440_DOCK), | 5292 | SND_PCI_QUIRK(0x17aa, 0x220c, "Thinkpad T440s", ALC292_FIXUP_TPT440_DOCK), |
5292 | SND_PCI_QUIRK(0x17aa, 0x220e, "Thinkpad T440p", ALC292_FIXUP_TPT440_DOCK), | 5293 | SND_PCI_QUIRK(0x17aa, 0x220e, "Thinkpad T440p", ALC292_FIXUP_TPT440_DOCK), |
5293 | SND_PCI_QUIRK(0x17aa, 0x2210, "Thinkpad T540p", ALC292_FIXUP_TPT440_DOCK), | 5294 | SND_PCI_QUIRK(0x17aa, 0x2210, "Thinkpad T540p", ALC292_FIXUP_TPT440_DOCK), |
5295 | SND_PCI_QUIRK(0x17aa, 0x2211, "Thinkpad W541", ALC292_FIXUP_TPT440_DOCK), | ||
5294 | SND_PCI_QUIRK(0x17aa, 0x2212, "Thinkpad T440", ALC292_FIXUP_TPT440_DOCK), | 5296 | SND_PCI_QUIRK(0x17aa, 0x2212, "Thinkpad T440", ALC292_FIXUP_TPT440_DOCK), |
5295 | SND_PCI_QUIRK(0x17aa, 0x2214, "Thinkpad X240", ALC292_FIXUP_TPT440_DOCK), | 5297 | SND_PCI_QUIRK(0x17aa, 0x2214, "Thinkpad X240", ALC292_FIXUP_TPT440_DOCK), |
5296 | SND_PCI_QUIRK(0x17aa, 0x2215, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST), | 5298 | SND_PCI_QUIRK(0x17aa, 0x2215, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST), |
diff --git a/sound/soc/Kconfig b/sound/soc/Kconfig index 2ae9619443d1..1d651b8a8957 100644 --- a/sound/soc/Kconfig +++ b/sound/soc/Kconfig | |||
@@ -30,6 +30,9 @@ config SND_SOC_GENERIC_DMAENGINE_PCM | |||
30 | bool | 30 | bool |
31 | select SND_DMAENGINE_PCM | 31 | select SND_DMAENGINE_PCM |
32 | 32 | ||
33 | config SND_SOC_TOPOLOGY | ||
34 | bool | ||
35 | |||
33 | # All the supported SoCs | 36 | # All the supported SoCs |
34 | source "sound/soc/adi/Kconfig" | 37 | source "sound/soc/adi/Kconfig" |
35 | source "sound/soc/atmel/Kconfig" | 38 | source "sound/soc/atmel/Kconfig" |
diff --git a/sound/soc/Makefile b/sound/soc/Makefile index e189903fabf4..669648b41d30 100644 --- a/sound/soc/Makefile +++ b/sound/soc/Makefile | |||
@@ -1,6 +1,9 @@ | |||
1 | snd-soc-core-objs := soc-core.o soc-dapm.o soc-jack.o soc-cache.o soc-utils.o | 1 | snd-soc-core-objs := soc-core.o soc-dapm.o soc-jack.o soc-cache.o soc-utils.o |
2 | snd-soc-core-objs += soc-pcm.o soc-compress.o soc-io.o soc-devres.o soc-ops.o | 2 | snd-soc-core-objs += soc-pcm.o soc-compress.o soc-io.o soc-devres.o soc-ops.o |
3 | |||
4 | ifneq ($(CONFIG_SND_SOC_TOPOLOGY),) | ||
3 | snd-soc-core-objs += soc-topology.o | 5 | snd-soc-core-objs += soc-topology.o |
6 | endif | ||
4 | 7 | ||
5 | ifneq ($(CONFIG_SND_SOC_GENERIC_DMAENGINE_PCM),) | 8 | ifneq ($(CONFIG_SND_SOC_GENERIC_DMAENGINE_PCM),) |
6 | snd-soc-core-objs += soc-generic-dmaengine-pcm.o | 9 | snd-soc-core-objs += soc-generic-dmaengine-pcm.o |
diff --git a/sound/usb/card.c b/sound/usb/card.c index 1fab9778807a..0450593980fd 100644 --- a/sound/usb/card.c +++ b/sound/usb/card.c | |||
@@ -638,7 +638,7 @@ int snd_usb_autoresume(struct snd_usb_audio *chip) | |||
638 | int err = -ENODEV; | 638 | int err = -ENODEV; |
639 | 639 | ||
640 | down_read(&chip->shutdown_rwsem); | 640 | down_read(&chip->shutdown_rwsem); |
641 | if (chip->probing && chip->in_pm) | 641 | if (chip->probing || chip->in_pm) |
642 | err = 0; | 642 | err = 0; |
643 | else if (!chip->shutdown) | 643 | else if (!chip->shutdown) |
644 | err = usb_autopm_get_interface(chip->pm_intf); | 644 | err = usb_autopm_get_interface(chip->pm_intf); |
diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c index 754e689596a2..00ebc0ca008e 100644 --- a/sound/usb/quirks.c +++ b/sound/usb/quirks.c | |||
@@ -1268,6 +1268,7 @@ u64 snd_usb_interface_dsd_format_quirks(struct snd_usb_audio *chip, | |||
1268 | return SNDRV_PCM_FMTBIT_DSD_U32_BE; | 1268 | return SNDRV_PCM_FMTBIT_DSD_U32_BE; |
1269 | break; | 1269 | break; |
1270 | 1270 | ||
1271 | case USB_ID(0x20b1, 0x000a): /* Gustard DAC-X20U */ | ||
1271 | case USB_ID(0x20b1, 0x2009): /* DIYINHK DSD DXD 384kHz USB to I2S/DSD */ | 1272 | case USB_ID(0x20b1, 0x2009): /* DIYINHK DSD DXD 384kHz USB to I2S/DSD */ |
1272 | case USB_ID(0x20b1, 0x2023): /* JLsounds I2SoverUSB */ | 1273 | case USB_ID(0x20b1, 0x2023): /* JLsounds I2SoverUSB */ |
1273 | if (fp->altsetting == 3) | 1274 | if (fp->altsetting == 3) |
diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c index 7aa039bd379a..d46dbb1bc65d 100644 --- a/tools/perf/builtin-stat.c +++ b/tools/perf/builtin-stat.c | |||
@@ -565,7 +565,7 @@ static void print_aggr(char *prefix) | |||
565 | { | 565 | { |
566 | FILE *output = stat_config.output; | 566 | FILE *output = stat_config.output; |
567 | struct perf_evsel *counter; | 567 | struct perf_evsel *counter; |
568 | int cpu, cpu2, s, s2, id, nr; | 568 | int cpu, s, s2, id, nr; |
569 | double uval; | 569 | double uval; |
570 | u64 ena, run, val; | 570 | u64 ena, run, val; |
571 | 571 | ||
@@ -578,8 +578,7 @@ static void print_aggr(char *prefix) | |||
578 | val = ena = run = 0; | 578 | val = ena = run = 0; |
579 | nr = 0; | 579 | nr = 0; |
580 | for (cpu = 0; cpu < perf_evsel__nr_cpus(counter); cpu++) { | 580 | for (cpu = 0; cpu < perf_evsel__nr_cpus(counter); cpu++) { |
581 | cpu2 = perf_evsel__cpus(counter)->map[cpu]; | 581 | s2 = aggr_get_id(perf_evsel__cpus(counter), cpu); |
582 | s2 = aggr_get_id(evsel_list->cpus, cpu2); | ||
583 | if (s2 != id) | 582 | if (s2 != id) |
584 | continue; | 583 | continue; |
585 | val += perf_counts(counter->counts, cpu, 0)->val; | 584 | val += perf_counts(counter->counts, cpu, 0)->val; |