-rw-r--r--  Documentation/power/suspend-and-interrupts.txt | 123
-rw-r--r--  MAINTAINERS | 10
-rw-r--r--  Makefile | 2
-rw-r--r--  arch/arm/include/asm/cacheflush.h | 1
-rw-r--r--  arch/arm/include/asm/tls.h | 2
-rw-r--r--  arch/arm/kernel/kprobes-test.c | 16
-rw-r--r--  arch/arm/kernel/kprobes-test.h | 5
-rw-r--r--  arch/arm/mm/alignment.c | 3
-rw-r--r--  arch/arm/mm/proc-v7-3level.S | 4
-rw-r--r--  arch/x86/kernel/apic/io_apic.c | 5
-rw-r--r--  drivers/acpi/acpi_lpss.c | 167
-rw-r--r--  drivers/acpi/acpi_pnp.c | 4
-rw-r--r--  drivers/acpi/acpica/evxfgpe.c | 32
-rw-r--r--  drivers/acpi/acpica/hwgpe.c | 8
-rw-r--r--  drivers/acpi/acpica/utresrc.c | 4
-rw-r--r--  drivers/acpi/battery.c | 2
-rw-r--r--  drivers/acpi/blacklist.c | 36
-rw-r--r--  drivers/acpi/fan.c | 18
-rw-r--r--  drivers/acpi/osl.c | 10
-rw-r--r--  drivers/acpi/pci_root.c | 14
-rw-r--r--  drivers/acpi/processor_core.c | 6
-rw-r--r--  drivers/acpi/sbs.c | 80
-rw-r--r--  drivers/acpi/utils.c | 1
-rw-r--r--  drivers/acpi/video.c | 291
-rw-r--r--  drivers/acpi/video_detect.c | 8
-rw-r--r--  drivers/base/power/wakeup.c | 16
-rw-r--r--  drivers/base/syscore.c | 7
-rw-r--r--  drivers/cpufreq/cpufreq.c | 7
-rw-r--r--  drivers/cpufreq/integrator-cpufreq.c | 4
-rw-r--r--  drivers/cpufreq/pcc-cpufreq.c | 2
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_gtt.c | 14
-rw-r--r--  drivers/gpu/drm/i915/intel_opregion.c | 16
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/disp/nv50.c | 3
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_chan.c | 7
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_display.c | 23
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_display.h | 5
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_drm.c | 51
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_fbcon.c | 23
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_fbcon.h | 1
-rw-r--r--  drivers/i2c/busses/i2c-qup.c | 12
-rw-r--r--  drivers/i2c/busses/i2c-rk3x.c | 2
-rw-r--r--  drivers/md/raid5.c | 18
-rw-r--r--  drivers/media/usb/em28xx/em28xx-cards.c | 1
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2.c | 5
-rw-r--r--  drivers/net/ethernet/broadcom/tg3.c | 3
-rw-r--r--  drivers/net/ethernet/cadence/macb.c | 11
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/main.c | 4
-rw-r--r--  drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c | 6
-rw-r--r--  drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c | 2
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c | 5
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c | 10
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 11
-rw-r--r--  drivers/net/hyperv/netvsc_drv.c | 3
-rw-r--r--  drivers/net/macvtap.c | 18
-rw-r--r--  drivers/net/usb/r8152.c | 88
-rw-r--r--  drivers/parisc/superio.c | 3
-rw-r--r--  drivers/pci/pcie/pme.c | 61
-rw-r--r--  drivers/platform/x86/fujitsu-laptop.c | 16
-rw-r--r--  drivers/scsi/cxgbi/cxgb4i/cxgb4i.c | 2
-rw-r--r--  drivers/scsi/cxgbi/libcxgbi.c | 57
-rw-r--r--  drivers/scsi/cxgbi/libcxgbi.h | 3
-rw-r--r--  drivers/usb/storage/uas.c | 7
-rw-r--r--  fs/cifs/file.c | 8
-rw-r--r--  fs/cifs/smb1ops.c | 2
-rw-r--r--  fs/cifs/smb2maperror.c | 2
-rw-r--r--  fs/nfsd/nfs4xdr.c | 3
-rw-r--r--  fs/ocfs2/dlm/dlmmaster.c | 4
-rw-r--r--  include/acpi/acnames.h | 4
-rw-r--r--  include/acpi/acpixf.h | 3
-rw-r--r--  include/acpi/actbl1.h | 19
-rw-r--r--  include/acpi/actbl3.h | 9
-rw-r--r--  include/linux/interrupt.h | 5
-rw-r--r--  include/linux/irq.h | 8
-rw-r--r--  include/linux/irqdesc.h | 10
-rw-r--r--  include/linux/suspend.h | 4
-rw-r--r--  include/net/ip6_fib.h | 5
-rw-r--r--  include/net/net_namespace.h | 20
-rw-r--r--  init/Kconfig | 3
-rw-r--r--  kernel/events/core.c | 4
-rw-r--r--  kernel/fork.c | 5
-rw-r--r--  kernel/irq/chip.c | 85
-rw-r--r--  kernel/irq/internals.h | 16
-rw-r--r--  kernel/irq/manage.c | 32
-rw-r--r--  kernel/irq/pm.c | 159
-rw-r--r--  kernel/power/process.c | 1
-rw-r--r--  kernel/trace/ring_buffer.c | 2
-rw-r--r--  lib/rhashtable.c | 8
-rw-r--r--  mm/huge_memory.c | 7
-rw-r--r--  mm/memcontrol.c | 36
-rw-r--r--  mm/migrate.c | 5
-rw-r--r--  mm/page_alloc.c | 7
-rw-r--r--  net/core/skbuff.c | 3
-rw-r--r--  net/ipv4/ip_tunnel.c | 11
-rw-r--r--  net/ipv4/route.c | 2
-rw-r--r--  net/ipv6/addrconf.c | 3
-rw-r--r--  net/ipv6/addrconf_core.c | 7
-rw-r--r--  net/ipv6/ip6_fib.c | 20
-rw-r--r--  net/ipv6/ip6_gre.c | 3
-rw-r--r--  net/ipv6/ip6_tunnel.c | 6
-rw-r--r--  net/ipv6/ip6_vti.c | 6
-rw-r--r--  net/ipv6/route.c | 4
-rw-r--r--  net/netfilter/Kconfig | 1
-rw-r--r--  net/netfilter/nfnetlink.c | 64
-rw-r--r--  net/netfilter/nft_hash.c | 12
-rw-r--r--  net/netfilter/nft_rbtree.c | 2
-rw-r--r--  net/sched/ematch.c | 6
-rw-r--r--  sound/soc/codecs/rt286.c | 7
-rw-r--r--  sound/soc/codecs/ssm2602.c | 2
-rw-r--r--  sound/soc/fsl/fsl_ssi.c | 12
-rw-r--r--  sound/soc/soc-compress.c | 6
-rw-r--r--  sound/soc/soc-core.c | 2
111 files changed, 1215 insertions, 819 deletions
diff --git a/Documentation/power/suspend-and-interrupts.txt b/Documentation/power/suspend-and-interrupts.txt
new file mode 100644
index 000000000000..69663640dea5
--- /dev/null
+++ b/Documentation/power/suspend-and-interrupts.txt
@@ -0,0 +1,123 @@
1System Suspend and Device Interrupts
2
3Copyright (C) 2014 Intel Corp.
4Author: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
5
6
7Suspending and Resuming Device IRQs
8-----------------------------------
9
10Device interrupt request lines (IRQs) are generally disabled during system
11suspend after the "late" phase of suspending devices (that is, after all of the
12->prepare, ->suspend and ->suspend_late callbacks have been executed for all
13devices). That is done by suspend_device_irqs().
14
15The rationale for doing so is that after the "late" phase of device suspend
16there is no legitimate reason why any interrupts from suspended devices should
17trigger, and if any devices have not been suspended properly yet, it is better to
18block interrupts from them anyway. Also, in the past we had problems with
19interrupt handlers for shared IRQs whose device drivers were not prepared for
20interrupts triggering after their devices had been suspended.
21In some cases they would attempt to access, for example, memory address spaces
22of suspended devices and cause unpredictable behavior to ensue as a result.
23Unfortunately, such problems are very difficult to debug and the introduction
24of suspend_device_irqs(), along with the "noirq" phase of device suspend and
25resume, was the only practical way to mitigate them.
26
27Device IRQs are re-enabled during system resume, right before the "early" phase
28of resuming devices (that is, before starting to execute ->resume_early
29callbacks for devices). The function doing that is resume_device_irqs().
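
Roughly, the ordering described above can be pictured as follows. This is a
simplified sketch using the high-level device PM helpers, not the literal code
in kernel/power/:

	/*
	 * Simplified suspend/resume ordering (sketch only):
	 *
	 *   dpm_prepare();           ->prepare callbacks
	 *   dpm_suspend();           ->suspend callbacks
	 *   dpm_suspend_late();      ->suspend_late callbacks
	 *   suspend_device_irqs();   device IRQs disabled from here on
	 *   dpm_suspend_noirq();     ->suspend_noirq callbacks
	 *   ... the platform enters the sleep state and wakes up ...
	 *   dpm_resume_noirq();      ->resume_noirq callbacks
	 *   resume_device_irqs();    device IRQs re-enabled
	 *   dpm_resume_early();      ->resume_early callbacks
	 *   dpm_resume();            ->resume callbacks
	 */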
30
31
32The IRQF_NO_SUSPEND Flag
33------------------------
34
35There are interrupts that can legitimately trigger during the entire system
36suspend-resume cycle, including the "noirq" phases of suspending and resuming
37devices as well as during the time when nonboot CPUs are taken offline and
38brought back online. That applies to timer interrupts in the first place,
39but also to IPIs and to some other special-purpose interrupts.
40
41The IRQF_NO_SUSPEND flag is used to indicate that to the IRQ subsystem when
42requesting a special-purpose interrupt. It causes suspend_device_irqs() to
43leave the corresponding IRQ enabled so as to allow the interrupt to work all
44the time as expected.
45
46Note that the IRQF_NO_SUSPEND flag affects the entire IRQ and not just one
47user of it. Thus, if the IRQ is shared, all of the interrupt handlers installed
48for it will be executed as usual after suspend_device_irqs(), even if the
49IRQF_NO_SUSPEND flag was not passed to request_irq() (or equivalent) by some of
50the IRQ's users. For this reason, using IRQF_NO_SUSPEND and IRQF_SHARED at the
51same time should be avoided.
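
As an illustration, here is a minimal sketch of requesting such a
special-purpose interrupt. The handler, helper name and IRQ number are
hypothetical; request_irq() and IRQF_NO_SUSPEND are the real interfaces being
described:

	#include <linux/interrupt.h>

	static irqreturn_t example_tick_handler(int irq, void *dev_id)
	{
		/*
		 * This handler keeps running after suspend_device_irqs(),
		 * including during the "noirq" phases, so it must not rely
		 * on anything that has already been suspended.
		 */
		return IRQ_HANDLED;
	}

	static int example_setup_tick_irq(unsigned int irq, void *priv)
	{
		/*
		 * IRQF_NO_SUSPEND keeps the whole IRQ enabled across the
		 * suspend-resume cycle; as noted above, do not combine it
		 * with IRQF_SHARED.
		 */
		return request_irq(irq, example_tick_handler, IRQF_NO_SUSPEND,
				   "example-tick", priv);
	}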
52
53
54System Wakeup Interrupts, enable_irq_wake() and disable_irq_wake()
55------------------------------------------------------------------
56
57System wakeup interrupts generally need to be configured to wake up the system
58from sleep states, especially if they are used for different purposes (e.g. as
59I/O interrupts) in the working state.
60
61That may involve turning on a special signal handling logic within the platform
62(such as an SoC) so that signals from a given line are routed in a different way
63during system sleep so as to trigger a system wakeup when needed. For example,
64the platform may include a dedicated interrupt controller used specifically for
65handling system wakeup events. Then, if a given interrupt line is supposed to
66wake up the system from sleep states, the corresponding input of that interrupt
67controller needs to be enabled to receive signals from the line in question.
68After wakeup, it generally is better to disable that input to prevent the
69dedicated controller from triggering interrupts unnecessarily.
70
71The IRQ subsystem provides two helper functions to be used by device drivers for
72those purposes. Namely, enable_irq_wake() turns on the platform's logic for
73handling the given IRQ as a system wakeup interrupt line and disable_irq_wake()
74turns that logic off.
75
76Calling enable_irq_wake() causes suspend_device_irqs() to treat the given IRQ
77in a special way. Namely, the IRQ remains enabled, but on the first interrupt
78it will be disabled, marked as pending and "suspended" so that it will be
79re-enabled by resume_device_irqs() during the subsequent system resume. Also
80the PM core is notified about the event which causes the system suspend in
81progress to be aborted (that doesn't have to happen immediately, but at one
82of the points where the suspend thread looks for pending wakeup events).
83
84This way every interrupt from a wakeup interrupt source will either cause the
85system suspend currently in progress to be aborted or wake up the system if
86already suspended. However, after suspend_device_irqs() interrupt handlers are
87not executed for system wakeup IRQs. They are only executed for IRQF_NO_SUSPEND
88IRQs at that time, but those IRQs should not be configured for system wakeup
89using enable_irq_wake().
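
A hedged sketch of how a driver typically uses these helpers from its system
sleep callbacks follows. The driver structure and function names are
hypothetical; device_may_wakeup(), enable_irq_wake() and disable_irq_wake()
are the real interfaces:

	#include <linux/device.h>
	#include <linux/interrupt.h>
	#include <linux/pm.h>
	#include <linux/pm_wakeup.h>

	struct example_priv {
		int irq;	/* the device's wakeup-capable IRQ */
	};

	static int example_suspend(struct device *dev)
	{
		struct example_priv *priv = dev_get_drvdata(dev);

		/*
		 * Arm the platform's wakeup logic for this line only if the
		 * device is allowed to wake up the system.
		 */
		if (device_may_wakeup(dev))
			enable_irq_wake(priv->irq);
		return 0;
	}

	static int example_resume(struct device *dev)
	{
		struct example_priv *priv = dev_get_drvdata(dev);

		if (device_may_wakeup(dev))
			disable_irq_wake(priv->irq);
		return 0;
	}

	static SIMPLE_DEV_PM_OPS(example_pm_ops, example_suspend, example_resume);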
90
91
92Interrupts and Suspend-to-Idle
93------------------------------
94
95Suspend-to-idle (also known as the "freeze" sleep state) is a relatively new
96system sleep state that works by idling all of the processors and waiting for
97interrupts right after the "noirq" phase of suspending devices.
98
99Of course, this means that all of the interrupts with the IRQF_NO_SUSPEND flag
100set will bring CPUs out of idle while in that state, but they will not cause the
101IRQ subsystem to trigger a system wakeup.
102
103System wakeup interrupts, in turn, will trigger wakeup from suspend-to-idle in
104analogy with what they do in the full system suspend case. The only difference
105is that the wakeup from suspend-to-idle is signaled using the usual working
106state interrupt delivery mechanisms and doesn't require the platform to use
107any special interrupt handling logic for it to work.
108
109
110IRQF_NO_SUSPEND and enable_irq_wake()
111-------------------------------------
112
113There are no valid reasons to use both enable_irq_wake() and the IRQF_NO_SUSPEND
114flag on the same IRQ.
115
116First of all, if the IRQ is not shared, the rules for handling IRQF_NO_SUSPEND
117interrupts (interrupt handlers are invoked after suspend_device_irqs()) are
118directly at odds with the rules for handling system wakeup interrupts (interrupt
119handlers are not invoked after suspend_device_irqs()).
120
121Second, both enable_irq_wake() and IRQF_NO_SUSPEND apply to entire IRQs and not
122to individual interrupt handlers, so sharing an IRQ between a system wakeup
123interrupt source and an IRQF_NO_SUSPEND interrupt source does not make sense.
diff --git a/MAINTAINERS b/MAINTAINERS
index 37054306dc9f..f10ed3914ea8 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1665,6 +1665,12 @@ M: Nicolas Ferre <nicolas.ferre@atmel.com>
1665S: Supported 1665S: Supported
1666F: drivers/tty/serial/atmel_serial.c 1666F: drivers/tty/serial/atmel_serial.c
1667 1667
1668ATMEL Audio ALSA driver
1669M: Bo Shen <voice.shen@atmel.com>
1670L: alsa-devel@alsa-project.org (moderated for non-subscribers)
1671S: Supported
1672F: sound/soc/atmel
1673
1668ATMEL DMA DRIVER 1674ATMEL DMA DRIVER
1669M: Nicolas Ferre <nicolas.ferre@atmel.com> 1675M: Nicolas Ferre <nicolas.ferre@atmel.com>
1670L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) 1676L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
@@ -2098,7 +2104,7 @@ S: Supported
2098F: drivers/scsi/bfa/ 2104F: drivers/scsi/bfa/
2099 2105
2100BROCADE BNA 10 GIGABIT ETHERNET DRIVER 2106BROCADE BNA 10 GIGABIT ETHERNET DRIVER
2101M: Rasesh Mody <rmody@brocade.com> 2107M: Rasesh Mody <rasesh.mody@qlogic.com>
2102L: netdev@vger.kernel.org 2108L: netdev@vger.kernel.org
2103S: Supported 2109S: Supported
2104F: drivers/net/ethernet/brocade/bna/ 2110F: drivers/net/ethernet/brocade/bna/
@@ -5478,7 +5484,7 @@ F: drivers/macintosh/
5478LINUX FOR POWERPC EMBEDDED MPC5XXX 5484LINUX FOR POWERPC EMBEDDED MPC5XXX
5479M: Anatolij Gustschin <agust@denx.de> 5485M: Anatolij Gustschin <agust@denx.de>
5480L: linuxppc-dev@lists.ozlabs.org 5486L: linuxppc-dev@lists.ozlabs.org
5481T: git git://git.denx.de/linux-2.6-agust.git 5487T: git git://git.denx.de/linux-denx-agust.git
5482S: Maintained 5488S: Maintained
5483F: arch/powerpc/platforms/512x/ 5489F: arch/powerpc/platforms/512x/
5484F: arch/powerpc/platforms/52xx/ 5490F: arch/powerpc/platforms/52xx/
diff --git a/Makefile b/Makefile
index be79944f74d2..b77de27e58fc 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
1VERSION = 3 1VERSION = 3
2PATCHLEVEL = 17 2PATCHLEVEL = 17
3SUBLEVEL = 0 3SUBLEVEL = 0
4EXTRAVERSION = -rc7 4EXTRAVERSION =
5NAME = Shuffling Zombie Juror 5NAME = Shuffling Zombie Juror
6 6
7# *DOCUMENTATION* 7# *DOCUMENTATION*
diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
index 79ecb4f34ffb..10e78d00a0bb 100644
--- a/arch/arm/include/asm/cacheflush.h
+++ b/arch/arm/include/asm/cacheflush.h
@@ -466,6 +466,7 @@ static inline void __sync_cache_range_r(volatile void *p, size_t size)
466 */ 466 */
467#define v7_exit_coherency_flush(level) \ 467#define v7_exit_coherency_flush(level) \
468 asm volatile( \ 468 asm volatile( \
469 ".arch armv7-a \n\t" \
469 "stmfd sp!, {fp, ip} \n\t" \ 470 "stmfd sp!, {fp, ip} \n\t" \
470 "mrc p15, 0, r0, c1, c0, 0 @ get SCTLR \n\t" \ 471 "mrc p15, 0, r0, c1, c0, 0 @ get SCTLR \n\t" \
471 "bic r0, r0, #"__stringify(CR_C)" \n\t" \ 472 "bic r0, r0, #"__stringify(CR_C)" \n\t" \
diff --git a/arch/arm/include/asm/tls.h b/arch/arm/include/asm/tls.h
index 36172adda9d0..5f833f7adba1 100644
--- a/arch/arm/include/asm/tls.h
+++ b/arch/arm/include/asm/tls.h
@@ -81,6 +81,7 @@ static inline void set_tls(unsigned long val)
81 asm("mcr p15, 0, %0, c13, c0, 3" 81 asm("mcr p15, 0, %0, c13, c0, 3"
82 : : "r" (val)); 82 : : "r" (val));
83 } else { 83 } else {
84#ifdef CONFIG_KUSER_HELPERS
84 /* 85 /*
85 * User space must never try to access this 86 * User space must never try to access this
86 * directly. Expect your app to break 87 * directly. Expect your app to break
@@ -89,6 +90,7 @@ static inline void set_tls(unsigned long val)
89 * entry-armv.S for details) 90 * entry-armv.S for details)
90 */ 91 */
91 *((unsigned int *)0xffff0ff0) = val; 92 *((unsigned int *)0xffff0ff0) = val;
93#endif
92 } 94 }
93 95
94 } 96 }
diff --git a/arch/arm/kernel/kprobes-test.c b/arch/arm/kernel/kprobes-test.c
index 08d731294bcd..b206d7790c77 100644
--- a/arch/arm/kernel/kprobes-test.c
+++ b/arch/arm/kernel/kprobes-test.c
@@ -110,10 +110,13 @@
110 * 110 *
111 * @ TESTCASE_START 111 * @ TESTCASE_START
112 * bl __kprobes_test_case_start 112 * bl __kprobes_test_case_start
113 * @ start of inline data... 113 * .pushsection .rodata
114 * "10:
114 * .ascii "mov r0, r7" @ text title for test case 115 * .ascii "mov r0, r7" @ text title for test case
115 * .byte 0 116 * .byte 0
116 * .align 2, 0 117 * .popsection
118 * @ start of inline data...
119 * .word 10b @ pointer to title in .rodata section
117 * 120 *
118 * @ TEST_ARG_REG 121 * @ TEST_ARG_REG
119 * .byte ARG_TYPE_REG 122 * .byte ARG_TYPE_REG
@@ -971,7 +974,7 @@ void __naked __kprobes_test_case_start(void)
971 __asm__ __volatile__ ( 974 __asm__ __volatile__ (
972 "stmdb sp!, {r4-r11} \n\t" 975 "stmdb sp!, {r4-r11} \n\t"
973 "sub sp, sp, #"__stringify(TEST_MEMORY_SIZE)"\n\t" 976 "sub sp, sp, #"__stringify(TEST_MEMORY_SIZE)"\n\t"
974 "bic r0, lr, #1 @ r0 = inline title string \n\t" 977 "bic r0, lr, #1 @ r0 = inline data \n\t"
975 "mov r1, sp \n\t" 978 "mov r1, sp \n\t"
976 "bl kprobes_test_case_start \n\t" 979 "bl kprobes_test_case_start \n\t"
977 "bx r0 \n\t" 980 "bx r0 \n\t"
@@ -1349,15 +1352,14 @@ static unsigned long next_instruction(unsigned long pc)
1349 return pc + 4; 1352 return pc + 4;
1350} 1353}
1351 1354
1352static uintptr_t __used kprobes_test_case_start(const char *title, void *stack) 1355static uintptr_t __used kprobes_test_case_start(const char **title, void *stack)
1353{ 1356{
1354 struct test_arg *args; 1357 struct test_arg *args;
1355 struct test_arg_end *end_arg; 1358 struct test_arg_end *end_arg;
1356 unsigned long test_code; 1359 unsigned long test_code;
1357 1360
1358 args = (struct test_arg *)PTR_ALIGN(title + strlen(title) + 1, 4); 1361 current_title = *title++;
1359 1362 args = (struct test_arg *)title;
1360 current_title = title;
1361 current_args = args; 1363 current_args = args;
1362 current_stack = stack; 1364 current_stack = stack;
1363 1365
diff --git a/arch/arm/kernel/kprobes-test.h b/arch/arm/kernel/kprobes-test.h
index eecc90a0fd91..4430990e90e7 100644
--- a/arch/arm/kernel/kprobes-test.h
+++ b/arch/arm/kernel/kprobes-test.h
@@ -111,11 +111,14 @@ struct test_arg_end {
111#define TESTCASE_START(title) \ 111#define TESTCASE_START(title) \
112 __asm__ __volatile__ ( \ 112 __asm__ __volatile__ ( \
113 "bl __kprobes_test_case_start \n\t" \ 113 "bl __kprobes_test_case_start \n\t" \
114 ".pushsection .rodata \n\t" \
115 "10: \n\t" \
114 /* don't use .asciz here as 'title' may be */ \ 116 /* don't use .asciz here as 'title' may be */ \
115 /* multiple strings to be concatenated. */ \ 117 /* multiple strings to be concatenated. */ \
116 ".ascii "#title" \n\t" \ 118 ".ascii "#title" \n\t" \
117 ".byte 0 \n\t" \ 119 ".byte 0 \n\t" \
118 ".align 2, 0 \n\t" 120 ".popsection \n\t" \
121 ".word 10b \n\t"
119 122
120#define TEST_ARG_REG(reg, val) \ 123#define TEST_ARG_REG(reg, val) \
121 ".byte "__stringify(ARG_TYPE_REG)" \n\t" \ 124 ".byte "__stringify(ARG_TYPE_REG)" \n\t" \
diff --git a/arch/arm/mm/alignment.c b/arch/arm/mm/alignment.c
index 0c1ab49e5f7b..83792f4324ea 100644
--- a/arch/arm/mm/alignment.c
+++ b/arch/arm/mm/alignment.c
@@ -41,6 +41,7 @@
41 * This code is not portable to processors with late data abort handling. 41 * This code is not portable to processors with late data abort handling.
42 */ 42 */
43#define CODING_BITS(i) (i & 0x0e000000) 43#define CODING_BITS(i) (i & 0x0e000000)
44#define COND_BITS(i) (i & 0xf0000000)
44 45
45#define LDST_I_BIT(i) (i & (1 << 26)) /* Immediate constant */ 46#define LDST_I_BIT(i) (i & (1 << 26)) /* Immediate constant */
46#define LDST_P_BIT(i) (i & (1 << 24)) /* Preindex */ 47#define LDST_P_BIT(i) (i & (1 << 24)) /* Preindex */
@@ -821,6 +822,8 @@ do_alignment(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
821 break; 822 break;
822 823
823 case 0x04000000: /* ldr or str immediate */ 824 case 0x04000000: /* ldr or str immediate */
825 if (COND_BITS(instr) == 0xf0000000) /* NEON VLDn, VSTn */
826 goto bad;
824 offset.un = OFFSET_BITS(instr); 827 offset.un = OFFSET_BITS(instr);
825 handler = do_alignment_ldrstr; 828 handler = do_alignment_ldrstr;
826 break; 829 break;
diff --git a/arch/arm/mm/proc-v7-3level.S b/arch/arm/mm/proc-v7-3level.S
index b64e67c7f176..d3daed0ae0ad 100644
--- a/arch/arm/mm/proc-v7-3level.S
+++ b/arch/arm/mm/proc-v7-3level.S
@@ -157,9 +157,9 @@ ENDPROC(cpu_v7_set_pte_ext)
157 * TFR EV X F IHD LR S 157 * TFR EV X F IHD LR S
158 * .EEE ..EE PUI. .TAT 4RVI ZWRS BLDP WCAM 158 * .EEE ..EE PUI. .TAT 4RVI ZWRS BLDP WCAM
159 * rxxx rrxx xxx0 0101 xxxx xxxx x111 xxxx < forced 159 * rxxx rrxx xxx0 0101 xxxx xxxx x111 xxxx < forced
160 * 11 0 110 1 0011 1100 .111 1101 < we want 160 * 11 0 110 0 0011 1100 .111 1101 < we want
161 */ 161 */
162 .align 2 162 .align 2
163 .type v7_crval, #object 163 .type v7_crval, #object
164v7_crval: 164v7_crval:
165 crval clear=0x0120c302, mmuset=0x30c23c7d, ucset=0x00c01c7c 165 crval clear=0x0122c302, mmuset=0x30c03c7d, ucset=0x00c01c7c
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index 337ce5a9b15c..1183d545da1e 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -2623,6 +2623,7 @@ static struct irq_chip ioapic_chip __read_mostly = {
2623 .irq_eoi = ack_apic_level, 2623 .irq_eoi = ack_apic_level,
2624 .irq_set_affinity = native_ioapic_set_affinity, 2624 .irq_set_affinity = native_ioapic_set_affinity,
2625 .irq_retrigger = ioapic_retrigger_irq, 2625 .irq_retrigger = ioapic_retrigger_irq,
2626 .flags = IRQCHIP_SKIP_SET_WAKE,
2626}; 2627};
2627 2628
2628static inline void init_IO_APIC_traps(void) 2629static inline void init_IO_APIC_traps(void)
@@ -3173,6 +3174,7 @@ static struct irq_chip msi_chip = {
3173 .irq_ack = ack_apic_edge, 3174 .irq_ack = ack_apic_edge,
3174 .irq_set_affinity = msi_set_affinity, 3175 .irq_set_affinity = msi_set_affinity,
3175 .irq_retrigger = ioapic_retrigger_irq, 3176 .irq_retrigger = ioapic_retrigger_irq,
3177 .flags = IRQCHIP_SKIP_SET_WAKE,
3176}; 3178};
3177 3179
3178int setup_msi_irq(struct pci_dev *dev, struct msi_desc *msidesc, 3180int setup_msi_irq(struct pci_dev *dev, struct msi_desc *msidesc,
@@ -3271,6 +3273,7 @@ static struct irq_chip dmar_msi_type = {
3271 .irq_ack = ack_apic_edge, 3273 .irq_ack = ack_apic_edge,
3272 .irq_set_affinity = dmar_msi_set_affinity, 3274 .irq_set_affinity = dmar_msi_set_affinity,
3273 .irq_retrigger = ioapic_retrigger_irq, 3275 .irq_retrigger = ioapic_retrigger_irq,
3276 .flags = IRQCHIP_SKIP_SET_WAKE,
3274}; 3277};
3275 3278
3276int arch_setup_dmar_msi(unsigned int irq) 3279int arch_setup_dmar_msi(unsigned int irq)
@@ -3321,6 +3324,7 @@ static struct irq_chip hpet_msi_type = {
3321 .irq_ack = ack_apic_edge, 3324 .irq_ack = ack_apic_edge,
3322 .irq_set_affinity = hpet_msi_set_affinity, 3325 .irq_set_affinity = hpet_msi_set_affinity,
3323 .irq_retrigger = ioapic_retrigger_irq, 3326 .irq_retrigger = ioapic_retrigger_irq,
3327 .flags = IRQCHIP_SKIP_SET_WAKE,
3324}; 3328};
3325 3329
3326int default_setup_hpet_msi(unsigned int irq, unsigned int id) 3330int default_setup_hpet_msi(unsigned int irq, unsigned int id)
@@ -3384,6 +3388,7 @@ static struct irq_chip ht_irq_chip = {
3384 .irq_ack = ack_apic_edge, 3388 .irq_ack = ack_apic_edge,
3385 .irq_set_affinity = ht_set_affinity, 3389 .irq_set_affinity = ht_set_affinity,
3386 .irq_retrigger = ioapic_retrigger_irq, 3390 .irq_retrigger = ioapic_retrigger_irq,
3391 .flags = IRQCHIP_SKIP_SET_WAKE,
3387}; 3392};
3388 3393
3389int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev) 3394int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev)
diff --git a/drivers/acpi/acpi_lpss.c b/drivers/acpi/acpi_lpss.c
index b0ea767c8696..93d160661f4c 100644
--- a/drivers/acpi/acpi_lpss.c
+++ b/drivers/acpi/acpi_lpss.c
@@ -54,55 +54,58 @@ ACPI_MODULE_NAME("acpi_lpss");
54 54
55#define LPSS_PRV_REG_COUNT 9 55#define LPSS_PRV_REG_COUNT 9
56 56
57struct lpss_shared_clock { 57/* LPSS Flags */
58 const char *name; 58#define LPSS_CLK BIT(0)
59 unsigned long rate; 59#define LPSS_CLK_GATE BIT(1)
60 struct clk *clk; 60#define LPSS_CLK_DIVIDER BIT(2)
61}; 61#define LPSS_LTR BIT(3)
62#define LPSS_SAVE_CTX BIT(4)
62 63
63struct lpss_private_data; 64struct lpss_private_data;
64 65
65struct lpss_device_desc { 66struct lpss_device_desc {
66 bool clk_required; 67 unsigned int flags;
67 const char *clkdev_name;
68 bool ltr_required;
69 unsigned int prv_offset; 68 unsigned int prv_offset;
70 size_t prv_size_override; 69 size_t prv_size_override;
71 bool clk_divider;
72 bool clk_gate;
73 bool save_ctx;
74 struct lpss_shared_clock *shared_clock;
75 void (*setup)(struct lpss_private_data *pdata); 70 void (*setup)(struct lpss_private_data *pdata);
76}; 71};
77 72
78static struct lpss_device_desc lpss_dma_desc = { 73static struct lpss_device_desc lpss_dma_desc = {
79 .clk_required = true, 74 .flags = LPSS_CLK,
80 .clkdev_name = "hclk",
81}; 75};
82 76
83struct lpss_private_data { 77struct lpss_private_data {
84 void __iomem *mmio_base; 78 void __iomem *mmio_base;
85 resource_size_t mmio_size; 79 resource_size_t mmio_size;
80 unsigned int fixed_clk_rate;
86 struct clk *clk; 81 struct clk *clk;
87 const struct lpss_device_desc *dev_desc; 82 const struct lpss_device_desc *dev_desc;
88 u32 prv_reg_ctx[LPSS_PRV_REG_COUNT]; 83 u32 prv_reg_ctx[LPSS_PRV_REG_COUNT];
89}; 84};
90 85
86/* UART Component Parameter Register */
87#define LPSS_UART_CPR 0xF4
88#define LPSS_UART_CPR_AFCE BIT(4)
89
91static void lpss_uart_setup(struct lpss_private_data *pdata) 90static void lpss_uart_setup(struct lpss_private_data *pdata)
92{ 91{
93 unsigned int offset; 92 unsigned int offset;
94 u32 reg; 93 u32 val;
95 94
96 offset = pdata->dev_desc->prv_offset + LPSS_TX_INT; 95 offset = pdata->dev_desc->prv_offset + LPSS_TX_INT;
97 reg = readl(pdata->mmio_base + offset); 96 val = readl(pdata->mmio_base + offset);
98 writel(reg | LPSS_TX_INT_MASK, pdata->mmio_base + offset); 97 writel(val | LPSS_TX_INT_MASK, pdata->mmio_base + offset);
99 98
100 offset = pdata->dev_desc->prv_offset + LPSS_GENERAL; 99 val = readl(pdata->mmio_base + LPSS_UART_CPR);
101 reg = readl(pdata->mmio_base + offset); 100 if (!(val & LPSS_UART_CPR_AFCE)) {
102 writel(reg | LPSS_GENERAL_UART_RTS_OVRD, pdata->mmio_base + offset); 101 offset = pdata->dev_desc->prv_offset + LPSS_GENERAL;
102 val = readl(pdata->mmio_base + offset);
103 val |= LPSS_GENERAL_UART_RTS_OVRD;
104 writel(val, pdata->mmio_base + offset);
105 }
103} 106}
104 107
105static void lpss_i2c_setup(struct lpss_private_data *pdata) 108static void byt_i2c_setup(struct lpss_private_data *pdata)
106{ 109{
107 unsigned int offset; 110 unsigned int offset;
108 u32 val; 111 u32 val;
@@ -111,100 +114,56 @@ static void lpss_i2c_setup(struct lpss_private_data *pdata)
111 val = readl(pdata->mmio_base + offset); 114 val = readl(pdata->mmio_base + offset);
112 val |= LPSS_RESETS_RESET_APB | LPSS_RESETS_RESET_FUNC; 115 val |= LPSS_RESETS_RESET_APB | LPSS_RESETS_RESET_FUNC;
113 writel(val, pdata->mmio_base + offset); 116 writel(val, pdata->mmio_base + offset);
114}
115 117
116static struct lpss_device_desc wpt_dev_desc = { 118 if (readl(pdata->mmio_base + pdata->dev_desc->prv_offset))
117 .clk_required = true, 119 pdata->fixed_clk_rate = 133000000;
118 .prv_offset = 0x800, 120}
119 .ltr_required = true,
120 .clk_divider = true,
121 .clk_gate = true,
122};
123 121
124static struct lpss_device_desc lpt_dev_desc = { 122static struct lpss_device_desc lpt_dev_desc = {
125 .clk_required = true, 123 .flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_LTR,
126 .prv_offset = 0x800, 124 .prv_offset = 0x800,
127 .ltr_required = true,
128 .clk_divider = true,
129 .clk_gate = true,
130}; 125};
131 126
132static struct lpss_device_desc lpt_i2c_dev_desc = { 127static struct lpss_device_desc lpt_i2c_dev_desc = {
133 .clk_required = true, 128 .flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_LTR,
134 .prv_offset = 0x800, 129 .prv_offset = 0x800,
135 .ltr_required = true,
136 .clk_gate = true,
137}; 130};
138 131
139static struct lpss_device_desc lpt_uart_dev_desc = { 132static struct lpss_device_desc lpt_uart_dev_desc = {
140 .clk_required = true, 133 .flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_LTR,
141 .prv_offset = 0x800, 134 .prv_offset = 0x800,
142 .ltr_required = true,
143 .clk_divider = true,
144 .clk_gate = true,
145 .setup = lpss_uart_setup, 135 .setup = lpss_uart_setup,
146}; 136};
147 137
148static struct lpss_device_desc lpt_sdio_dev_desc = { 138static struct lpss_device_desc lpt_sdio_dev_desc = {
139 .flags = LPSS_LTR,
149 .prv_offset = 0x1000, 140 .prv_offset = 0x1000,
150 .prv_size_override = 0x1018, 141 .prv_size_override = 0x1018,
151 .ltr_required = true,
152};
153
154static struct lpss_shared_clock pwm_clock = {
155 .name = "pwm_clk",
156 .rate = 25000000,
157}; 142};
158 143
159static struct lpss_device_desc byt_pwm_dev_desc = { 144static struct lpss_device_desc byt_pwm_dev_desc = {
160 .clk_required = true, 145 .flags = LPSS_SAVE_CTX,
161 .save_ctx = true,
162 .shared_clock = &pwm_clock,
163}; 146};
164 147
165static struct lpss_device_desc byt_uart_dev_desc = { 148static struct lpss_device_desc byt_uart_dev_desc = {
166 .clk_required = true, 149 .flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_SAVE_CTX,
167 .prv_offset = 0x800, 150 .prv_offset = 0x800,
168 .clk_divider = true,
169 .clk_gate = true,
170 .save_ctx = true,
171 .setup = lpss_uart_setup, 151 .setup = lpss_uart_setup,
172}; 152};
173 153
174static struct lpss_device_desc byt_spi_dev_desc = { 154static struct lpss_device_desc byt_spi_dev_desc = {
175 .clk_required = true, 155 .flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_SAVE_CTX,
176 .prv_offset = 0x400, 156 .prv_offset = 0x400,
177 .clk_divider = true,
178 .clk_gate = true,
179 .save_ctx = true,
180}; 157};
181 158
182static struct lpss_device_desc byt_sdio_dev_desc = { 159static struct lpss_device_desc byt_sdio_dev_desc = {
183 .clk_required = true, 160 .flags = LPSS_CLK,
184};
185
186static struct lpss_shared_clock i2c_clock = {
187 .name = "i2c_clk",
188 .rate = 100000000,
189}; 161};
190 162
191static struct lpss_device_desc byt_i2c_dev_desc = { 163static struct lpss_device_desc byt_i2c_dev_desc = {
192 .clk_required = true, 164 .flags = LPSS_CLK | LPSS_SAVE_CTX,
193 .prv_offset = 0x800, 165 .prv_offset = 0x800,
194 .save_ctx = true, 166 .setup = byt_i2c_setup,
195 .shared_clock = &i2c_clock,
196 .setup = lpss_i2c_setup,
197};
198
199static struct lpss_shared_clock bsw_pwm_clock = {
200 .name = "pwm_clk",
201 .rate = 19200000,
202};
203
204static struct lpss_device_desc bsw_pwm_dev_desc = {
205 .clk_required = true,
206 .save_ctx = true,
207 .shared_clock = &bsw_pwm_clock,
208}; 167};
209 168
210#else 169#else
@@ -237,7 +196,7 @@ static const struct acpi_device_id acpi_lpss_device_ids[] = {
237 { "INT33FC", }, 196 { "INT33FC", },
238 197
239 /* Braswell LPSS devices */ 198 /* Braswell LPSS devices */
240 { "80862288", LPSS_ADDR(bsw_pwm_dev_desc) }, 199 { "80862288", LPSS_ADDR(byt_pwm_dev_desc) },
241 { "8086228A", LPSS_ADDR(byt_uart_dev_desc) }, 200 { "8086228A", LPSS_ADDR(byt_uart_dev_desc) },
242 { "8086228E", LPSS_ADDR(byt_spi_dev_desc) }, 201 { "8086228E", LPSS_ADDR(byt_spi_dev_desc) },
243 { "808622C1", LPSS_ADDR(byt_i2c_dev_desc) }, 202 { "808622C1", LPSS_ADDR(byt_i2c_dev_desc) },
@@ -251,7 +210,8 @@ static const struct acpi_device_id acpi_lpss_device_ids[] = {
251 { "INT3436", LPSS_ADDR(lpt_sdio_dev_desc) }, 210 { "INT3436", LPSS_ADDR(lpt_sdio_dev_desc) },
252 { "INT3437", }, 211 { "INT3437", },
253 212
254 { "INT3438", LPSS_ADDR(wpt_dev_desc) }, 213 /* Wildcat Point LPSS devices */
214 { "INT3438", LPSS_ADDR(lpt_dev_desc) },
255 215
256 { } 216 { }
257}; 217};
@@ -276,7 +236,6 @@ static int register_device_clock(struct acpi_device *adev,
276 struct lpss_private_data *pdata) 236 struct lpss_private_data *pdata)
277{ 237{
278 const struct lpss_device_desc *dev_desc = pdata->dev_desc; 238 const struct lpss_device_desc *dev_desc = pdata->dev_desc;
279 struct lpss_shared_clock *shared_clock = dev_desc->shared_clock;
280 const char *devname = dev_name(&adev->dev); 239 const char *devname = dev_name(&adev->dev);
281 struct clk *clk = ERR_PTR(-ENODEV); 240 struct clk *clk = ERR_PTR(-ENODEV);
282 struct lpss_clk_data *clk_data; 241 struct lpss_clk_data *clk_data;
@@ -289,12 +248,7 @@ static int register_device_clock(struct acpi_device *adev,
289 clk_data = platform_get_drvdata(lpss_clk_dev); 248 clk_data = platform_get_drvdata(lpss_clk_dev);
290 if (!clk_data) 249 if (!clk_data)
291 return -ENODEV; 250 return -ENODEV;
292 251 clk = clk_data->clk;
293 if (dev_desc->clkdev_name) {
294 clk_register_clkdev(clk_data->clk, dev_desc->clkdev_name,
295 devname);
296 return 0;
297 }
298 252
299 if (!pdata->mmio_base 253 if (!pdata->mmio_base
300 || pdata->mmio_size < dev_desc->prv_offset + LPSS_CLK_SIZE) 254 || pdata->mmio_size < dev_desc->prv_offset + LPSS_CLK_SIZE)
@@ -303,24 +257,19 @@ static int register_device_clock(struct acpi_device *adev,
303 parent = clk_data->name; 257 parent = clk_data->name;
304 prv_base = pdata->mmio_base + dev_desc->prv_offset; 258 prv_base = pdata->mmio_base + dev_desc->prv_offset;
305 259
306 if (shared_clock) { 260 if (pdata->fixed_clk_rate) {
307 clk = shared_clock->clk; 261 clk = clk_register_fixed_rate(NULL, devname, parent, 0,
308 if (!clk) { 262 pdata->fixed_clk_rate);
309 clk = clk_register_fixed_rate(NULL, shared_clock->name, 263 goto out;
310 "lpss_clk", 0,
311 shared_clock->rate);
312 shared_clock->clk = clk;
313 }
314 parent = shared_clock->name;
315 } 264 }
316 265
317 if (dev_desc->clk_gate) { 266 if (dev_desc->flags & LPSS_CLK_GATE) {
318 clk = clk_register_gate(NULL, devname, parent, 0, 267 clk = clk_register_gate(NULL, devname, parent, 0,
319 prv_base, 0, 0, NULL); 268 prv_base, 0, 0, NULL);
320 parent = devname; 269 parent = devname;
321 } 270 }
322 271
323 if (dev_desc->clk_divider) { 272 if (dev_desc->flags & LPSS_CLK_DIVIDER) {
324 /* Prevent division by zero */ 273 /* Prevent division by zero */
325 if (!readl(prv_base)) 274 if (!readl(prv_base))
326 writel(LPSS_CLK_DIVIDER_DEF_MASK, prv_base); 275 writel(LPSS_CLK_DIVIDER_DEF_MASK, prv_base);
@@ -344,7 +293,7 @@ static int register_device_clock(struct acpi_device *adev,
344 kfree(parent); 293 kfree(parent);
345 kfree(clk_name); 294 kfree(clk_name);
346 } 295 }
347 296out:
348 if (IS_ERR(clk)) 297 if (IS_ERR(clk))
349 return PTR_ERR(clk); 298 return PTR_ERR(clk);
350 299
@@ -392,7 +341,10 @@ static int acpi_lpss_create_device(struct acpi_device *adev,
392 341
393 pdata->dev_desc = dev_desc; 342 pdata->dev_desc = dev_desc;
394 343
395 if (dev_desc->clk_required) { 344 if (dev_desc->setup)
345 dev_desc->setup(pdata);
346
347 if (dev_desc->flags & LPSS_CLK) {
396 ret = register_device_clock(adev, pdata); 348 ret = register_device_clock(adev, pdata);
397 if (ret) { 349 if (ret) {
398 /* Skip the device, but continue the namespace scan. */ 350 /* Skip the device, but continue the namespace scan. */
@@ -413,9 +365,6 @@ static int acpi_lpss_create_device(struct acpi_device *adev,
413 goto err_out; 365 goto err_out;
414 } 366 }
415 367
416 if (dev_desc->setup)
417 dev_desc->setup(pdata);
418
419 adev->driver_data = pdata; 368 adev->driver_data = pdata;
420 pdev = acpi_create_platform_device(adev); 369 pdev = acpi_create_platform_device(adev);
421 if (!IS_ERR_OR_NULL(pdev)) { 370 if (!IS_ERR_OR_NULL(pdev)) {
@@ -692,19 +641,19 @@ static int acpi_lpss_platform_notify(struct notifier_block *nb,
692 641
693 switch (action) { 642 switch (action) {
694 case BUS_NOTIFY_BOUND_DRIVER: 643 case BUS_NOTIFY_BOUND_DRIVER:
695 if (pdata->dev_desc->save_ctx) 644 if (pdata->dev_desc->flags & LPSS_SAVE_CTX)
696 pdev->dev.pm_domain = &acpi_lpss_pm_domain; 645 pdev->dev.pm_domain = &acpi_lpss_pm_domain;
697 break; 646 break;
698 case BUS_NOTIFY_UNBOUND_DRIVER: 647 case BUS_NOTIFY_UNBOUND_DRIVER:
699 if (pdata->dev_desc->save_ctx) 648 if (pdata->dev_desc->flags & LPSS_SAVE_CTX)
700 pdev->dev.pm_domain = NULL; 649 pdev->dev.pm_domain = NULL;
701 break; 650 break;
702 case BUS_NOTIFY_ADD_DEVICE: 651 case BUS_NOTIFY_ADD_DEVICE:
703 if (pdata->dev_desc->ltr_required) 652 if (pdata->dev_desc->flags & LPSS_LTR)
704 return sysfs_create_group(&pdev->dev.kobj, 653 return sysfs_create_group(&pdev->dev.kobj,
705 &lpss_attr_group); 654 &lpss_attr_group);
706 case BUS_NOTIFY_DEL_DEVICE: 655 case BUS_NOTIFY_DEL_DEVICE:
707 if (pdata->dev_desc->ltr_required) 656 if (pdata->dev_desc->flags & LPSS_LTR)
708 sysfs_remove_group(&pdev->dev.kobj, &lpss_attr_group); 657 sysfs_remove_group(&pdev->dev.kobj, &lpss_attr_group);
709 default: 658 default:
710 break; 659 break;
@@ -721,7 +670,7 @@ static void acpi_lpss_bind(struct device *dev)
721{ 670{
722 struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev)); 671 struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));
723 672
724 if (!pdata || !pdata->mmio_base || !pdata->dev_desc->ltr_required) 673 if (!pdata || !pdata->mmio_base || !(pdata->dev_desc->flags & LPSS_LTR))
725 return; 674 return;
726 675
727 if (pdata->mmio_size >= pdata->dev_desc->prv_offset + LPSS_LTR_SIZE) 676 if (pdata->mmio_size >= pdata->dev_desc->prv_offset + LPSS_LTR_SIZE)
diff --git a/drivers/acpi/acpi_pnp.c b/drivers/acpi/acpi_pnp.c
index 996fa1959eea..f30c40796856 100644
--- a/drivers/acpi/acpi_pnp.c
+++ b/drivers/acpi/acpi_pnp.c
@@ -132,10 +132,6 @@ static const struct acpi_device_id acpi_pnp_device_ids[] = {
132 {"PNP0401"}, /* ECP Printer Port */ 132 {"PNP0401"}, /* ECP Printer Port */
133 /* apple-gmux */ 133 /* apple-gmux */
134 {"APP000B"}, 134 {"APP000B"},
135 /* fujitsu-laptop.c */
136 {"FUJ02bf"},
137 {"FUJ02B1"},
138 {"FUJ02E3"},
139 /* system */ 135 /* system */
140 {"PNP0c02"}, /* General ID for reserving resources */ 136 {"PNP0c02"}, /* General ID for reserving resources */
141 {"PNP0c01"}, /* memory controller */ 137 {"PNP0c01"}, /* memory controller */
diff --git a/drivers/acpi/acpica/evxfgpe.c b/drivers/acpi/acpica/evxfgpe.c
index 0cf159cc6e6d..56710a03c9b0 100644
--- a/drivers/acpi/acpica/evxfgpe.c
+++ b/drivers/acpi/acpica/evxfgpe.c
@@ -596,6 +596,38 @@ acpi_status acpi_enable_all_runtime_gpes(void)
596 596
597ACPI_EXPORT_SYMBOL(acpi_enable_all_runtime_gpes) 597ACPI_EXPORT_SYMBOL(acpi_enable_all_runtime_gpes)
598 598
599/******************************************************************************
600 *
601 * FUNCTION: acpi_enable_all_wakeup_gpes
602 *
603 * PARAMETERS: None
604 *
605 * RETURN: Status
606 *
607 * DESCRIPTION: Enable all "wakeup" GPEs and disable all of the other GPEs, in
608 * all GPE blocks.
609 *
610 ******************************************************************************/
611
612acpi_status acpi_enable_all_wakeup_gpes(void)
613{
614 acpi_status status;
615
616 ACPI_FUNCTION_TRACE(acpi_enable_all_wakeup_gpes);
617
618 status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
619 if (ACPI_FAILURE(status)) {
620 return_ACPI_STATUS(status);
621 }
622
623 status = acpi_hw_enable_all_wakeup_gpes();
624 (void)acpi_ut_release_mutex(ACPI_MTX_EVENTS);
625
626 return_ACPI_STATUS(status);
627}
628
629ACPI_EXPORT_SYMBOL(acpi_enable_all_wakeup_gpes)
630
599/******************************************************************************* 631/*******************************************************************************
600 * 632 *
601 * FUNCTION: acpi_install_gpe_block 633 * FUNCTION: acpi_install_gpe_block
diff --git a/drivers/acpi/acpica/hwgpe.c b/drivers/acpi/acpica/hwgpe.c
index 2e6caabba07a..ea62d40fd161 100644
--- a/drivers/acpi/acpica/hwgpe.c
+++ b/drivers/acpi/acpica/hwgpe.c
@@ -396,11 +396,11 @@ acpi_hw_enable_wakeup_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
396 /* Examine each GPE Register within the block */ 396 /* Examine each GPE Register within the block */
397 397
398 for (i = 0; i < gpe_block->register_count; i++) { 398 for (i = 0; i < gpe_block->register_count; i++) {
399 if (!gpe_block->register_info[i].enable_for_wake) {
400 continue;
401 }
402 399
403 /* Enable all "wake" GPEs in this register */ 400 /*
401 * Enable all "wake" GPEs in this register and disable the
402 * remaining ones.
403 */
404 404
405 status = 405 status =
406 acpi_hw_write(gpe_block->register_info[i].enable_for_wake, 406 acpi_hw_write(gpe_block->register_info[i].enable_for_wake,
diff --git a/drivers/acpi/acpica/utresrc.c b/drivers/acpi/acpica/utresrc.c
index 14cb6c0c8be2..5cd017c7ac0e 100644
--- a/drivers/acpi/acpica/utresrc.c
+++ b/drivers/acpi/acpica/utresrc.c
@@ -87,7 +87,9 @@ const char *acpi_gbl_io_decode[] = {
87 87
88const char *acpi_gbl_ll_decode[] = { 88const char *acpi_gbl_ll_decode[] = {
89 "ActiveHigh", 89 "ActiveHigh",
90 "ActiveLow" 90 "ActiveLow",
91 "ActiveBoth",
92 "Reserved"
91}; 93};
92 94
93const char *acpi_gbl_max_decode[] = { 95const char *acpi_gbl_max_decode[] = {
diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c
index 5fdfe65fe165..8ec8a89a20ab 100644
--- a/drivers/acpi/battery.c
+++ b/drivers/acpi/battery.c
@@ -695,7 +695,7 @@ static void acpi_battery_quirks(struct acpi_battery *battery)
695 if (battery->power_unit && dmi_name_in_vendors("LENOVO")) { 695 if (battery->power_unit && dmi_name_in_vendors("LENOVO")) {
696 const char *s; 696 const char *s;
697 s = dmi_get_system_info(DMI_PRODUCT_VERSION); 697 s = dmi_get_system_info(DMI_PRODUCT_VERSION);
698 if (s && !strnicmp(s, "ThinkPad", 8)) { 698 if (s && !strncasecmp(s, "ThinkPad", 8)) {
699 dmi_walk(find_battery, battery); 699 dmi_walk(find_battery, battery);
700 if (test_bit(ACPI_BATTERY_QUIRK_THINKPAD_MAH, 700 if (test_bit(ACPI_BATTERY_QUIRK_THINKPAD_MAH,
701 &battery->flags) && 701 &battery->flags) &&
diff --git a/drivers/acpi/blacklist.c b/drivers/acpi/blacklist.c
index 36eb42e3b0bb..ed122e17636e 100644
--- a/drivers/acpi/blacklist.c
+++ b/drivers/acpi/blacklist.c
@@ -247,8 +247,8 @@ static struct dmi_system_id acpi_osi_dmi_table[] __initdata = {
247 }, 247 },
248 248
249 /* 249 /*
250 * These machines will power on immediately after shutdown when 250 * The wireless hotkey does not work on those machines when
251 * reporting the Windows 2012 OSI. 251 * returning true for _OSI("Windows 2012")
252 */ 252 */
253 { 253 {
254 .callback = dmi_disable_osi_win8, 254 .callback = dmi_disable_osi_win8,
@@ -258,6 +258,38 @@ static struct dmi_system_id acpi_osi_dmi_table[] __initdata = {
258 DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 7737"), 258 DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 7737"),
259 }, 259 },
260 }, 260 },
261 {
262 .callback = dmi_disable_osi_win8,
263 .ident = "Dell Inspiron 7537",
264 .matches = {
265 DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
266 DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 7537"),
267 },
268 },
269 {
270 .callback = dmi_disable_osi_win8,
271 .ident = "Dell Inspiron 5437",
272 .matches = {
273 DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
274 DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 5437"),
275 },
276 },
277 {
278 .callback = dmi_disable_osi_win8,
279 .ident = "Dell Inspiron 3437",
280 .matches = {
281 DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
282 DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 3437"),
283 },
284 },
285 {
286 .callback = dmi_disable_osi_win8,
287 .ident = "Dell Vostro 3446",
288 .matches = {
289 DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
290 DMI_MATCH(DMI_PRODUCT_NAME, "Vostro 3446"),
291 },
292 },
261 293
262 /* 294 /*
263 * BIOS invocation of _OSI(Linux) is almost always a BIOS bug. 295 * BIOS invocation of _OSI(Linux) is almost always a BIOS bug.
diff --git a/drivers/acpi/fan.c b/drivers/acpi/fan.c
index 8acf53e62966..5328b1090e08 100644
--- a/drivers/acpi/fan.c
+++ b/drivers/acpi/fan.c
@@ -27,12 +27,10 @@
27#include <linux/module.h> 27#include <linux/module.h>
28#include <linux/init.h> 28#include <linux/init.h>
29#include <linux/types.h> 29#include <linux/types.h>
30#include <asm/uaccess.h> 30#include <linux/uaccess.h>
31#include <linux/thermal.h> 31#include <linux/thermal.h>
32#include <linux/acpi.h> 32#include <linux/acpi.h>
33 33
34#define PREFIX "ACPI: "
35
36#define ACPI_FAN_CLASS "fan" 34#define ACPI_FAN_CLASS "fan"
37#define ACPI_FAN_FILE_STATE "state" 35#define ACPI_FAN_FILE_STATE "state"
38 36
@@ -127,8 +125,9 @@ static const struct thermal_cooling_device_ops fan_cooling_ops = {
127}; 125};
128 126
129/* -------------------------------------------------------------------------- 127/* --------------------------------------------------------------------------
130 Driver Interface 128 * Driver Interface
131 -------------------------------------------------------------------------- */ 129 * --------------------------------------------------------------------------
130*/
132 131
133static int acpi_fan_add(struct acpi_device *device) 132static int acpi_fan_add(struct acpi_device *device)
134{ 133{
@@ -143,7 +142,7 @@ static int acpi_fan_add(struct acpi_device *device)
143 142
144 result = acpi_bus_update_power(device->handle, NULL); 143 result = acpi_bus_update_power(device->handle, NULL);
145 if (result) { 144 if (result) {
146 printk(KERN_ERR PREFIX "Setting initial power state\n"); 145 dev_err(&device->dev, "Setting initial power state\n");
147 goto end; 146 goto end;
148 } 147 }
149 148
@@ -168,10 +167,9 @@ static int acpi_fan_add(struct acpi_device *device)
168 &device->dev.kobj, 167 &device->dev.kobj,
169 "device"); 168 "device");
170 if (result) 169 if (result)
171 dev_err(&device->dev, "Failed to create sysfs link " 170 dev_err(&device->dev, "Failed to create sysfs link 'device'\n");
172 "'device'\n");
173 171
174 printk(KERN_INFO PREFIX "%s [%s] (%s)\n", 172 dev_info(&device->dev, "ACPI: %s [%s] (%s)\n",
175 acpi_device_name(device), acpi_device_bid(device), 173 acpi_device_name(device), acpi_device_bid(device),
176 !device->power.state ? "on" : "off"); 174 !device->power.state ? "on" : "off");
177 175
@@ -217,7 +215,7 @@ static int acpi_fan_resume(struct device *dev)
217 215
218 result = acpi_bus_update_power(to_acpi_device(dev)->handle, NULL); 216 result = acpi_bus_update_power(to_acpi_device(dev)->handle, NULL);
219 if (result) 217 if (result)
220 printk(KERN_ERR PREFIX "Error updating fan power state\n"); 218 dev_err(dev, "Error updating fan power state\n");
221 219
222 return result; 220 return result;
223} 221}
diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
index 3abe9b223ba7..938b6ac71dde 100644
--- a/drivers/acpi/osl.c
+++ b/drivers/acpi/osl.c
@@ -152,6 +152,16 @@ static u32 acpi_osi_handler(acpi_string interface, u32 supported)
152 osi_linux.dmi ? " via DMI" : ""); 152 osi_linux.dmi ? " via DMI" : "");
153 } 153 }
154 154
155 if (!strcmp("Darwin", interface)) {
156 /*
157 * Apple firmware will behave poorly if it receives positive
158 * answers to "Darwin" and any other OS. Respond positively
159 * to Darwin and then disable all other vendor strings.
160 */
161 acpi_update_interfaces(ACPI_DISABLE_ALL_VENDOR_STRINGS);
162 supported = ACPI_UINT32_MAX;
163 }
164
155 return supported; 165 return supported;
156} 166}
157 167
diff --git a/drivers/acpi/pci_root.c b/drivers/acpi/pci_root.c
index e6ae603ed1a1..cd4de7e038ea 100644
--- a/drivers/acpi/pci_root.c
+++ b/drivers/acpi/pci_root.c
@@ -35,6 +35,7 @@
35#include <linux/pci-aspm.h> 35#include <linux/pci-aspm.h>
36#include <linux/acpi.h> 36#include <linux/acpi.h>
37#include <linux/slab.h> 37#include <linux/slab.h>
38#include <linux/dmi.h>
38#include <acpi/apei.h> /* for acpi_hest_init() */ 39#include <acpi/apei.h> /* for acpi_hest_init() */
39 40
40#include "internal.h" 41#include "internal.h"
@@ -430,6 +431,19 @@ static void negotiate_os_control(struct acpi_pci_root *root, int *no_aspm,
430 acpi_handle handle = device->handle; 431 acpi_handle handle = device->handle;
431 432
432 /* 433 /*
434 * Apple always return failure on _OSC calls when _OSI("Darwin") has
435 * been called successfully. We know the feature set supported by the
436 * platform, so avoid calling _OSC at all
437 */
438
439 if (dmi_match(DMI_SYS_VENDOR, "Apple Inc.")) {
440 root->osc_control_set = ~OSC_PCI_EXPRESS_PME_CONTROL;
441 decode_osc_control(root, "OS assumes control of",
442 root->osc_control_set);
443 return;
444 }
445
446 /*
433 * All supported architectures that use ACPI have support for 447 * All supported architectures that use ACPI have support for
434 * PCI domains, so we indicate this in _OSC support capabilities. 448 * PCI domains, so we indicate this in _OSC support capabilities.
435 */ 449 */
diff --git a/drivers/acpi/processor_core.c b/drivers/acpi/processor_core.c
index e32321ce9a5c..ef58f46c8442 100644
--- a/drivers/acpi/processor_core.c
+++ b/drivers/acpi/processor_core.c
@@ -16,7 +16,7 @@ static int map_lapic_id(struct acpi_subtable_header *entry,
16 u32 acpi_id, int *apic_id) 16 u32 acpi_id, int *apic_id)
17{ 17{
18 struct acpi_madt_local_apic *lapic = 18 struct acpi_madt_local_apic *lapic =
19 (struct acpi_madt_local_apic *)entry; 19 container_of(entry, struct acpi_madt_local_apic, header);
20 20
21 if (!(lapic->lapic_flags & ACPI_MADT_ENABLED)) 21 if (!(lapic->lapic_flags & ACPI_MADT_ENABLED))
22 return -ENODEV; 22 return -ENODEV;
@@ -32,7 +32,7 @@ static int map_x2apic_id(struct acpi_subtable_header *entry,
32 int device_declaration, u32 acpi_id, int *apic_id) 32 int device_declaration, u32 acpi_id, int *apic_id)
33{ 33{
34 struct acpi_madt_local_x2apic *apic = 34 struct acpi_madt_local_x2apic *apic =
35 (struct acpi_madt_local_x2apic *)entry; 35 container_of(entry, struct acpi_madt_local_x2apic, header);
36 36
37 if (!(apic->lapic_flags & ACPI_MADT_ENABLED)) 37 if (!(apic->lapic_flags & ACPI_MADT_ENABLED))
38 return -ENODEV; 38 return -ENODEV;
@@ -49,7 +49,7 @@ static int map_lsapic_id(struct acpi_subtable_header *entry,
49 int device_declaration, u32 acpi_id, int *apic_id) 49 int device_declaration, u32 acpi_id, int *apic_id)
50{ 50{
51 struct acpi_madt_local_sapic *lsapic = 51 struct acpi_madt_local_sapic *lsapic =
52 (struct acpi_madt_local_sapic *)entry; 52 container_of(entry, struct acpi_madt_local_sapic, header);
53 53
54 if (!(lsapic->lapic_flags & ACPI_MADT_ENABLED)) 54 if (!(lsapic->lapic_flags & ACPI_MADT_ENABLED))
55 return -ENODEV; 55 return -ENODEV;
diff --git a/drivers/acpi/sbs.c b/drivers/acpi/sbs.c
index 366ca40a6f70..a7a3edd28beb 100644
--- a/drivers/acpi/sbs.c
+++ b/drivers/acpi/sbs.c
@@ -35,6 +35,7 @@
35#include <linux/jiffies.h> 35#include <linux/jiffies.h>
36#include <linux/delay.h> 36#include <linux/delay.h>
37#include <linux/power_supply.h> 37#include <linux/power_supply.h>
38#include <linux/dmi.h>
38 39
39#include "sbshc.h" 40#include "sbshc.h"
40#include "battery.h" 41#include "battery.h"
@@ -61,6 +62,8 @@ static unsigned int cache_time = 1000;
61module_param(cache_time, uint, 0644); 62module_param(cache_time, uint, 0644);
62MODULE_PARM_DESC(cache_time, "cache time in milliseconds"); 63MODULE_PARM_DESC(cache_time, "cache time in milliseconds");
63 64
65static bool sbs_manager_broken;
66
64#define MAX_SBS_BAT 4 67#define MAX_SBS_BAT 4
65#define ACPI_SBS_BLOCK_MAX 32 68#define ACPI_SBS_BLOCK_MAX 32
66 69
@@ -109,6 +112,7 @@ struct acpi_sbs {
109 u8 batteries_supported:4; 112 u8 batteries_supported:4;
110 u8 manager_present:1; 113 u8 manager_present:1;
111 u8 charger_present:1; 114 u8 charger_present:1;
115 u8 charger_exists:1;
112}; 116};
113 117
114#define to_acpi_sbs(x) container_of(x, struct acpi_sbs, charger) 118#define to_acpi_sbs(x) container_of(x, struct acpi_sbs, charger)
@@ -429,9 +433,19 @@ static int acpi_ac_get_present(struct acpi_sbs *sbs)
429 433
430 result = acpi_smbus_read(sbs->hc, SMBUS_READ_WORD, ACPI_SBS_CHARGER, 434 result = acpi_smbus_read(sbs->hc, SMBUS_READ_WORD, ACPI_SBS_CHARGER,
431 0x13, (u8 *) & status); 435 0x13, (u8 *) & status);
432 if (!result) 436
433 sbs->charger_present = (status >> 15) & 0x1; 437 if (result)
434 return result; 438 return result;
439
440 /*
441 * The spec requires that bit 4 always be 1. If it's not set, assume
442 * that the implementation doesn't support an SBS charger
443 */
444 if (!((status >> 4) & 0x1))
445 return -ENODEV;
446
447 sbs->charger_present = (status >> 15) & 0x1;
448 return 0;
435} 449}
436 450
437static ssize_t acpi_battery_alarm_show(struct device *dev, 451static ssize_t acpi_battery_alarm_show(struct device *dev,
@@ -483,16 +497,21 @@ static int acpi_battery_read(struct acpi_battery *battery)
483 ACPI_SBS_MANAGER, 0x01, (u8 *)&state, 2); 497 ACPI_SBS_MANAGER, 0x01, (u8 *)&state, 2);
484 } else if (battery->id == 0) 498 } else if (battery->id == 0)
485 battery->present = 1; 499 battery->present = 1;
500
486 if (result || !battery->present) 501 if (result || !battery->present)
487 return result; 502 return result;
488 503
489 if (saved_present != battery->present) { 504 if (saved_present != battery->present) {
490 battery->update_time = 0; 505 battery->update_time = 0;
491 result = acpi_battery_get_info(battery); 506 result = acpi_battery_get_info(battery);
492 if (result) 507 if (result) {
508 battery->present = 0;
493 return result; 509 return result;
510 }
494 } 511 }
495 result = acpi_battery_get_state(battery); 512 result = acpi_battery_get_state(battery);
513 if (result)
514 battery->present = 0;
496 return result; 515 return result;
497} 516}
498 517
@@ -524,6 +543,7 @@ static int acpi_battery_add(struct acpi_sbs *sbs, int id)
524 result = power_supply_register(&sbs->device->dev, &battery->bat); 543 result = power_supply_register(&sbs->device->dev, &battery->bat);
525 if (result) 544 if (result)
526 goto end; 545 goto end;
546
527 result = device_create_file(battery->bat.dev, &alarm_attr); 547 result = device_create_file(battery->bat.dev, &alarm_attr);
528 if (result) 548 if (result)
529 goto end; 549 goto end;
@@ -554,6 +574,7 @@ static int acpi_charger_add(struct acpi_sbs *sbs)
554 if (result) 574 if (result)
555 goto end; 575 goto end;
556 576
577 sbs->charger_exists = 1;
557 sbs->charger.name = "sbs-charger"; 578 sbs->charger.name = "sbs-charger";
558 sbs->charger.type = POWER_SUPPLY_TYPE_MAINS; 579 sbs->charger.type = POWER_SUPPLY_TYPE_MAINS;
559 sbs->charger.properties = sbs_ac_props; 580 sbs->charger.properties = sbs_ac_props;
@@ -580,9 +601,12 @@ static void acpi_sbs_callback(void *context)
580 struct acpi_battery *bat; 601 struct acpi_battery *bat;
581 u8 saved_charger_state = sbs->charger_present; 602 u8 saved_charger_state = sbs->charger_present;
582 u8 saved_battery_state; 603 u8 saved_battery_state;
583 acpi_ac_get_present(sbs); 604
584 if (sbs->charger_present != saved_charger_state) 605 if (sbs->charger_exists) {
585 kobject_uevent(&sbs->charger.dev->kobj, KOBJ_CHANGE); 606 acpi_ac_get_present(sbs);
607 if (sbs->charger_present != saved_charger_state)
608 kobject_uevent(&sbs->charger.dev->kobj, KOBJ_CHANGE);
609 }
586 610
587 if (sbs->manager_present) { 611 if (sbs->manager_present) {
588 for (id = 0; id < MAX_SBS_BAT; ++id) { 612 for (id = 0; id < MAX_SBS_BAT; ++id) {
@@ -598,12 +622,31 @@ static void acpi_sbs_callback(void *context)
598 } 622 }
599} 623}
600 624
625static int disable_sbs_manager(const struct dmi_system_id *d)
626{
627 sbs_manager_broken = true;
628 return 0;
629}
630
631static struct dmi_system_id acpi_sbs_dmi_table[] = {
632 {
633 .callback = disable_sbs_manager,
634 .ident = "Apple",
635 .matches = {
636 DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc.")
637 },
638 },
639 { },
640};
641
601static int acpi_sbs_add(struct acpi_device *device) 642static int acpi_sbs_add(struct acpi_device *device)
602{ 643{
603 struct acpi_sbs *sbs; 644 struct acpi_sbs *sbs;
604 int result = 0; 645 int result = 0;
605 int id; 646 int id;
606 647
648 dmi_check_system(acpi_sbs_dmi_table);
649
607 sbs = kzalloc(sizeof(struct acpi_sbs), GFP_KERNEL); 650 sbs = kzalloc(sizeof(struct acpi_sbs), GFP_KERNEL);
608 if (!sbs) { 651 if (!sbs) {
609 result = -ENOMEM; 652 result = -ENOMEM;
@@ -619,17 +662,24 @@ static int acpi_sbs_add(struct acpi_device *device)
619 device->driver_data = sbs; 662 device->driver_data = sbs;
620 663
621 result = acpi_charger_add(sbs); 664 result = acpi_charger_add(sbs);
622 if (result) 665 if (result && result != -ENODEV)
623 goto end; 666 goto end;
624 667
625 result = acpi_manager_get_info(sbs); 668 result = 0;
626 if (!result) { 669
627 sbs->manager_present = 1; 670 if (!sbs_manager_broken) {
628 for (id = 0; id < MAX_SBS_BAT; ++id) 671 result = acpi_manager_get_info(sbs);
629 if ((sbs->batteries_supported & (1 << id))) 672 if (!result) {
630 acpi_battery_add(sbs, id); 673 sbs->manager_present = 0;
631 } else 674 for (id = 0; id < MAX_SBS_BAT; ++id)
675 if ((sbs->batteries_supported & (1 << id)))
676 acpi_battery_add(sbs, id);
677 }
678 }
679
680 if (!sbs->manager_present)
632 acpi_battery_add(sbs, 0); 681 acpi_battery_add(sbs, 0);
682
633 acpi_smbus_register_callback(sbs->hc, acpi_sbs_callback, sbs); 683 acpi_smbus_register_callback(sbs->hc, acpi_sbs_callback, sbs);
634 end: 684 end:
635 if (result) 685 if (result)
diff --git a/drivers/acpi/utils.c b/drivers/acpi/utils.c
index 07c8c5a5ee95..834f35c4bf8d 100644
--- a/drivers/acpi/utils.c
+++ b/drivers/acpi/utils.c
@@ -661,7 +661,6 @@ EXPORT_SYMBOL(acpi_evaluate_dsm);
661 * @uuid: UUID of requested functions, should be 16 bytes at least 661 * @uuid: UUID of requested functions, should be 16 bytes at least
662 * @rev: revision number of requested functions 662 * @rev: revision number of requested functions
663 * @funcs: bitmap of requested functions 663 * @funcs: bitmap of requested functions
664 * @exclude: excluding special value, used to support i915 and nouveau
665 * 664 *
666 * Evaluate device's _DSM method to check whether it supports requested 665 * Evaluate device's _DSM method to check whether it supports requested
667 * functions. Currently only support 64 functions at maximum, should be 666 * functions. Currently only support 64 functions at maximum, should be
diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c
index 8e7e18567ae6..807a88a0f394 100644
--- a/drivers/acpi/video.c
+++ b/drivers/acpi/video.c
@@ -411,12 +411,6 @@ static int __init video_set_bqc_offset(const struct dmi_system_id *d)
411 return 0; 411 return 0;
412} 412}
413 413
414static int __init video_set_use_native_backlight(const struct dmi_system_id *d)
415{
416 use_native_backlight_dmi = true;
417 return 0;
418}
419
420static int __init video_disable_native_backlight(const struct dmi_system_id *d) 414static int __init video_disable_native_backlight(const struct dmi_system_id *d)
421{ 415{
422 use_native_backlight_dmi = false; 416 use_native_backlight_dmi = false;
@@ -467,265 +461,6 @@ static struct dmi_system_id video_dmi_table[] __initdata = {
467 DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 7720"), 461 DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 7720"),
468 }, 462 },
469 }, 463 },
470 {
471 .callback = video_set_use_native_backlight,
472 .ident = "ThinkPad X230",
473 .matches = {
474 DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
475 DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad X230"),
476 },
477 },
478 {
479 .callback = video_set_use_native_backlight,
480 .ident = "ThinkPad T430 and T430s",
481 .matches = {
482 DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
483 DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T430"),
484 },
485 },
486 {
487 .callback = video_set_use_native_backlight,
488 .ident = "ThinkPad T430",
489 .matches = {
490 DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
491 DMI_MATCH(DMI_PRODUCT_VERSION, "2349D15"),
492 },
493 },
494 {
495 .callback = video_set_use_native_backlight,
496 .ident = "ThinkPad T431s",
497 .matches = {
498 DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
499 DMI_MATCH(DMI_PRODUCT_VERSION, "20AACTO1WW"),
500 },
501 },
502 {
503 .callback = video_set_use_native_backlight,
504 .ident = "ThinkPad Edge E530",
505 .matches = {
506 DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
507 DMI_MATCH(DMI_PRODUCT_VERSION, "3259A2G"),
508 },
509 },
510 {
511 .callback = video_set_use_native_backlight,
512 .ident = "ThinkPad Edge E530",
513 .matches = {
514 DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
515 DMI_MATCH(DMI_PRODUCT_VERSION, "3259CTO"),
516 },
517 },
518 {
519 .callback = video_set_use_native_backlight,
520 .ident = "ThinkPad Edge E530",
521 .matches = {
522 DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
523 DMI_MATCH(DMI_PRODUCT_VERSION, "3259HJG"),
524 },
525 },
526 {
527 .callback = video_set_use_native_backlight,
528 .ident = "ThinkPad W530",
529 .matches = {
530 DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
531 DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad W530"),
532 },
533 },
534 {
535 .callback = video_set_use_native_backlight,
536 .ident = "ThinkPad X1 Carbon",
537 .matches = {
538 DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
539 DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad X1 Carbon"),
540 },
541 },
542 {
543 .callback = video_set_use_native_backlight,
544 .ident = "Lenovo Yoga 13",
545 .matches = {
546 DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
547 DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo IdeaPad Yoga 13"),
548 },
549 },
550 {
551 .callback = video_set_use_native_backlight,
552 .ident = "Lenovo Yoga 2 11",
553 .matches = {
554 DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
555 DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo Yoga 2 11"),
556 },
557 },
558 {
559 .callback = video_set_use_native_backlight,
560 .ident = "Thinkpad Helix",
561 .matches = {
562 DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
563 DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad Helix"),
564 },
565 },
566 {
567 .callback = video_set_use_native_backlight,
568 .ident = "Dell Inspiron 7520",
569 .matches = {
570 DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
571 DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 7520"),
572 },
573 },
574 {
575 .callback = video_set_use_native_backlight,
576 .ident = "Acer Aspire 5733Z",
577 .matches = {
578 DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
579 DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5733Z"),
580 },
581 },
582 {
583 .callback = video_set_use_native_backlight,
584 .ident = "Acer Aspire 5742G",
585 .matches = {
586 DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
587 DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5742G"),
588 },
589 },
590 {
591 .callback = video_set_use_native_backlight,
592 .ident = "Acer Aspire V5-171",
593 .matches = {
594 DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
595 DMI_MATCH(DMI_PRODUCT_NAME, "V5-171"),
596 },
597 },
598 {
599 .callback = video_set_use_native_backlight,
600 .ident = "Acer Aspire V5-431",
601 .matches = {
602 DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
603 DMI_MATCH(DMI_PRODUCT_NAME, "Aspire V5-431"),
604 },
605 },
606 {
607 .callback = video_set_use_native_backlight,
608 .ident = "Acer Aspire V5-471G",
609 .matches = {
610 DMI_MATCH(DMI_BOARD_VENDOR, "Acer"),
611 DMI_MATCH(DMI_PRODUCT_NAME, "Aspire V5-471G"),
612 },
613 },
614 {
615 .callback = video_set_use_native_backlight,
616 .ident = "Acer TravelMate B113",
617 .matches = {
618 DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
619 DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate B113"),
620 },
621 },
622 {
623 .callback = video_set_use_native_backlight,
624 .ident = "Acer Aspire V5-572G",
625 .matches = {
626 DMI_MATCH(DMI_SYS_VENDOR, "Acer Aspire"),
627 DMI_MATCH(DMI_PRODUCT_VERSION, "V5-572G/Dazzle_CX"),
628 },
629 },
630 {
631 .callback = video_set_use_native_backlight,
632 .ident = "Acer Aspire V5-573G",
633 .matches = {
634 DMI_MATCH(DMI_SYS_VENDOR, "Acer Aspire"),
635 DMI_MATCH(DMI_PRODUCT_VERSION, "V5-573G/Dazzle_HW"),
636 },
637 },
638 {
639 .callback = video_set_use_native_backlight,
640 .ident = "ASUS Zenbook Prime UX31A",
641 .matches = {
642 DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
643 DMI_MATCH(DMI_PRODUCT_NAME, "UX31A"),
644 },
645 },
646 {
647 .callback = video_set_use_native_backlight,
648 .ident = "HP ProBook 4340s",
649 .matches = {
650 DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
651 DMI_MATCH(DMI_PRODUCT_VERSION, "HP ProBook 4340s"),
652 },
653 },
654 {
655 .callback = video_set_use_native_backlight,
656 .ident = "HP ProBook 4540s",
657 .matches = {
658 DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
659 DMI_MATCH(DMI_PRODUCT_VERSION, "HP ProBook 4540s"),
660 },
661 },
662 {
663 .callback = video_set_use_native_backlight,
664 .ident = "HP ProBook 2013 models",
665 .matches = {
666 DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
667 DMI_MATCH(DMI_PRODUCT_NAME, "HP ProBook "),
668 DMI_MATCH(DMI_PRODUCT_NAME, " G1"),
669 },
670 },
671 {
672 .callback = video_set_use_native_backlight,
673 .ident = "HP EliteBook 2013 models",
674 .matches = {
675 DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
676 DMI_MATCH(DMI_PRODUCT_NAME, "HP EliteBook "),
677 DMI_MATCH(DMI_PRODUCT_NAME, " G1"),
678 },
679 },
680 {
681 .callback = video_set_use_native_backlight,
682 .ident = "HP EliteBook 2014 models",
683 .matches = {
684 DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
685 DMI_MATCH(DMI_PRODUCT_NAME, "HP EliteBook "),
686 DMI_MATCH(DMI_PRODUCT_NAME, " G2"),
687 },
688 },
689 {
690 .callback = video_set_use_native_backlight,
691 .ident = "HP ZBook 14",
692 .matches = {
693 DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
694 DMI_MATCH(DMI_PRODUCT_NAME, "HP ZBook 14"),
695 },
696 },
697 {
698 .callback = video_set_use_native_backlight,
699 .ident = "HP ZBook 15",
700 .matches = {
701 DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
702 DMI_MATCH(DMI_PRODUCT_NAME, "HP ZBook 15"),
703 },
704 },
705 {
706 .callback = video_set_use_native_backlight,
707 .ident = "HP ZBook 17",
708 .matches = {
709 DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
710 DMI_MATCH(DMI_PRODUCT_NAME, "HP ZBook 17"),
711 },
712 },
713 {
714 .callback = video_set_use_native_backlight,
715 .ident = "HP EliteBook 8470p",
716 .matches = {
717 DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
718 DMI_MATCH(DMI_PRODUCT_NAME, "HP EliteBook 8470p"),
719 },
720 },
721 {
722 .callback = video_set_use_native_backlight,
723 .ident = "HP EliteBook 8780w",
724 .matches = {
725 DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
726 DMI_MATCH(DMI_PRODUCT_NAME, "HP EliteBook 8780w"),
727 },
728 },
729 464
730 /* 465 /*
731 * These models have a working acpi_video backlight control, and using 466 * These models have a working acpi_video backlight control, and using
@@ -1419,6 +1154,23 @@ acpi_video_device_bind(struct acpi_video_bus *video,
1419 } 1154 }
1420} 1155}
1421 1156
1157static bool acpi_video_device_in_dod(struct acpi_video_device *device)
1158{
1159 struct acpi_video_bus *video = device->video;
1160 int i;
1161
1162 /* If we have a broken _DOD, no need to test */
1163 if (!video->attached_count)
1164 return true;
1165
1166 for (i = 0; i < video->attached_count; i++) {
1167 if (video->attached_array[i].bind_info == device)
1168 return true;
1169 }
1170
1171 return false;
1172}
1173
1422/* 1174/*
1423 * Arg: 1175 * Arg:
1424 * video : video bus device 1176 * video : video bus device
@@ -1858,6 +1610,15 @@ static void acpi_video_dev_register_backlight(struct acpi_video_device *device)
1858 static int count; 1610 static int count;
1859 char *name; 1611 char *name;
1860 1612
1613 /*
1614 * Do not create backlight device for video output
1615 * device that is not in the enumerated list.
1616 */
1617 if (!acpi_video_device_in_dod(device)) {
1618 dev_dbg(&device->dev->dev, "not in _DOD list, ignore\n");
1619 return;
1620 }
1621
1861 result = acpi_video_init_brightness(device); 1622 result = acpi_video_init_brightness(device);
1862 if (result) 1623 if (result)
1863 return; 1624 return;
diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c
index c42feb2bacd0..27c43499977a 100644
--- a/drivers/acpi/video_detect.c
+++ b/drivers/acpi/video_detect.c
@@ -174,6 +174,14 @@ static struct dmi_system_id video_detect_dmi_table[] = {
174 DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 5737"), 174 DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 5737"),
175 }, 175 },
176 }, 176 },
177 {
178 .callback = video_detect_force_vendor,
179 .ident = "Lenovo IdeaPad Z570",
180 .matches = {
181 DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
182 DMI_MATCH(DMI_PRODUCT_VERSION, "Ideapad Z570"),
183 },
184 },
177 { }, 185 { },
178}; 186};
179 187
diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
index eb1bd2ecad8b..c2744b30d5d9 100644
--- a/drivers/base/power/wakeup.c
+++ b/drivers/base/power/wakeup.c
@@ -24,6 +24,9 @@
24 */ 24 */
25bool events_check_enabled __read_mostly; 25bool events_check_enabled __read_mostly;
26 26
27/* If set and the system is suspending, terminate the suspend. */
28static bool pm_abort_suspend __read_mostly;
29
27/* 30/*
28 * Combined counters of registered wakeup events and wakeup events in progress. 31 * Combined counters of registered wakeup events and wakeup events in progress.
29 * They need to be modified together atomically, so it's better to use one 32 * They need to be modified together atomically, so it's better to use one
@@ -719,7 +722,18 @@ bool pm_wakeup_pending(void)
719 pm_print_active_wakeup_sources(); 722 pm_print_active_wakeup_sources();
720 } 723 }
721 724
722 return ret; 725 return ret || pm_abort_suspend;
726}
727
728void pm_system_wakeup(void)
729{
730 pm_abort_suspend = true;
731 freeze_wake();
732}
733
734void pm_wakeup_clear(void)
735{
736 pm_abort_suspend = false;
723} 737}
724 738
725/** 739/**
diff --git a/drivers/base/syscore.c b/drivers/base/syscore.c
index dbb8350ea8dc..8d98a329f6ea 100644
--- a/drivers/base/syscore.c
+++ b/drivers/base/syscore.c
@@ -9,7 +9,7 @@
9#include <linux/syscore_ops.h> 9#include <linux/syscore_ops.h>
10#include <linux/mutex.h> 10#include <linux/mutex.h>
11#include <linux/module.h> 11#include <linux/module.h>
12#include <linux/interrupt.h> 12#include <linux/suspend.h>
13#include <trace/events/power.h> 13#include <trace/events/power.h>
14 14
15static LIST_HEAD(syscore_ops_list); 15static LIST_HEAD(syscore_ops_list);
@@ -54,9 +54,8 @@ int syscore_suspend(void)
54 pr_debug("Checking wakeup interrupts\n"); 54 pr_debug("Checking wakeup interrupts\n");
55 55
56 /* Return error code if there are any wakeup interrupts pending. */ 56 /* Return error code if there are any wakeup interrupts pending. */
57 ret = check_wakeup_irqs(); 57 if (pm_wakeup_pending())
58 if (ret) 58 return -EBUSY;
59 return ret;
60 59
61 WARN_ONCE(!irqs_disabled(), 60 WARN_ONCE(!irqs_disabled(),
62 "Interrupts enabled before system core suspend.\n"); 61 "Interrupts enabled before system core suspend.\n");
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 6e93e7f98358..61190f6b4829 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -1658,10 +1658,8 @@ void cpufreq_suspend(void)
1658 if (!cpufreq_driver) 1658 if (!cpufreq_driver)
1659 return; 1659 return;
1660 1660
1661 cpufreq_suspended = true;
1662
1663 if (!has_target()) 1661 if (!has_target())
1664 return; 1662 goto suspend;
1665 1663
1666 pr_debug("%s: Suspending Governors\n", __func__); 1664 pr_debug("%s: Suspending Governors\n", __func__);
1667 1665
@@ -1674,6 +1672,9 @@ void cpufreq_suspend(void)
1674 pr_err("%s: Failed to suspend driver: %p\n", __func__, 1672 pr_err("%s: Failed to suspend driver: %p\n", __func__,
1675 policy); 1673 policy);
1676 } 1674 }
1675
1676suspend:
1677 cpufreq_suspended = true;
1677} 1678}
1678 1679
1679/** 1680/**
diff --git a/drivers/cpufreq/integrator-cpufreq.c b/drivers/cpufreq/integrator-cpufreq.c
index c1320528b9d0..6bd69adc3c5e 100644
--- a/drivers/cpufreq/integrator-cpufreq.c
+++ b/drivers/cpufreq/integrator-cpufreq.c
@@ -213,9 +213,9 @@ static int __init integrator_cpufreq_probe(struct platform_device *pdev)
213 return cpufreq_register_driver(&integrator_driver); 213 return cpufreq_register_driver(&integrator_driver);
214} 214}
215 215
216static void __exit integrator_cpufreq_remove(struct platform_device *pdev) 216static int __exit integrator_cpufreq_remove(struct platform_device *pdev)
217{ 217{
218 cpufreq_unregister_driver(&integrator_driver); 218 return cpufreq_unregister_driver(&integrator_driver);
219} 219}
220 220
221static const struct of_device_id integrator_cpufreq_match[] = { 221static const struct of_device_id integrator_cpufreq_match[] = {
diff --git a/drivers/cpufreq/pcc-cpufreq.c b/drivers/cpufreq/pcc-cpufreq.c
index 728a2d879499..4d2c8e861089 100644
--- a/drivers/cpufreq/pcc-cpufreq.c
+++ b/drivers/cpufreq/pcc-cpufreq.c
@@ -204,7 +204,6 @@ static int pcc_cpufreq_target(struct cpufreq_policy *policy,
204 u32 input_buffer; 204 u32 input_buffer;
205 int cpu; 205 int cpu;
206 206
207 spin_lock(&pcc_lock);
208 cpu = policy->cpu; 207 cpu = policy->cpu;
209 pcc_cpu_data = per_cpu_ptr(pcc_cpu_info, cpu); 208 pcc_cpu_data = per_cpu_ptr(pcc_cpu_info, cpu);
210 209
@@ -216,6 +215,7 @@ static int pcc_cpufreq_target(struct cpufreq_policy *policy,
216 freqs.old = policy->cur; 215 freqs.old = policy->cur;
217 freqs.new = target_freq; 216 freqs.new = target_freq;
218 cpufreq_freq_transition_begin(policy, &freqs); 217 cpufreq_freq_transition_begin(policy, &freqs);
218 spin_lock(&pcc_lock);
219 219
220 input_buffer = 0x1 | (((target_freq * 100) 220 input_buffer = 0x1 | (((target_freq * 100)
221 / (ioread32(&pcch_hdr->nominal) * 1000)) << 8); 221 / (ioread32(&pcch_hdr->nominal) * 1000)) << 8);
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 1411613f2174..e42925f76b4b 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -1310,6 +1310,16 @@ void i915_check_and_clear_faults(struct drm_device *dev)
1310 POSTING_READ(RING_FAULT_REG(&dev_priv->ring[RCS])); 1310 POSTING_READ(RING_FAULT_REG(&dev_priv->ring[RCS]));
1311} 1311}
1312 1312
1313static void i915_ggtt_flush(struct drm_i915_private *dev_priv)
1314{
1315 if (INTEL_INFO(dev_priv->dev)->gen < 6) {
1316 intel_gtt_chipset_flush();
1317 } else {
1318 I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
1319 POSTING_READ(GFX_FLSH_CNTL_GEN6);
1320 }
1321}
1322
1313void i915_gem_suspend_gtt_mappings(struct drm_device *dev) 1323void i915_gem_suspend_gtt_mappings(struct drm_device *dev)
1314{ 1324{
1315 struct drm_i915_private *dev_priv = dev->dev_private; 1325 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1326,6 +1336,8 @@ void i915_gem_suspend_gtt_mappings(struct drm_device *dev)
1326 dev_priv->gtt.base.start, 1336 dev_priv->gtt.base.start,
1327 dev_priv->gtt.base.total, 1337 dev_priv->gtt.base.total,
1328 true); 1338 true);
1339
1340 i915_ggtt_flush(dev_priv);
1329} 1341}
1330 1342
1331void i915_gem_restore_gtt_mappings(struct drm_device *dev) 1343void i915_gem_restore_gtt_mappings(struct drm_device *dev)
@@ -1378,7 +1390,7 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev)
1378 gen6_write_pdes(container_of(vm, struct i915_hw_ppgtt, base)); 1390 gen6_write_pdes(container_of(vm, struct i915_hw_ppgtt, base));
1379 } 1391 }
1380 1392
1381 i915_gem_chipset_flush(dev); 1393 i915_ggtt_flush(dev_priv);
1382} 1394}
1383 1395
1384int i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj) 1396int i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj)
diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c
index ca52ad2ae7d1..d8de1d5140a7 100644
--- a/drivers/gpu/drm/i915/intel_opregion.c
+++ b/drivers/gpu/drm/i915/intel_opregion.c
@@ -396,6 +396,16 @@ int intel_opregion_notify_adapter(struct drm_device *dev, pci_power_t state)
396 return -EINVAL; 396 return -EINVAL;
397} 397}
398 398
399/*
400 * If the vendor backlight interface is not in use and ACPI backlight interface
401 * is broken, do not bother processing backlight change requests from firmware.
402 */
403static bool should_ignore_backlight_request(void)
404{
405 return acpi_video_backlight_support() &&
406 !acpi_video_verify_backlight_support();
407}
408
399static u32 asle_set_backlight(struct drm_device *dev, u32 bclp) 409static u32 asle_set_backlight(struct drm_device *dev, u32 bclp)
400{ 410{
401 struct drm_i915_private *dev_priv = dev->dev_private; 411 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -404,11 +414,7 @@ static u32 asle_set_backlight(struct drm_device *dev, u32 bclp)
404 414
405 DRM_DEBUG_DRIVER("bclp = 0x%08x\n", bclp); 415 DRM_DEBUG_DRIVER("bclp = 0x%08x\n", bclp);
406 416
407 /* 417 if (should_ignore_backlight_request()) {
408 * If the acpi_video interface is not supposed to be used, don't
409 * bother processing backlight level change requests from firmware.
410 */
411 if (!acpi_video_verify_backlight_support()) {
412 DRM_DEBUG_KMS("opregion backlight request ignored\n"); 418 DRM_DEBUG_KMS("opregion backlight request ignored\n");
413 return 0; 419 return 0;
414 } 420 }
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c b/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c
index 4b5bb5d58a54..f8cbb512132f 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c
@@ -1763,9 +1763,10 @@ nv50_disp_intr_unk40_0_tmds(struct nv50_disp_priv *priv, struct dcb_output *outp
1763 const int or = ffs(outp->or) - 1; 1763 const int or = ffs(outp->or) - 1;
1764 const u32 loff = (or * 0x800) + (link * 0x80); 1764 const u32 loff = (or * 0x800) + (link * 0x80);
1765 const u16 mask = (outp->sorconf.link << 6) | outp->or; 1765 const u16 mask = (outp->sorconf.link << 6) | outp->or;
1766 struct dcb_output match;
1766 u8 ver, hdr; 1767 u8 ver, hdr;
1767 1768
1768 if (dcb_outp_match(bios, DCB_OUTPUT_DP, mask, &ver, &hdr, outp)) 1769 if (dcb_outp_match(bios, DCB_OUTPUT_DP, mask, &ver, &hdr, &match))
1769 nv_mask(priv, 0x61c10c + loff, 0x00000001, 0x00000000); 1770 nv_mask(priv, 0x61c10c + loff, 0x00000001, 0x00000000);
1770} 1771}
1771 1772
diff --git a/drivers/gpu/drm/nouveau/nouveau_chan.c b/drivers/gpu/drm/nouveau/nouveau_chan.c
index 99cd9e4a2aa6..3440fc999f2f 100644
--- a/drivers/gpu/drm/nouveau/nouveau_chan.c
+++ b/drivers/gpu/drm/nouveau/nouveau_chan.c
@@ -285,6 +285,7 @@ nouveau_channel_init(struct nouveau_channel *chan, u32 vram, u32 gart)
285 struct nouveau_software_chan *swch; 285 struct nouveau_software_chan *swch;
286 struct nv_dma_v0 args = {}; 286 struct nv_dma_v0 args = {};
287 int ret, i; 287 int ret, i;
288 bool save;
288 289
289 nvif_object_map(chan->object); 290 nvif_object_map(chan->object);
290 291
@@ -386,7 +387,11 @@ nouveau_channel_init(struct nouveau_channel *chan, u32 vram, u32 gart)
386 } 387 }
387 388
388 /* initialise synchronisation */ 389 /* initialise synchronisation */
389 return nouveau_fence(chan->drm)->context_new(chan); 390 save = cli->base.super;
391 cli->base.super = true; /* hack until fencenv50 fixed */
392 ret = nouveau_fence(chan->drm)->context_new(chan);
393 cli->base.super = save;
394 return ret;
390} 395}
391 396
392int 397int
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index 65b4fd53dd4e..4a21b2b06ce2 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -550,14 +550,12 @@ nouveau_display_destroy(struct drm_device *dev)
550} 550}
551 551
552int 552int
553nouveau_display_suspend(struct drm_device *dev) 553nouveau_display_suspend(struct drm_device *dev, bool runtime)
554{ 554{
555 struct nouveau_drm *drm = nouveau_drm(dev);
556 struct drm_crtc *crtc; 555 struct drm_crtc *crtc;
557 556
558 nouveau_display_fini(dev); 557 nouveau_display_fini(dev);
559 558
560 NV_INFO(drm, "unpinning framebuffer(s)...\n");
561 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { 559 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
562 struct nouveau_framebuffer *nouveau_fb; 560 struct nouveau_framebuffer *nouveau_fb;
563 561
@@ -579,12 +577,13 @@ nouveau_display_suspend(struct drm_device *dev)
579} 577}
580 578
581void 579void
582nouveau_display_repin(struct drm_device *dev) 580nouveau_display_resume(struct drm_device *dev, bool runtime)
583{ 581{
584 struct nouveau_drm *drm = nouveau_drm(dev); 582 struct nouveau_drm *drm = nouveau_drm(dev);
585 struct drm_crtc *crtc; 583 struct drm_crtc *crtc;
586 int ret; 584 int ret, head;
587 585
586 /* re-pin fb/cursors */
588 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { 587 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
589 struct nouveau_framebuffer *nouveau_fb; 588 struct nouveau_framebuffer *nouveau_fb;
590 589
@@ -606,13 +605,6 @@ nouveau_display_repin(struct drm_device *dev)
606 if (ret) 605 if (ret)
607 NV_ERROR(drm, "Could not pin/map cursor.\n"); 606 NV_ERROR(drm, "Could not pin/map cursor.\n");
608 } 607 }
609}
610
611void
612nouveau_display_resume(struct drm_device *dev)
613{
614 struct drm_crtc *crtc;
615 int head;
616 608
617 nouveau_display_init(dev); 609 nouveau_display_init(dev);
618 610
@@ -627,6 +619,13 @@ nouveau_display_resume(struct drm_device *dev)
627 for (head = 0; head < dev->mode_config.num_crtc; head++) 619 for (head = 0; head < dev->mode_config.num_crtc; head++)
628 drm_vblank_on(dev, head); 620 drm_vblank_on(dev, head);
629 621
622 /* This should ensure we don't hit a locking problem when someone
623 * wakes us up via a connector. We should never go into suspend
624 * while the display is on anyways.
625 */
626 if (runtime)
627 return;
628
630 drm_helper_resume_force_mode(dev); 629 drm_helper_resume_force_mode(dev);
631 630
632 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { 631 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.h b/drivers/gpu/drm/nouveau/nouveau_display.h
index 88ca177cb1c7..be3d5947c6be 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.h
+++ b/drivers/gpu/drm/nouveau/nouveau_display.h
@@ -63,9 +63,8 @@ int nouveau_display_create(struct drm_device *dev);
63void nouveau_display_destroy(struct drm_device *dev); 63void nouveau_display_destroy(struct drm_device *dev);
64int nouveau_display_init(struct drm_device *dev); 64int nouveau_display_init(struct drm_device *dev);
65void nouveau_display_fini(struct drm_device *dev); 65void nouveau_display_fini(struct drm_device *dev);
66int nouveau_display_suspend(struct drm_device *dev); 66int nouveau_display_suspend(struct drm_device *dev, bool runtime);
67void nouveau_display_repin(struct drm_device *dev); 67void nouveau_display_resume(struct drm_device *dev, bool runtime);
68void nouveau_display_resume(struct drm_device *dev);
69int nouveau_display_vblank_enable(struct drm_device *, int); 68int nouveau_display_vblank_enable(struct drm_device *, int);
70void nouveau_display_vblank_disable(struct drm_device *, int); 69void nouveau_display_vblank_disable(struct drm_device *, int);
71int nouveau_display_scanoutpos(struct drm_device *, int, unsigned int, 70int nouveau_display_scanoutpos(struct drm_device *, int, unsigned int,
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index 9c3af96a7153..3ed32dd90303 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -547,9 +547,11 @@ nouveau_do_suspend(struct drm_device *dev, bool runtime)
547 struct nouveau_cli *cli; 547 struct nouveau_cli *cli;
548 int ret; 548 int ret;
549 549
550 if (dev->mode_config.num_crtc && !runtime) { 550 if (dev->mode_config.num_crtc) {
551 NV_INFO(drm, "suspending console...\n");
552 nouveau_fbcon_set_suspend(dev, 1);
551 NV_INFO(drm, "suspending display...\n"); 553 NV_INFO(drm, "suspending display...\n");
552 ret = nouveau_display_suspend(dev); 554 ret = nouveau_display_suspend(dev, runtime);
553 if (ret) 555 if (ret)
554 return ret; 556 return ret;
555 } 557 }
@@ -603,7 +605,7 @@ fail_client:
603fail_display: 605fail_display:
604 if (dev->mode_config.num_crtc) { 606 if (dev->mode_config.num_crtc) {
605 NV_INFO(drm, "resuming display...\n"); 607 NV_INFO(drm, "resuming display...\n");
606 nouveau_display_resume(dev); 608 nouveau_display_resume(dev, runtime);
607 } 609 }
608 return ret; 610 return ret;
609} 611}
@@ -618,9 +620,6 @@ int nouveau_pmops_suspend(struct device *dev)
618 drm_dev->switch_power_state == DRM_SWITCH_POWER_DYNAMIC_OFF) 620 drm_dev->switch_power_state == DRM_SWITCH_POWER_DYNAMIC_OFF)
619 return 0; 621 return 0;
620 622
621 if (drm_dev->mode_config.num_crtc)
622 nouveau_fbcon_set_suspend(drm_dev, 1);
623
624 ret = nouveau_do_suspend(drm_dev, false); 623 ret = nouveau_do_suspend(drm_dev, false);
625 if (ret) 624 if (ret)
626 return ret; 625 return ret;
@@ -633,7 +632,7 @@ int nouveau_pmops_suspend(struct device *dev)
633} 632}
634 633
635static int 634static int
636nouveau_do_resume(struct drm_device *dev) 635nouveau_do_resume(struct drm_device *dev, bool runtime)
637{ 636{
638 struct nouveau_drm *drm = nouveau_drm(dev); 637 struct nouveau_drm *drm = nouveau_drm(dev);
639 struct nouveau_cli *cli; 638 struct nouveau_cli *cli;
@@ -658,7 +657,9 @@ nouveau_do_resume(struct drm_device *dev)
658 657
659 if (dev->mode_config.num_crtc) { 658 if (dev->mode_config.num_crtc) {
660 NV_INFO(drm, "resuming display...\n"); 659 NV_INFO(drm, "resuming display...\n");
661 nouveau_display_repin(dev); 660 nouveau_display_resume(dev, runtime);
661 NV_INFO(drm, "resuming console...\n");
662 nouveau_fbcon_set_suspend(dev, 0);
662 } 663 }
663 664
664 return 0; 665 return 0;
@@ -681,47 +682,21 @@ int nouveau_pmops_resume(struct device *dev)
681 return ret; 682 return ret;
682 pci_set_master(pdev); 683 pci_set_master(pdev);
683 684
684 ret = nouveau_do_resume(drm_dev); 685 return nouveau_do_resume(drm_dev, false);
685 if (ret)
686 return ret;
687
688 if (drm_dev->mode_config.num_crtc) {
689 nouveau_display_resume(drm_dev);
690 nouveau_fbcon_set_suspend(drm_dev, 0);
691 }
692
693 return 0;
694} 686}
695 687
696static int nouveau_pmops_freeze(struct device *dev) 688static int nouveau_pmops_freeze(struct device *dev)
697{ 689{
698 struct pci_dev *pdev = to_pci_dev(dev); 690 struct pci_dev *pdev = to_pci_dev(dev);
699 struct drm_device *drm_dev = pci_get_drvdata(pdev); 691 struct drm_device *drm_dev = pci_get_drvdata(pdev);
700 int ret; 692 return nouveau_do_suspend(drm_dev, false);
701
702 if (drm_dev->mode_config.num_crtc)
703 nouveau_fbcon_set_suspend(drm_dev, 1);
704
705 ret = nouveau_do_suspend(drm_dev, false);
706 return ret;
707} 693}
708 694
709static int nouveau_pmops_thaw(struct device *dev) 695static int nouveau_pmops_thaw(struct device *dev)
710{ 696{
711 struct pci_dev *pdev = to_pci_dev(dev); 697 struct pci_dev *pdev = to_pci_dev(dev);
712 struct drm_device *drm_dev = pci_get_drvdata(pdev); 698 struct drm_device *drm_dev = pci_get_drvdata(pdev);
713 int ret; 699 return nouveau_do_resume(drm_dev, false);
714
715 ret = nouveau_do_resume(drm_dev);
716 if (ret)
717 return ret;
718
719 if (drm_dev->mode_config.num_crtc) {
720 nouveau_display_resume(drm_dev);
721 nouveau_fbcon_set_suspend(drm_dev, 0);
722 }
723
724 return 0;
725} 700}
726 701
727 702
@@ -977,7 +952,7 @@ static int nouveau_pmops_runtime_resume(struct device *dev)
977 return ret; 952 return ret;
978 pci_set_master(pdev); 953 pci_set_master(pdev);
979 954
980 ret = nouveau_do_resume(drm_dev); 955 ret = nouveau_do_resume(drm_dev, true);
981 drm_kms_helper_poll_enable(drm_dev); 956 drm_kms_helper_poll_enable(drm_dev);
982 /* do magic */ 957 /* do magic */
983 nvif_mask(device, 0x88488, (1 << 25), (1 << 25)); 958 nvif_mask(device, 0x88488, (1 << 25), (1 << 25));
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
index 8bdd27091db8..49fe6075cc7c 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
@@ -486,6 +486,16 @@ static const struct drm_fb_helper_funcs nouveau_fbcon_helper_funcs = {
486 .fb_probe = nouveau_fbcon_create, 486 .fb_probe = nouveau_fbcon_create,
487}; 487};
488 488
489static void
490nouveau_fbcon_set_suspend_work(struct work_struct *work)
491{
492 struct nouveau_fbdev *fbcon = container_of(work, typeof(*fbcon), work);
493 console_lock();
494 nouveau_fbcon_accel_restore(fbcon->dev);
495 nouveau_fbcon_zfill(fbcon->dev, fbcon);
496 fb_set_suspend(fbcon->helper.fbdev, FBINFO_STATE_RUNNING);
497 console_unlock();
498}
489 499
490int 500int
491nouveau_fbcon_init(struct drm_device *dev) 501nouveau_fbcon_init(struct drm_device *dev)
@@ -503,6 +513,7 @@ nouveau_fbcon_init(struct drm_device *dev)
503 if (!fbcon) 513 if (!fbcon)
504 return -ENOMEM; 514 return -ENOMEM;
505 515
516 INIT_WORK(&fbcon->work, nouveau_fbcon_set_suspend_work);
506 fbcon->dev = dev; 517 fbcon->dev = dev;
507 drm->fbcon = fbcon; 518 drm->fbcon = fbcon;
508 519
@@ -551,14 +562,14 @@ nouveau_fbcon_set_suspend(struct drm_device *dev, int state)
551{ 562{
552 struct nouveau_drm *drm = nouveau_drm(dev); 563 struct nouveau_drm *drm = nouveau_drm(dev);
553 if (drm->fbcon) { 564 if (drm->fbcon) {
554 console_lock(); 565 if (state == FBINFO_STATE_RUNNING) {
555 if (state == 0) { 566 schedule_work(&drm->fbcon->work);
556 nouveau_fbcon_accel_restore(dev); 567 return;
557 nouveau_fbcon_zfill(dev, drm->fbcon);
558 } 568 }
569 flush_work(&drm->fbcon->work);
570 console_lock();
559 fb_set_suspend(drm->fbcon->helper.fbdev, state); 571 fb_set_suspend(drm->fbcon->helper.fbdev, state);
560 if (state == 1) 572 nouveau_fbcon_accel_save_disable(dev);
561 nouveau_fbcon_accel_save_disable(dev);
562 console_unlock(); 573 console_unlock();
563 } 574 }
564} 575}
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.h b/drivers/gpu/drm/nouveau/nouveau_fbcon.h
index 34658cfa8f5d..0b465c7d3907 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.h
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.h
@@ -36,6 +36,7 @@ struct nouveau_fbdev {
36 struct nouveau_framebuffer nouveau_fb; 36 struct nouveau_framebuffer nouveau_fb;
37 struct list_head fbdev_list; 37 struct list_head fbdev_list;
38 struct drm_device *dev; 38 struct drm_device *dev;
39 struct work_struct work;
39 unsigned int saved_flags; 40 unsigned int saved_flags;
40 struct nvif_object surf2d; 41 struct nvif_object surf2d;
41 struct nvif_object clip; 42 struct nvif_object clip;
diff --git a/drivers/i2c/busses/i2c-qup.c b/drivers/i2c/busses/i2c-qup.c
index 3a4d64e1dfb1..092d89bd3224 100644
--- a/drivers/i2c/busses/i2c-qup.c
+++ b/drivers/i2c/busses/i2c-qup.c
@@ -674,16 +674,20 @@ static int qup_i2c_probe(struct platform_device *pdev)
674 qup->adap.dev.of_node = pdev->dev.of_node; 674 qup->adap.dev.of_node = pdev->dev.of_node;
675 strlcpy(qup->adap.name, "QUP I2C adapter", sizeof(qup->adap.name)); 675 strlcpy(qup->adap.name, "QUP I2C adapter", sizeof(qup->adap.name));
676 676
677 ret = i2c_add_adapter(&qup->adap);
678 if (ret)
679 goto fail;
680
681 pm_runtime_set_autosuspend_delay(qup->dev, MSEC_PER_SEC); 677 pm_runtime_set_autosuspend_delay(qup->dev, MSEC_PER_SEC);
682 pm_runtime_use_autosuspend(qup->dev); 678 pm_runtime_use_autosuspend(qup->dev);
683 pm_runtime_set_active(qup->dev); 679 pm_runtime_set_active(qup->dev);
684 pm_runtime_enable(qup->dev); 680 pm_runtime_enable(qup->dev);
681
682 ret = i2c_add_adapter(&qup->adap);
683 if (ret)
684 goto fail_runtime;
685
685 return 0; 686 return 0;
686 687
688fail_runtime:
689 pm_runtime_disable(qup->dev);
690 pm_runtime_set_suspended(qup->dev);
687fail: 691fail:
688 qup_i2c_disable_clocks(qup); 692 qup_i2c_disable_clocks(qup);
689 return ret; 693 return ret;
diff --git a/drivers/i2c/busses/i2c-rk3x.c b/drivers/i2c/busses/i2c-rk3x.c
index 93cfc837200b..b38b0529946a 100644
--- a/drivers/i2c/busses/i2c-rk3x.c
+++ b/drivers/i2c/busses/i2c-rk3x.c
@@ -238,7 +238,7 @@ static void rk3x_i2c_fill_transmit_buf(struct rk3x_i2c *i2c)
238 for (i = 0; i < 8; ++i) { 238 for (i = 0; i < 8; ++i) {
239 val = 0; 239 val = 0;
240 for (j = 0; j < 4; ++j) { 240 for (j = 0; j < 4; ++j) {
241 if (i2c->processed == i2c->msg->len) 241 if ((i2c->processed == i2c->msg->len) && (cnt != 0))
242 break; 242 break;
243 243
244 if (i2c->processed == 0 && cnt == 0) 244 if (i2c->processed == 0 && cnt == 0)
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 183588b11fc1..9f0fbecd1eb5 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -64,6 +64,10 @@
64#define cpu_to_group(cpu) cpu_to_node(cpu) 64#define cpu_to_group(cpu) cpu_to_node(cpu)
65#define ANY_GROUP NUMA_NO_NODE 65#define ANY_GROUP NUMA_NO_NODE
66 66
67static bool devices_handle_discard_safely = false;
68module_param(devices_handle_discard_safely, bool, 0644);
69MODULE_PARM_DESC(devices_handle_discard_safely,
70 "Set to Y if all devices in each array reliably return zeroes on reads from discarded regions");
67static struct workqueue_struct *raid5_wq; 71static struct workqueue_struct *raid5_wq;
68/* 72/*
69 * Stripe cache 73 * Stripe cache
@@ -6208,7 +6212,7 @@ static int run(struct mddev *mddev)
6208 mddev->queue->limits.discard_granularity = stripe; 6212 mddev->queue->limits.discard_granularity = stripe;
6209 /* 6213 /*
6210 * unaligned part of discard request will be ignored, so can't 6214 * unaligned part of discard request will be ignored, so can't
6211 * guarantee discard_zerors_data 6215 * guarantee discard_zeroes_data
6212 */ 6216 */
6213 mddev->queue->limits.discard_zeroes_data = 0; 6217 mddev->queue->limits.discard_zeroes_data = 0;
6214 6218
@@ -6233,6 +6237,18 @@ static int run(struct mddev *mddev)
6233 !bdev_get_queue(rdev->bdev)-> 6237 !bdev_get_queue(rdev->bdev)->
6234 limits.discard_zeroes_data) 6238 limits.discard_zeroes_data)
6235 discard_supported = false; 6239 discard_supported = false;
6240 /* Unfortunately, discard_zeroes_data is not currently
6241 * a guarantee - just a hint. So we only allow DISCARD
6242 * if the sysadmin has confirmed that only safe devices
6243 * are in use by setting a module parameter.
6244 */
6245 if (!devices_handle_discard_safely) {
6246 if (discard_supported) {
6247 pr_info("md/raid456: discard support disabled due to uncertainty.\n");
6248 pr_info("Set raid456.devices_handle_discard_safely=Y to override.\n");
6249 }
6250 discard_supported = false;
6251 }
6236 } 6252 }
6237 6253
6238 if (discard_supported && 6254 if (discard_supported &&
diff --git a/drivers/media/usb/em28xx/em28xx-cards.c b/drivers/media/usb/em28xx/em28xx-cards.c
index a7e24848f6c8..9da812b8a786 100644
--- a/drivers/media/usb/em28xx/em28xx-cards.c
+++ b/drivers/media/usb/em28xx/em28xx-cards.c
@@ -3524,6 +3524,7 @@ static struct usb_driver em28xx_usb_driver = {
3524 .disconnect = em28xx_usb_disconnect, 3524 .disconnect = em28xx_usb_disconnect,
3525 .suspend = em28xx_usb_suspend, 3525 .suspend = em28xx_usb_suspend,
3526 .resume = em28xx_usb_resume, 3526 .resume = em28xx_usb_resume,
3527 .reset_resume = em28xx_usb_resume,
3527 .id_table = em28xx_id_table, 3528 .id_table = em28xx_id_table,
3528}; 3529};
3529 3530
diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c
index 2fee73b878c2..823d01c5684c 100644
--- a/drivers/net/ethernet/broadcom/bnx2.c
+++ b/drivers/net/ethernet/broadcom/bnx2.c
@@ -3236,8 +3236,9 @@ bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
3236 3236
3237 skb->protocol = eth_type_trans(skb, bp->dev); 3237 skb->protocol = eth_type_trans(skb, bp->dev);
3238 3238
3239 if ((len > (bp->dev->mtu + ETH_HLEN)) && 3239 if (len > (bp->dev->mtu + ETH_HLEN) &&
3240 (ntohs(skb->protocol) != 0x8100)) { 3240 skb->protocol != htons(0x8100) &&
3241 skb->protocol != htons(ETH_P_8021AD)) {
3241 3242
3242 dev_kfree_skb(skb); 3243 dev_kfree_skb(skb);
3243 goto next_rx; 3244 goto next_rx;
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index e7d3a620d96a..ba499489969a 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -6918,7 +6918,8 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget)
6918 skb->protocol = eth_type_trans(skb, tp->dev); 6918 skb->protocol = eth_type_trans(skb, tp->dev);
6919 6919
6920 if (len > (tp->dev->mtu + ETH_HLEN) && 6920 if (len > (tp->dev->mtu + ETH_HLEN) &&
6921 skb->protocol != htons(ETH_P_8021Q)) { 6921 skb->protocol != htons(ETH_P_8021Q) &&
6922 skb->protocol != htons(ETH_P_8021AD)) {
6922 dev_kfree_skb_any(skb); 6923 dev_kfree_skb_any(skb);
6923 goto drop_it_no_recycle; 6924 goto drop_it_no_recycle;
6924 } 6925 }
diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c
index ca5d7798b265..e1e02fba4fcc 100644
--- a/drivers/net/ethernet/cadence/macb.c
+++ b/drivers/net/ethernet/cadence/macb.c
@@ -30,7 +30,6 @@
30#include <linux/of_device.h> 30#include <linux/of_device.h>
31#include <linux/of_mdio.h> 31#include <linux/of_mdio.h>
32#include <linux/of_net.h> 32#include <linux/of_net.h>
33#include <linux/pinctrl/consumer.h>
34 33
35#include "macb.h" 34#include "macb.h"
36 35
@@ -2071,7 +2070,6 @@ static int __init macb_probe(struct platform_device *pdev)
2071 struct phy_device *phydev; 2070 struct phy_device *phydev;
2072 u32 config; 2071 u32 config;
2073 int err = -ENXIO; 2072 int err = -ENXIO;
2074 struct pinctrl *pinctrl;
2075 const char *mac; 2073 const char *mac;
2076 2074
2077 regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); 2075 regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -2080,15 +2078,6 @@ static int __init macb_probe(struct platform_device *pdev)
2080 goto err_out; 2078 goto err_out;
2081 } 2079 }
2082 2080
2083 pinctrl = devm_pinctrl_get_select_default(&pdev->dev);
2084 if (IS_ERR(pinctrl)) {
2085 err = PTR_ERR(pinctrl);
2086 if (err == -EPROBE_DEFER)
2087 goto err_out;
2088
2089 dev_warn(&pdev->dev, "No pinctrl provided\n");
2090 }
2091
2092 err = -ENOMEM; 2081 err = -ENOMEM;
2093 dev = alloc_etherdev(sizeof(*bp)); 2082 dev = alloc_etherdev(sizeof(*bp));
2094 if (!dev) 2083 if (!dev)
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index 7e2d5d57c598..871e3a5bda38 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -78,13 +78,13 @@ MODULE_PARM_DESC(msi_x, "attempt to use MSI-X if nonzero");
78#endif /* CONFIG_PCI_MSI */ 78#endif /* CONFIG_PCI_MSI */
79 79
80static uint8_t num_vfs[3] = {0, 0, 0}; 80static uint8_t num_vfs[3] = {0, 0, 0};
81static int num_vfs_argc = 3; 81static int num_vfs_argc;
82module_param_array(num_vfs, byte , &num_vfs_argc, 0444); 82module_param_array(num_vfs, byte , &num_vfs_argc, 0444);
83MODULE_PARM_DESC(num_vfs, "enable #num_vfs functions if num_vfs > 0\n" 83MODULE_PARM_DESC(num_vfs, "enable #num_vfs functions if num_vfs > 0\n"
84 "num_vfs=port1,port2,port1+2"); 84 "num_vfs=port1,port2,port1+2");
85 85
86static uint8_t probe_vf[3] = {0, 0, 0}; 86static uint8_t probe_vf[3] = {0, 0, 0};
87static int probe_vfs_argc = 3; 87static int probe_vfs_argc;
88module_param_array(probe_vf, byte, &probe_vfs_argc, 0444); 88module_param_array(probe_vf, byte, &probe_vfs_argc, 0444);
89MODULE_PARM_DESC(probe_vf, "number of vfs to probe by pf driver (num_vfs > 0)\n" 89MODULE_PARM_DESC(probe_vf, "number of vfs to probe by pf driver (num_vfs > 0)\n"
90 "probe_vf=port1,port2,port1+2"); 90 "probe_vf=port1,port2,port1+2");
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c
index 32058614151a..5c4068353f66 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c
@@ -135,6 +135,7 @@ void netxen_release_tx_buffers(struct netxen_adapter *adapter)
135 int i, j; 135 int i, j;
136 struct nx_host_tx_ring *tx_ring = adapter->tx_ring; 136 struct nx_host_tx_ring *tx_ring = adapter->tx_ring;
137 137
138 spin_lock(&adapter->tx_clean_lock);
138 cmd_buf = tx_ring->cmd_buf_arr; 139 cmd_buf = tx_ring->cmd_buf_arr;
139 for (i = 0; i < tx_ring->num_desc; i++) { 140 for (i = 0; i < tx_ring->num_desc; i++) {
140 buffrag = cmd_buf->frag_array; 141 buffrag = cmd_buf->frag_array;
@@ -158,6 +159,7 @@ void netxen_release_tx_buffers(struct netxen_adapter *adapter)
158 } 159 }
159 cmd_buf++; 160 cmd_buf++;
160 } 161 }
162 spin_unlock(&adapter->tx_clean_lock);
161} 163}
162 164
163void netxen_free_sw_resources(struct netxen_adapter *adapter) 165void netxen_free_sw_resources(struct netxen_adapter *adapter)
@@ -1792,9 +1794,9 @@ int netxen_process_cmd_ring(struct netxen_adapter *adapter)
1792 break; 1794 break;
1793 } 1795 }
1794 1796
1795 if (count && netif_running(netdev)) { 1797 tx_ring->sw_consumer = sw_consumer;
1796 tx_ring->sw_consumer = sw_consumer;
1797 1798
1799 if (count && netif_running(netdev)) {
1798 smp_mb(); 1800 smp_mb();
1799 1801
1800 if (netif_queue_stopped(netdev) && netif_carrier_ok(netdev)) 1802 if (netif_queue_stopped(netdev) && netif_carrier_ok(netdev))
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
index 1159031f885b..5ec5a2b0e989 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
@@ -1186,7 +1186,6 @@ __netxen_nic_down(struct netxen_adapter *adapter, struct net_device *netdev)
1186 return; 1186 return;
1187 1187
1188 smp_mb(); 1188 smp_mb();
1189 spin_lock(&adapter->tx_clean_lock);
1190 netif_carrier_off(netdev); 1189 netif_carrier_off(netdev);
1191 netif_tx_disable(netdev); 1190 netif_tx_disable(netdev);
1192 1191
@@ -1204,7 +1203,6 @@ __netxen_nic_down(struct netxen_adapter *adapter, struct net_device *netdev)
1204 netxen_napi_disable(adapter); 1203 netxen_napi_disable(adapter);
1205 1204
1206 netxen_release_tx_buffers(adapter); 1205 netxen_release_tx_buffers(adapter);
1207 spin_unlock(&adapter->tx_clean_lock);
1208} 1206}
1209 1207
1210/* Usage: During suspend and firmware recovery module */ 1208/* Usage: During suspend and firmware recovery module */
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
index 86783e1afcf7..3172cdf591fe 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
@@ -1177,9 +1177,8 @@ static void qlcnic_83xx_setup_idc_parameters(struct qlcnic_adapter *adapter)
1177{ 1177{
1178 u32 idc_params, val; 1178 u32 idc_params, val;
1179 1179
1180 if (qlcnic_83xx_lockless_flash_read32(adapter, 1180 if (qlcnic_83xx_flash_read32(adapter, QLC_83XX_IDC_FLASH_PARAM_ADDR,
1181 QLC_83XX_IDC_FLASH_PARAM_ADDR, 1181 (u8 *)&idc_params, 1)) {
1182 (u8 *)&idc_params, 1)) {
1183 dev_info(&adapter->pdev->dev, 1182 dev_info(&adapter->pdev->dev,
1184 "%s:failed to get IDC params from flash\n", __func__); 1183 "%s:failed to get IDC params from flash\n", __func__);
1185 adapter->dev_init_timeo = QLC_83XX_IDC_INIT_TIMEOUT_SECS; 1184 adapter->dev_init_timeo = QLC_83XX_IDC_INIT_TIMEOUT_SECS;
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
index 141f116eb868..494e8105adee 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
@@ -1333,21 +1333,21 @@ static void qlcnic_get_ethtool_stats(struct net_device *dev,
1333 struct qlcnic_host_tx_ring *tx_ring; 1333 struct qlcnic_host_tx_ring *tx_ring;
1334 struct qlcnic_esw_statistics port_stats; 1334 struct qlcnic_esw_statistics port_stats;
1335 struct qlcnic_mac_statistics mac_stats; 1335 struct qlcnic_mac_statistics mac_stats;
1336 int index, ret, length, size, tx_size, ring; 1336 int index, ret, length, size, ring;
1337 char *p; 1337 char *p;
1338 1338
1339 tx_size = adapter->drv_tx_rings * QLCNIC_TX_STATS_LEN; 1339 memset(data, 0, stats->n_stats * sizeof(u64));
1340 1340
1341 memset(data, 0, tx_size * sizeof(u64));
1342 for (ring = 0, index = 0; ring < adapter->drv_tx_rings; ring++) { 1341 for (ring = 0, index = 0; ring < adapter->drv_tx_rings; ring++) {
1343 if (test_bit(__QLCNIC_DEV_UP, &adapter->state)) { 1342 if (adapter->is_up == QLCNIC_ADAPTER_UP_MAGIC) {
1344 tx_ring = &adapter->tx_ring[ring]; 1343 tx_ring = &adapter->tx_ring[ring];
1345 data = qlcnic_fill_tx_queue_stats(data, tx_ring); 1344 data = qlcnic_fill_tx_queue_stats(data, tx_ring);
1346 qlcnic_update_stats(adapter); 1345 qlcnic_update_stats(adapter);
1346 } else {
1347 data += QLCNIC_TX_STATS_LEN;
1347 } 1348 }
1348 } 1349 }
1349 1350
1350 memset(data, 0, stats->n_stats * sizeof(u64));
1351 length = QLCNIC_STATS_LEN; 1351 length = QLCNIC_STATS_LEN;
1352 for (index = 0; index < length; index++) { 1352 for (index = 0; index < length; index++) {
1353 p = (char *)adapter + qlcnic_gstrings_stats[index].stat_offset; 1353 p = (char *)adapter + qlcnic_gstrings_stats[index].stat_offset;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 6e6ee226de04..b0c1521e08a3 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -2786,8 +2786,15 @@ struct stmmac_priv *stmmac_dvr_probe(struct device *device,
2786 if (IS_ERR(priv->stmmac_clk)) { 2786 if (IS_ERR(priv->stmmac_clk)) {
2787 dev_warn(priv->device, "%s: warning: cannot get CSR clock\n", 2787 dev_warn(priv->device, "%s: warning: cannot get CSR clock\n",
2788 __func__); 2788 __func__);
2789 ret = PTR_ERR(priv->stmmac_clk); 2789 /* If failed to obtain stmmac_clk and specific clk_csr value
2790 goto error_clk_get; 2790 * is NOT passed from the platform, probe fail.
2791 */
2792 if (!priv->plat->clk_csr) {
2793 ret = PTR_ERR(priv->stmmac_clk);
2794 goto error_clk_get;
2795 } else {
2796 priv->stmmac_clk = NULL;
2797 }
2791 } 2798 }
2792 clk_prepare_enable(priv->stmmac_clk); 2799 clk_prepare_enable(priv->stmmac_clk);
2793 2800
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index a9c5eaadc426..0fcb5e7eb073 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -387,6 +387,7 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
387 int hdr_offset; 387 int hdr_offset;
388 u32 net_trans_info; 388 u32 net_trans_info;
389 u32 hash; 389 u32 hash;
390 u32 skb_length = skb->len;
390 391
391 392
392 /* We will atmost need two pages to describe the rndis 393 /* We will atmost need two pages to describe the rndis
@@ -562,7 +563,7 @@ do_send:
562 563
563drop: 564drop:
564 if (ret == 0) { 565 if (ret == 0) {
565 net->stats.tx_bytes += skb->len; 566 net->stats.tx_bytes += skb_length;
566 net->stats.tx_packets++; 567 net->stats.tx_packets++;
567 } else { 568 } else {
568 kfree(packet); 569 kfree(packet);
diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
index 3381c4f91a8c..0c6adaaf898c 100644
--- a/drivers/net/macvtap.c
+++ b/drivers/net/macvtap.c
@@ -112,17 +112,15 @@ out:
112 return err; 112 return err;
113} 113}
114 114
115/* Requires RTNL */
115static int macvtap_set_queue(struct net_device *dev, struct file *file, 116static int macvtap_set_queue(struct net_device *dev, struct file *file,
116 struct macvtap_queue *q) 117 struct macvtap_queue *q)
117{ 118{
118 struct macvlan_dev *vlan = netdev_priv(dev); 119 struct macvlan_dev *vlan = netdev_priv(dev);
119 int err = -EBUSY;
120 120
121 rtnl_lock();
122 if (vlan->numqueues == MAX_MACVTAP_QUEUES) 121 if (vlan->numqueues == MAX_MACVTAP_QUEUES)
123 goto out; 122 return -EBUSY;
124 123
125 err = 0;
126 rcu_assign_pointer(q->vlan, vlan); 124 rcu_assign_pointer(q->vlan, vlan);
127 rcu_assign_pointer(vlan->taps[vlan->numvtaps], q); 125 rcu_assign_pointer(vlan->taps[vlan->numvtaps], q);
128 sock_hold(&q->sk); 126 sock_hold(&q->sk);
@@ -136,9 +134,7 @@ static int macvtap_set_queue(struct net_device *dev, struct file *file,
136 vlan->numvtaps++; 134 vlan->numvtaps++;
137 vlan->numqueues++; 135 vlan->numqueues++;
138 136
139out: 137 return 0;
140 rtnl_unlock();
141 return err;
142} 138}
143 139
144static int macvtap_disable_queue(struct macvtap_queue *q) 140static int macvtap_disable_queue(struct macvtap_queue *q)
@@ -454,11 +450,12 @@ static void macvtap_sock_destruct(struct sock *sk)
454static int macvtap_open(struct inode *inode, struct file *file) 450static int macvtap_open(struct inode *inode, struct file *file)
455{ 451{
456 struct net *net = current->nsproxy->net_ns; 452 struct net *net = current->nsproxy->net_ns;
457 struct net_device *dev = dev_get_by_macvtap_minor(iminor(inode)); 453 struct net_device *dev;
458 struct macvtap_queue *q; 454 struct macvtap_queue *q;
459 int err; 455 int err = -ENODEV;
460 456
461 err = -ENODEV; 457 rtnl_lock();
458 dev = dev_get_by_macvtap_minor(iminor(inode));
462 if (!dev) 459 if (!dev)
463 goto out; 460 goto out;
464 461
@@ -498,6 +495,7 @@ out:
498 if (dev) 495 if (dev)
499 dev_put(dev); 496 dev_put(dev);
500 497
498 rtnl_unlock();
501 return err; 499 return err;
502} 500}
503 501
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index 74760e8143e3..604ef210a4de 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -24,7 +24,7 @@
24#include <net/ip6_checksum.h> 24#include <net/ip6_checksum.h>
25 25
26/* Version Information */ 26/* Version Information */
27#define DRIVER_VERSION "v1.06.0 (2014/03/03)" 27#define DRIVER_VERSION "v1.06.1 (2014/10/01)"
28#define DRIVER_AUTHOR "Realtek linux nic maintainers <nic_swsd@realtek.com>" 28#define DRIVER_AUTHOR "Realtek linux nic maintainers <nic_swsd@realtek.com>"
29#define DRIVER_DESC "Realtek RTL8152/RTL8153 Based USB Ethernet Adapters" 29#define DRIVER_DESC "Realtek RTL8152/RTL8153 Based USB Ethernet Adapters"
30#define MODULENAME "r8152" 30#define MODULENAME "r8152"
@@ -1949,10 +1949,34 @@ static void rxdy_gated_en(struct r8152 *tp, bool enable)
1949 ocp_write_word(tp, MCU_TYPE_PLA, PLA_MISC_1, ocp_data); 1949 ocp_write_word(tp, MCU_TYPE_PLA, PLA_MISC_1, ocp_data);
1950} 1950}
1951 1951
1952static int rtl_start_rx(struct r8152 *tp)
1953{
1954 int i, ret = 0;
1955
1956 INIT_LIST_HEAD(&tp->rx_done);
1957 for (i = 0; i < RTL8152_MAX_RX; i++) {
1958 INIT_LIST_HEAD(&tp->rx_info[i].list);
1959 ret = r8152_submit_rx(tp, &tp->rx_info[i], GFP_KERNEL);
1960 if (ret)
1961 break;
1962 }
1963
1964 return ret;
1965}
1966
1967static int rtl_stop_rx(struct r8152 *tp)
1968{
1969 int i;
1970
1971 for (i = 0; i < RTL8152_MAX_RX; i++)
1972 usb_kill_urb(tp->rx_info[i].urb);
1973
1974 return 0;
1975}
1976
1952static int rtl_enable(struct r8152 *tp) 1977static int rtl_enable(struct r8152 *tp)
1953{ 1978{
1954 u32 ocp_data; 1979 u32 ocp_data;
1955 int i, ret;
1956 1980
1957 r8152b_reset_packet_filter(tp); 1981 r8152b_reset_packet_filter(tp);
1958 1982
@@ -1962,14 +1986,7 @@ static int rtl_enable(struct r8152 *tp)
1962 1986
1963 rxdy_gated_en(tp, false); 1987 rxdy_gated_en(tp, false);
1964 1988
1965 INIT_LIST_HEAD(&tp->rx_done); 1989 return rtl_start_rx(tp);
1966 ret = 0;
1967 for (i = 0; i < RTL8152_MAX_RX; i++) {
1968 INIT_LIST_HEAD(&tp->rx_info[i].list);
1969 ret |= r8152_submit_rx(tp, &tp->rx_info[i], GFP_KERNEL);
1970 }
1971
1972 return ret;
1973} 1990}
1974 1991
1975static int rtl8152_enable(struct r8152 *tp) 1992static int rtl8152_enable(struct r8152 *tp)
@@ -2053,8 +2070,7 @@ static void rtl_disable(struct r8152 *tp)
2053 mdelay(1); 2070 mdelay(1);
2054 } 2071 }
2055 2072
2056 for (i = 0; i < RTL8152_MAX_RX; i++) 2073 rtl_stop_rx(tp);
2057 usb_kill_urb(tp->rx_info[i].urb);
2058 2074
2059 rtl8152_nic_reset(tp); 2075 rtl8152_nic_reset(tp);
2060} 2076}
@@ -2185,28 +2201,6 @@ static void rtl_phy_reset(struct r8152 *tp)
2185 } 2201 }
2186} 2202}
2187 2203
2188static void rtl_clear_bp(struct r8152 *tp)
2189{
2190 ocp_write_dword(tp, MCU_TYPE_PLA, PLA_BP_0, 0);
2191 ocp_write_dword(tp, MCU_TYPE_PLA, PLA_BP_2, 0);
2192 ocp_write_dword(tp, MCU_TYPE_PLA, PLA_BP_4, 0);
2193 ocp_write_dword(tp, MCU_TYPE_PLA, PLA_BP_6, 0);
2194 ocp_write_dword(tp, MCU_TYPE_USB, USB_BP_0, 0);
2195 ocp_write_dword(tp, MCU_TYPE_USB, USB_BP_2, 0);
2196 ocp_write_dword(tp, MCU_TYPE_USB, USB_BP_4, 0);
2197 ocp_write_dword(tp, MCU_TYPE_USB, USB_BP_6, 0);
2198 mdelay(3);
2199 ocp_write_word(tp, MCU_TYPE_PLA, PLA_BP_BA, 0);
2200 ocp_write_word(tp, MCU_TYPE_USB, USB_BP_BA, 0);
2201}
2202
2203static void r8153_clear_bp(struct r8152 *tp)
2204{
2205 ocp_write_byte(tp, MCU_TYPE_PLA, PLA_BP_EN, 0);
2206 ocp_write_byte(tp, MCU_TYPE_USB, USB_BP_EN, 0);
2207 rtl_clear_bp(tp);
2208}
2209
2210static void r8153_teredo_off(struct r8152 *tp) 2204static void r8153_teredo_off(struct r8152 *tp)
2211{ 2205{
2212 u32 ocp_data; 2206 u32 ocp_data;
@@ -2249,8 +2243,6 @@ static void r8152b_hw_phy_cfg(struct r8152 *tp)
2249 r8152_mdio_write(tp, MII_BMCR, data); 2243 r8152_mdio_write(tp, MII_BMCR, data);
2250 } 2244 }
2251 2245
2252 rtl_clear_bp(tp);
2253
2254 set_bit(PHY_RESET, &tp->flags); 2246 set_bit(PHY_RESET, &tp->flags);
2255} 2247}
2256 2248
@@ -2401,8 +2393,6 @@ static void r8153_hw_phy_cfg(struct r8152 *tp)
2401 r8152_mdio_write(tp, MII_BMCR, data); 2393 r8152_mdio_write(tp, MII_BMCR, data);
2402 } 2394 }
2403 2395
2404 r8153_clear_bp(tp);
2405
2406 if (tp->version == RTL_VER_03) { 2396 if (tp->version == RTL_VER_03) {
2407 data = ocp_reg_read(tp, OCP_EEE_CFG); 2397 data = ocp_reg_read(tp, OCP_EEE_CFG);
2408 data &= ~CTAP_SHORT_EN; 2398 data &= ~CTAP_SHORT_EN;
@@ -3083,13 +3073,14 @@ static int rtl8152_suspend(struct usb_interface *intf, pm_message_t message)
3083 clear_bit(WORK_ENABLE, &tp->flags); 3073 clear_bit(WORK_ENABLE, &tp->flags);
3084 usb_kill_urb(tp->intr_urb); 3074 usb_kill_urb(tp->intr_urb);
3085 cancel_delayed_work_sync(&tp->schedule); 3075 cancel_delayed_work_sync(&tp->schedule);
3076 tasklet_disable(&tp->tl);
3086 if (test_bit(SELECTIVE_SUSPEND, &tp->flags)) { 3077 if (test_bit(SELECTIVE_SUSPEND, &tp->flags)) {
3078 rtl_stop_rx(tp);
3087 rtl_runtime_suspend_enable(tp, true); 3079 rtl_runtime_suspend_enable(tp, true);
3088 } else { 3080 } else {
3089 tasklet_disable(&tp->tl);
3090 tp->rtl_ops.down(tp); 3081 tp->rtl_ops.down(tp);
3091 tasklet_enable(&tp->tl);
3092 } 3082 }
3083 tasklet_enable(&tp->tl);
3093 } 3084 }
3094 3085
3095 return 0; 3086 return 0;
@@ -3108,17 +3099,18 @@ static int rtl8152_resume(struct usb_interface *intf)
3108 if (test_bit(SELECTIVE_SUSPEND, &tp->flags)) { 3099 if (test_bit(SELECTIVE_SUSPEND, &tp->flags)) {
3109 rtl_runtime_suspend_enable(tp, false); 3100 rtl_runtime_suspend_enable(tp, false);
3110 clear_bit(SELECTIVE_SUSPEND, &tp->flags); 3101 clear_bit(SELECTIVE_SUSPEND, &tp->flags);
3102 set_bit(WORK_ENABLE, &tp->flags);
3111 if (tp->speed & LINK_STATUS) 3103 if (tp->speed & LINK_STATUS)
3112 tp->rtl_ops.disable(tp); 3104 rtl_start_rx(tp);
3113 } else { 3105 } else {
3114 tp->rtl_ops.up(tp); 3106 tp->rtl_ops.up(tp);
3115 rtl8152_set_speed(tp, AUTONEG_ENABLE, 3107 rtl8152_set_speed(tp, AUTONEG_ENABLE,
3116 tp->mii.supports_gmii ? SPEED_1000 : SPEED_100, 3108 tp->mii.supports_gmii ? SPEED_1000 : SPEED_100,
3117 DUPLEX_FULL); 3109 DUPLEX_FULL);
3110 tp->speed = 0;
3111 netif_carrier_off(tp->netdev);
3112 set_bit(WORK_ENABLE, &tp->flags);
3118 } 3113 }
3119 tp->speed = 0;
3120 netif_carrier_off(tp->netdev);
3121 set_bit(WORK_ENABLE, &tp->flags);
3122 usb_submit_urb(tp->intr_urb, GFP_KERNEL); 3114 usb_submit_urb(tp->intr_urb, GFP_KERNEL);
3123 } 3115 }
3124 3116
@@ -3405,7 +3397,7 @@ static void rtl8153_unload(struct r8152 *tp)
3405 if (test_bit(RTL8152_UNPLUG, &tp->flags)) 3397 if (test_bit(RTL8152_UNPLUG, &tp->flags))
3406 return; 3398 return;
3407 3399
3408 r8153_power_cut_en(tp, true); 3400 r8153_power_cut_en(tp, false);
3409} 3401}
3410 3402
3411static int rtl_ops_init(struct r8152 *tp, const struct usb_device_id *id) 3403static int rtl_ops_init(struct r8152 *tp, const struct usb_device_id *id)
@@ -3558,7 +3550,11 @@ static void rtl8152_disconnect(struct usb_interface *intf)
3558 3550
3559 usb_set_intfdata(intf, NULL); 3551 usb_set_intfdata(intf, NULL);
3560 if (tp) { 3552 if (tp) {
3561 set_bit(RTL8152_UNPLUG, &tp->flags); 3553 struct usb_device *udev = tp->udev;
3554
3555 if (udev->state == USB_STATE_NOTATTACHED)
3556 set_bit(RTL8152_UNPLUG, &tp->flags);
3557
3562 tasklet_kill(&tp->tl); 3558 tasklet_kill(&tp->tl);
3563 unregister_netdev(tp->netdev); 3559 unregister_netdev(tp->netdev);
3564 tp->rtl_ops.unload(tp); 3560 tp->rtl_ops.unload(tp);
diff --git a/drivers/parisc/superio.c b/drivers/parisc/superio.c
index a042d065a0c7..8be2096c8423 100644
--- a/drivers/parisc/superio.c
+++ b/drivers/parisc/superio.c
@@ -395,7 +395,8 @@ static void __init superio_serial_init(void)
395 serial_port.iotype = UPIO_PORT; 395 serial_port.iotype = UPIO_PORT;
396 serial_port.type = PORT_16550A; 396 serial_port.type = PORT_16550A;
397 serial_port.uartclk = 115200*16; 397 serial_port.uartclk = 115200*16;
398 serial_port.fifosize = 16; 398 serial_port.flags = UPF_FIXED_PORT | UPF_FIXED_TYPE |
399 UPF_BOOT_AUTOCONF;
399 400
400 /* serial port #1 */ 401 /* serial port #1 */
401 serial_port.iobase = sio_dev.sp1_base; 402 serial_port.iobase = sio_dev.sp1_base;
diff --git a/drivers/pci/pcie/pme.c b/drivers/pci/pcie/pme.c
index 82e06a86cd77..a9f9c46e5022 100644
--- a/drivers/pci/pcie/pme.c
+++ b/drivers/pci/pcie/pme.c
@@ -41,11 +41,17 @@ static int __init pcie_pme_setup(char *str)
41} 41}
42__setup("pcie_pme=", pcie_pme_setup); 42__setup("pcie_pme=", pcie_pme_setup);
43 43
44enum pme_suspend_level {
45 PME_SUSPEND_NONE = 0,
46 PME_SUSPEND_WAKEUP,
47 PME_SUSPEND_NOIRQ,
48};
49
44struct pcie_pme_service_data { 50struct pcie_pme_service_data {
45 spinlock_t lock; 51 spinlock_t lock;
46 struct pcie_device *srv; 52 struct pcie_device *srv;
47 struct work_struct work; 53 struct work_struct work;
48 bool noirq; /* Don't enable the PME interrupt used by this service. */ 54 enum pme_suspend_level suspend_level;
49}; 55};
50 56
51/** 57/**
@@ -223,7 +229,7 @@ static void pcie_pme_work_fn(struct work_struct *work)
223 spin_lock_irq(&data->lock); 229 spin_lock_irq(&data->lock);
224 230
225 for (;;) { 231 for (;;) {
226 if (data->noirq) 232 if (data->suspend_level != PME_SUSPEND_NONE)
227 break; 233 break;
228 234
229 pcie_capability_read_dword(port, PCI_EXP_RTSTA, &rtsta); 235 pcie_capability_read_dword(port, PCI_EXP_RTSTA, &rtsta);
@@ -250,7 +256,7 @@ static void pcie_pme_work_fn(struct work_struct *work)
250 spin_lock_irq(&data->lock); 256 spin_lock_irq(&data->lock);
251 } 257 }
252 258
253 if (!data->noirq) 259 if (data->suspend_level == PME_SUSPEND_NONE)
254 pcie_pme_interrupt_enable(port, true); 260 pcie_pme_interrupt_enable(port, true);
255 261
256 spin_unlock_irq(&data->lock); 262 spin_unlock_irq(&data->lock);
@@ -367,6 +373,21 @@ static int pcie_pme_probe(struct pcie_device *srv)
367 return ret; 373 return ret;
368} 374}
369 375
376static bool pcie_pme_check_wakeup(struct pci_bus *bus)
377{
378 struct pci_dev *dev;
379
380 if (!bus)
381 return false;
382
383 list_for_each_entry(dev, &bus->devices, bus_list)
384 if (device_may_wakeup(&dev->dev)
385 || pcie_pme_check_wakeup(dev->subordinate))
386 return true;
387
388 return false;
389}
390
370/** 391/**
371 * pcie_pme_suspend - Suspend PCIe PME service device. 392 * pcie_pme_suspend - Suspend PCIe PME service device.
372 * @srv: PCIe service device to suspend. 393 * @srv: PCIe service device to suspend.
@@ -375,11 +396,26 @@ static int pcie_pme_suspend(struct pcie_device *srv)
375{ 396{
376 struct pcie_pme_service_data *data = get_service_data(srv); 397 struct pcie_pme_service_data *data = get_service_data(srv);
377 struct pci_dev *port = srv->port; 398 struct pci_dev *port = srv->port;
399 bool wakeup;
378 400
401 if (device_may_wakeup(&port->dev)) {
402 wakeup = true;
403 } else {
404 down_read(&pci_bus_sem);
405 wakeup = pcie_pme_check_wakeup(port->subordinate);
406 up_read(&pci_bus_sem);
407 }
379 spin_lock_irq(&data->lock); 408 spin_lock_irq(&data->lock);
380 pcie_pme_interrupt_enable(port, false); 409 if (wakeup) {
381 pcie_clear_root_pme_status(port); 410 enable_irq_wake(srv->irq);
382 data->noirq = true; 411 data->suspend_level = PME_SUSPEND_WAKEUP;
412 } else {
413 struct pci_dev *port = srv->port;
414
415 pcie_pme_interrupt_enable(port, false);
416 pcie_clear_root_pme_status(port);
417 data->suspend_level = PME_SUSPEND_NOIRQ;
418 }
383 spin_unlock_irq(&data->lock); 419 spin_unlock_irq(&data->lock);
384 420
385 synchronize_irq(srv->irq); 421 synchronize_irq(srv->irq);
@@ -394,12 +430,17 @@ static int pcie_pme_suspend(struct pcie_device *srv)
394static int pcie_pme_resume(struct pcie_device *srv) 430static int pcie_pme_resume(struct pcie_device *srv)
395{ 431{
396 struct pcie_pme_service_data *data = get_service_data(srv); 432 struct pcie_pme_service_data *data = get_service_data(srv);
397 struct pci_dev *port = srv->port;
398 433
399 spin_lock_irq(&data->lock); 434 spin_lock_irq(&data->lock);
400 data->noirq = false; 435 if (data->suspend_level == PME_SUSPEND_NOIRQ) {
401 pcie_clear_root_pme_status(port); 436 struct pci_dev *port = srv->port;
402 pcie_pme_interrupt_enable(port, true); 437
438 pcie_clear_root_pme_status(port);
439 pcie_pme_interrupt_enable(port, true);
440 } else {
441 disable_irq_wake(srv->irq);
442 }
443 data->suspend_level = PME_SUSPEND_NONE;
403 spin_unlock_irq(&data->lock); 444 spin_unlock_irq(&data->lock);
404 445
405 return 0; 446 return 0;
diff --git a/drivers/platform/x86/fujitsu-laptop.c b/drivers/platform/x86/fujitsu-laptop.c
index 87aa28c4280f..2655d4a988f3 100644
--- a/drivers/platform/x86/fujitsu-laptop.c
+++ b/drivers/platform/x86/fujitsu-laptop.c
@@ -1050,6 +1050,13 @@ static struct acpi_driver acpi_fujitsu_hotkey_driver = {
1050 }, 1050 },
1051}; 1051};
1052 1052
1053static const struct acpi_device_id fujitsu_ids[] __used = {
1054 {ACPI_FUJITSU_HID, 0},
1055 {ACPI_FUJITSU_HOTKEY_HID, 0},
1056 {"", 0}
1057};
1058MODULE_DEVICE_TABLE(acpi, fujitsu_ids);
1059
1053static int __init fujitsu_init(void) 1060static int __init fujitsu_init(void)
1054{ 1061{
1055 int ret, result, max_brightness; 1062 int ret, result, max_brightness;
@@ -1208,12 +1215,3 @@ MODULE_LICENSE("GPL");
1208MODULE_ALIAS("dmi:*:svnFUJITSUSIEMENS:*:pvr:rvnFUJITSU:rnFJNB1D3:*:cvrS6410:*"); 1215MODULE_ALIAS("dmi:*:svnFUJITSUSIEMENS:*:pvr:rvnFUJITSU:rnFJNB1D3:*:cvrS6410:*");
1209MODULE_ALIAS("dmi:*:svnFUJITSUSIEMENS:*:pvr:rvnFUJITSU:rnFJNB1E6:*:cvrS6420:*"); 1216MODULE_ALIAS("dmi:*:svnFUJITSUSIEMENS:*:pvr:rvnFUJITSU:rnFJNB1E6:*:cvrS6420:*");
1210MODULE_ALIAS("dmi:*:svnFUJITSU:*:pvr:rvnFUJITSU:rnFJNB19C:*:cvrS7020:*"); 1217MODULE_ALIAS("dmi:*:svnFUJITSU:*:pvr:rvnFUJITSU:rnFJNB19C:*:cvrS7020:*");
1211
1212static struct pnp_device_id pnp_ids[] __used = {
1213 {.id = "FUJ02bf"},
1214 {.id = "FUJ02B1"},
1215 {.id = "FUJ02E3"},
1216 {.id = ""}
1217};
1218
1219MODULE_DEVICE_TABLE(pnp, pnp_ids);
diff --git a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
index 79788a12712d..02e69e7ee4a3 100644
--- a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
+++ b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
@@ -1647,7 +1647,7 @@ static int cxgbi_inet6addr_handler(struct notifier_block *this,
1647 if (event_dev->priv_flags & IFF_802_1Q_VLAN) 1647 if (event_dev->priv_flags & IFF_802_1Q_VLAN)
1648 event_dev = vlan_dev_real_dev(event_dev); 1648 event_dev = vlan_dev_real_dev(event_dev);
1649 1649
1650 cdev = cxgbi_device_find_by_netdev(event_dev, NULL); 1650 cdev = cxgbi_device_find_by_netdev_rcu(event_dev, NULL);
1651 1651
1652 if (!cdev) 1652 if (!cdev)
1653 return ret; 1653 return ret;
diff --git a/drivers/scsi/cxgbi/libcxgbi.c b/drivers/scsi/cxgbi/libcxgbi.c
index d65df6dc106f..addd1dddce14 100644
--- a/drivers/scsi/cxgbi/libcxgbi.c
+++ b/drivers/scsi/cxgbi/libcxgbi.c
@@ -57,6 +57,9 @@ MODULE_PARM_DESC(dbg_level, "libiscsi debug level (default=0)");
57static LIST_HEAD(cdev_list); 57static LIST_HEAD(cdev_list);
58static DEFINE_MUTEX(cdev_mutex); 58static DEFINE_MUTEX(cdev_mutex);
59 59
60static LIST_HEAD(cdev_rcu_list);
61static DEFINE_SPINLOCK(cdev_rcu_lock);
62
60int cxgbi_device_portmap_create(struct cxgbi_device *cdev, unsigned int base, 63int cxgbi_device_portmap_create(struct cxgbi_device *cdev, unsigned int base,
61 unsigned int max_conn) 64 unsigned int max_conn)
62{ 65{
@@ -142,6 +145,10 @@ struct cxgbi_device *cxgbi_device_register(unsigned int extra,
142 list_add_tail(&cdev->list_head, &cdev_list); 145 list_add_tail(&cdev->list_head, &cdev_list);
143 mutex_unlock(&cdev_mutex); 146 mutex_unlock(&cdev_mutex);
144 147
148 spin_lock(&cdev_rcu_lock);
149 list_add_tail_rcu(&cdev->rcu_node, &cdev_rcu_list);
150 spin_unlock(&cdev_rcu_lock);
151
145 log_debug(1 << CXGBI_DBG_DEV, 152 log_debug(1 << CXGBI_DBG_DEV,
146 "cdev 0x%p, p# %u.\n", cdev, nports); 153 "cdev 0x%p, p# %u.\n", cdev, nports);
147 return cdev; 154 return cdev;
@@ -153,9 +160,16 @@ void cxgbi_device_unregister(struct cxgbi_device *cdev)
153 log_debug(1 << CXGBI_DBG_DEV, 160 log_debug(1 << CXGBI_DBG_DEV,
154 "cdev 0x%p, p# %u,%s.\n", 161 "cdev 0x%p, p# %u,%s.\n",
155 cdev, cdev->nports, cdev->nports ? cdev->ports[0]->name : ""); 162 cdev, cdev->nports, cdev->nports ? cdev->ports[0]->name : "");
163
156 mutex_lock(&cdev_mutex); 164 mutex_lock(&cdev_mutex);
157 list_del(&cdev->list_head); 165 list_del(&cdev->list_head);
158 mutex_unlock(&cdev_mutex); 166 mutex_unlock(&cdev_mutex);
167
168 spin_lock(&cdev_rcu_lock);
169 list_del_rcu(&cdev->rcu_node);
170 spin_unlock(&cdev_rcu_lock);
171 synchronize_rcu();
172
159 cxgbi_device_destroy(cdev); 173 cxgbi_device_destroy(cdev);
160} 174}
161EXPORT_SYMBOL_GPL(cxgbi_device_unregister); 175EXPORT_SYMBOL_GPL(cxgbi_device_unregister);
@@ -167,12 +181,9 @@ void cxgbi_device_unregister_all(unsigned int flag)
167 mutex_lock(&cdev_mutex); 181 mutex_lock(&cdev_mutex);
168 list_for_each_entry_safe(cdev, tmp, &cdev_list, list_head) { 182 list_for_each_entry_safe(cdev, tmp, &cdev_list, list_head) {
169 if ((cdev->flags & flag) == flag) { 183 if ((cdev->flags & flag) == flag) {
170 log_debug(1 << CXGBI_DBG_DEV, 184 mutex_unlock(&cdev_mutex);
171 "cdev 0x%p, p# %u,%s.\n", 185 cxgbi_device_unregister(cdev);
172 cdev, cdev->nports, cdev->nports ? 186 mutex_lock(&cdev_mutex);
173 cdev->ports[0]->name : "");
174 list_del(&cdev->list_head);
175 cxgbi_device_destroy(cdev);
176 } 187 }
177 } 188 }
178 mutex_unlock(&cdev_mutex); 189 mutex_unlock(&cdev_mutex);
@@ -191,6 +202,7 @@ struct cxgbi_device *cxgbi_device_find_by_lldev(void *lldev)
191 } 202 }
192 } 203 }
193 mutex_unlock(&cdev_mutex); 204 mutex_unlock(&cdev_mutex);
205
194 log_debug(1 << CXGBI_DBG_DEV, 206 log_debug(1 << CXGBI_DBG_DEV,
195 "lldev 0x%p, NO match found.\n", lldev); 207 "lldev 0x%p, NO match found.\n", lldev);
196 return NULL; 208 return NULL;
@@ -230,6 +242,39 @@ struct cxgbi_device *cxgbi_device_find_by_netdev(struct net_device *ndev,
230} 242}
231EXPORT_SYMBOL_GPL(cxgbi_device_find_by_netdev); 243EXPORT_SYMBOL_GPL(cxgbi_device_find_by_netdev);
232 244
245struct cxgbi_device *cxgbi_device_find_by_netdev_rcu(struct net_device *ndev,
246 int *port)
247{
248 struct net_device *vdev = NULL;
249 struct cxgbi_device *cdev;
250 int i;
251
252 if (ndev->priv_flags & IFF_802_1Q_VLAN) {
253 vdev = ndev;
254 ndev = vlan_dev_real_dev(ndev);
255 pr_info("vlan dev %s -> %s.\n", vdev->name, ndev->name);
256 }
257
258 rcu_read_lock();
259 list_for_each_entry_rcu(cdev, &cdev_rcu_list, rcu_node) {
260 for (i = 0; i < cdev->nports; i++) {
261 if (ndev == cdev->ports[i]) {
262 cdev->hbas[i]->vdev = vdev;
263 rcu_read_unlock();
264 if (port)
265 *port = i;
266 return cdev;
267 }
268 }
269 }
270 rcu_read_unlock();
271
272 log_debug(1 << CXGBI_DBG_DEV,
273 "ndev 0x%p, %s, NO match found.\n", ndev, ndev->name);
274 return NULL;
275}
276EXPORT_SYMBOL_GPL(cxgbi_device_find_by_netdev_rcu);
277
233static struct cxgbi_device *cxgbi_device_find_by_mac(struct net_device *ndev, 278static struct cxgbi_device *cxgbi_device_find_by_mac(struct net_device *ndev,
234 int *port) 279 int *port)
235{ 280{
diff --git a/drivers/scsi/cxgbi/libcxgbi.h b/drivers/scsi/cxgbi/libcxgbi.h
index b3e6e7541cc5..1d98fad6a0ab 100644
--- a/drivers/scsi/cxgbi/libcxgbi.h
+++ b/drivers/scsi/cxgbi/libcxgbi.h
@@ -527,6 +527,7 @@ struct cxgbi_ports_map {
527#define CXGBI_FLAG_IPV4_SET 0x10 527#define CXGBI_FLAG_IPV4_SET 0x10
528struct cxgbi_device { 528struct cxgbi_device {
529 struct list_head list_head; 529 struct list_head list_head;
530 struct list_head rcu_node;
530 unsigned int flags; 531 unsigned int flags;
531 struct net_device **ports; 532 struct net_device **ports;
532 void *lldev; 533 void *lldev;
@@ -709,6 +710,8 @@ void cxgbi_device_unregister(struct cxgbi_device *);
709void cxgbi_device_unregister_all(unsigned int flag); 710void cxgbi_device_unregister_all(unsigned int flag);
710struct cxgbi_device *cxgbi_device_find_by_lldev(void *); 711struct cxgbi_device *cxgbi_device_find_by_lldev(void *);
711struct cxgbi_device *cxgbi_device_find_by_netdev(struct net_device *, int *); 712struct cxgbi_device *cxgbi_device_find_by_netdev(struct net_device *, int *);
713struct cxgbi_device *cxgbi_device_find_by_netdev_rcu(struct net_device *,
714 int *);
712int cxgbi_hbas_add(struct cxgbi_device *, u64, unsigned int, 715int cxgbi_hbas_add(struct cxgbi_device *, u64, unsigned int,
713 struct scsi_host_template *, 716 struct scsi_host_template *,
714 struct scsi_transport_template *); 717 struct scsi_transport_template *);
diff --git a/drivers/usb/storage/uas.c b/drivers/usb/storage/uas.c
index 3f42785f653c..9bfa7252f7f9 100644
--- a/drivers/usb/storage/uas.c
+++ b/drivers/usb/storage/uas.c
@@ -970,6 +970,13 @@ static struct scsi_host_template uas_host_template = {
970 .cmd_per_lun = 1, /* until we override it */ 970 .cmd_per_lun = 1, /* until we override it */
971 .skip_settle_delay = 1, 971 .skip_settle_delay = 1,
972 .ordered_tag = 1, 972 .ordered_tag = 1,
973
974 /*
975 * The uas driver expects tags not to be bigger than the maximum
976 * per-device queue depth, which is not true with the blk-mq tag
977 * allocator.
978 */
979 .disable_blk_mq = true,
973}; 980};
974 981
975#define UNUSUAL_DEV(id_vendor, id_product, bcdDeviceMin, bcdDeviceMax, \ 982#define UNUSUAL_DEV(id_vendor, id_product, bcdDeviceMin, bcdDeviceMax, \
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index 7c018a1c52f7..5f29354b072a 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -3568,15 +3568,9 @@ static int cifs_readpages(struct file *file, struct address_space *mapping,
3568 lru_cache_add_file(page); 3568 lru_cache_add_file(page);
3569 unlock_page(page); 3569 unlock_page(page);
3570 page_cache_release(page); 3570 page_cache_release(page);
3571 if (rc == -EAGAIN)
3572 list_add_tail(&page->lru, &tmplist);
3573 } 3571 }
3572 /* Fall back to the readpage in error/reconnect cases */
3574 kref_put(&rdata->refcount, cifs_readdata_release); 3573 kref_put(&rdata->refcount, cifs_readdata_release);
3575 if (rc == -EAGAIN) {
3576 /* Re-add pages to the page_list and retry */
3577 list_splice(&tmplist, page_list);
3578 continue;
3579 }
3580 break; 3574 break;
3581 } 3575 }
3582 3576
diff --git a/fs/cifs/smb1ops.c b/fs/cifs/smb1ops.c
index 1a6df4b03f67..52131d8cb4d5 100644
--- a/fs/cifs/smb1ops.c
+++ b/fs/cifs/smb1ops.c
@@ -586,7 +586,7 @@ cifs_query_path_info(const unsigned int xid, struct cifs_tcon *tcon,
586 tmprc = CIFS_open(xid, &oparms, &oplock, NULL); 586 tmprc = CIFS_open(xid, &oparms, &oplock, NULL);
587 if (tmprc == -EOPNOTSUPP) 587 if (tmprc == -EOPNOTSUPP)
588 *symlink = true; 588 *symlink = true;
589 else 589 else if (tmprc == 0)
590 CIFSSMBClose(xid, tcon, fid.netfid); 590 CIFSSMBClose(xid, tcon, fid.netfid);
591 } 591 }
592 592
diff --git a/fs/cifs/smb2maperror.c b/fs/cifs/smb2maperror.c
index af59d03db492..8257a5a97cc0 100644
--- a/fs/cifs/smb2maperror.c
+++ b/fs/cifs/smb2maperror.c
@@ -256,6 +256,8 @@ static const struct status_to_posix_error smb2_error_map_table[] = {
256 {STATUS_DLL_MIGHT_BE_INCOMPATIBLE, -EIO, 256 {STATUS_DLL_MIGHT_BE_INCOMPATIBLE, -EIO,
257 "STATUS_DLL_MIGHT_BE_INCOMPATIBLE"}, 257 "STATUS_DLL_MIGHT_BE_INCOMPATIBLE"},
258 {STATUS_STOPPED_ON_SYMLINK, -EOPNOTSUPP, "STATUS_STOPPED_ON_SYMLINK"}, 258 {STATUS_STOPPED_ON_SYMLINK, -EOPNOTSUPP, "STATUS_STOPPED_ON_SYMLINK"},
259 {STATUS_IO_REPARSE_TAG_NOT_HANDLED, -EOPNOTSUPP,
260 "STATUS_REPARSE_NOT_HANDLED"},
259 {STATUS_DEVICE_REQUIRES_CLEANING, -EIO, 261 {STATUS_DEVICE_REQUIRES_CLEANING, -EIO,
260 "STATUS_DEVICE_REQUIRES_CLEANING"}, 262 "STATUS_DEVICE_REQUIRES_CLEANING"},
261 {STATUS_DEVICE_DOOR_OPEN, -EIO, "STATUS_DEVICE_DOOR_OPEN"}, 263 {STATUS_DEVICE_DOOR_OPEN, -EIO, "STATUS_DEVICE_DOOR_OPEN"},
diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
index e94457c33ad6..b01f6e100ee8 100644
--- a/fs/nfsd/nfs4xdr.c
+++ b/fs/nfsd/nfs4xdr.c
@@ -3104,7 +3104,8 @@ static __be32 nfsd4_encode_splice_read(
3104 3104
3105 buf->page_len = maxcount; 3105 buf->page_len = maxcount;
3106 buf->len += maxcount; 3106 buf->len += maxcount;
3107 xdr->page_ptr += (maxcount + PAGE_SIZE - 1) / PAGE_SIZE; 3107 xdr->page_ptr += (buf->page_base + maxcount + PAGE_SIZE - 1)
3108 / PAGE_SIZE;
3108 3109
3109 /* Use rest of head for padding and remaining ops: */ 3110 /* Use rest of head for padding and remaining ops: */
3110 buf->tail[0].iov_base = xdr->p; 3111 buf->tail[0].iov_base = xdr->p;
diff --git a/fs/ocfs2/dlm/dlmmaster.c b/fs/ocfs2/dlm/dlmmaster.c
index e3cfa0227026..12ba682fc53c 100644
--- a/fs/ocfs2/dlm/dlmmaster.c
+++ b/fs/ocfs2/dlm/dlmmaster.c
@@ -2039,6 +2039,10 @@ kill:
2039 "and killing the other node now! This node is OK and can continue.\n"); 2039 "and killing the other node now! This node is OK and can continue.\n");
2040 __dlm_print_one_lock_resource(res); 2040 __dlm_print_one_lock_resource(res);
2041 spin_unlock(&res->spinlock); 2041 spin_unlock(&res->spinlock);
2042 spin_lock(&dlm->master_lock);
2043 if (mle)
2044 __dlm_put_mle(mle);
2045 spin_unlock(&dlm->master_lock);
2042 spin_unlock(&dlm->spinlock); 2046 spin_unlock(&dlm->spinlock);
2043 *ret_data = (void *)res; 2047 *ret_data = (void *)res;
2044 dlm_put(dlm); 2048 dlm_put(dlm);
diff --git a/include/acpi/acnames.h b/include/acpi/acnames.h
index c728113374f5..f97804bdf1ff 100644
--- a/include/acpi/acnames.h
+++ b/include/acpi/acnames.h
@@ -59,6 +59,10 @@
59#define METHOD_NAME__PRS "_PRS" 59#define METHOD_NAME__PRS "_PRS"
60#define METHOD_NAME__PRT "_PRT" 60#define METHOD_NAME__PRT "_PRT"
61#define METHOD_NAME__PRW "_PRW" 61#define METHOD_NAME__PRW "_PRW"
62#define METHOD_NAME__PS0 "_PS0"
63#define METHOD_NAME__PS1 "_PS1"
64#define METHOD_NAME__PS2 "_PS2"
65#define METHOD_NAME__PS3 "_PS3"
62#define METHOD_NAME__REG "_REG" 66#define METHOD_NAME__REG "_REG"
63#define METHOD_NAME__SB_ "_SB_" 67#define METHOD_NAME__SB_ "_SB_"
64#define METHOD_NAME__SEG "_SEG" 68#define METHOD_NAME__SEG "_SEG"
diff --git a/include/acpi/acpixf.h b/include/acpi/acpixf.h
index b7c89d47efbe..9fc1d71c82bc 100644
--- a/include/acpi/acpixf.h
+++ b/include/acpi/acpixf.h
@@ -46,7 +46,7 @@
46 46
47/* Current ACPICA subsystem version in YYYYMMDD format */ 47/* Current ACPICA subsystem version in YYYYMMDD format */
48 48
49#define ACPI_CA_VERSION 0x20140724 49#define ACPI_CA_VERSION 0x20140828
50 50
51#include <acpi/acconfig.h> 51#include <acpi/acconfig.h>
52#include <acpi/actypes.h> 52#include <acpi/actypes.h>
@@ -692,6 +692,7 @@ ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status
692 *event_status)) 692 *event_status))
693ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status acpi_disable_all_gpes(void)) 693ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status acpi_disable_all_gpes(void))
694ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status acpi_enable_all_runtime_gpes(void)) 694ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status acpi_enable_all_runtime_gpes(void))
695ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status acpi_enable_all_wakeup_gpes(void))
695 696
696ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status 697ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status
697 acpi_get_gpe_device(u32 gpe_index, 698 acpi_get_gpe_device(u32 gpe_index,
diff --git a/include/acpi/actbl1.h b/include/acpi/actbl1.h
index 7626bfeac2cb..29e79370641d 100644
--- a/include/acpi/actbl1.h
+++ b/include/acpi/actbl1.h
@@ -952,7 +952,8 @@ enum acpi_srat_type {
952 ACPI_SRAT_TYPE_CPU_AFFINITY = 0, 952 ACPI_SRAT_TYPE_CPU_AFFINITY = 0,
953 ACPI_SRAT_TYPE_MEMORY_AFFINITY = 1, 953 ACPI_SRAT_TYPE_MEMORY_AFFINITY = 1,
954 ACPI_SRAT_TYPE_X2APIC_CPU_AFFINITY = 2, 954 ACPI_SRAT_TYPE_X2APIC_CPU_AFFINITY = 2,
955 ACPI_SRAT_TYPE_RESERVED = 3 /* 3 and greater are reserved */ 955 ACPI_SRAT_TYPE_GICC_AFFINITY = 3,
956 ACPI_SRAT_TYPE_RESERVED = 4 /* 4 and greater are reserved */
956}; 957};
957 958
958/* 959/*
@@ -968,7 +969,7 @@ struct acpi_srat_cpu_affinity {
968 u32 flags; 969 u32 flags;
969 u8 local_sapic_eid; 970 u8 local_sapic_eid;
970 u8 proximity_domain_hi[3]; 971 u8 proximity_domain_hi[3];
971 u32 reserved; /* Reserved, must be zero */ 972 u32 clock_domain;
972}; 973};
973 974
974/* Flags */ 975/* Flags */
@@ -1010,6 +1011,20 @@ struct acpi_srat_x2apic_cpu_affinity {
1010 1011
1011#define ACPI_SRAT_CPU_ENABLED (1) /* 00: Use affinity structure */ 1012#define ACPI_SRAT_CPU_ENABLED (1) /* 00: Use affinity structure */
1012 1013
1014/* 3: GICC Affinity (ACPI 5.1) */
1015
1016struct acpi_srat_gicc_affinity {
1017 struct acpi_subtable_header header;
1018 u32 proximity_domain;
1019 u32 acpi_processor_uid;
1020 u32 flags;
1021 u32 clock_domain;
1022};
1023
1024/* Flags for struct acpi_srat_gicc_affinity */
1025
1026#define ACPI_SRAT_GICC_ENABLED (1) /* 00: Use affinity structure */
1027
1013/* Reset to default packing */ 1028/* Reset to default packing */
1014 1029
1015#pragma pack() 1030#pragma pack()
diff --git a/include/acpi/actbl3.h b/include/acpi/actbl3.h
index 787bcc814463..5480cb2236bf 100644
--- a/include/acpi/actbl3.h
+++ b/include/acpi/actbl3.h
@@ -310,10 +310,15 @@ struct acpi_gtdt_timer_entry {
310 u32 common_flags; 310 u32 common_flags;
311}; 311};
312 312
313/* Flag Definitions: timer_flags and virtual_timer_flags above */
314
315#define ACPI_GTDT_GT_IRQ_MODE (1)
316#define ACPI_GTDT_GT_IRQ_POLARITY (1<<1)
317
313/* Flag Definitions: common_flags above */ 318/* Flag Definitions: common_flags above */
314 319
315#define ACPI_GTDT_GT_IS_SECURE_TIMER (1) 320#define ACPI_GTDT_GT_IS_SECURE_TIMER (1)
316#define ACPI_GTDT_GT_ALWAYS_ON (1<<1) 321#define ACPI_GTDT_GT_ALWAYS_ON (1<<1)
317 322
318/* 1: SBSA Generic Watchdog Structure */ 323/* 1: SBSA Generic Watchdog Structure */
319 324
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index 698ad053d064..69517a24bc50 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -193,11 +193,6 @@ extern void irq_wake_thread(unsigned int irq, void *dev_id);
193/* The following three functions are for the core kernel use only. */ 193/* The following three functions are for the core kernel use only. */
194extern void suspend_device_irqs(void); 194extern void suspend_device_irqs(void);
195extern void resume_device_irqs(void); 195extern void resume_device_irqs(void);
196#ifdef CONFIG_PM_SLEEP
197extern int check_wakeup_irqs(void);
198#else
199static inline int check_wakeup_irqs(void) { return 0; }
200#endif
201 196
202/** 197/**
203 * struct irq_affinity_notify - context for notification of IRQ affinity changes 198 * struct irq_affinity_notify - context for notification of IRQ affinity changes
diff --git a/include/linux/irq.h b/include/linux/irq.h
index 62af59242ddc..03f48d936f66 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -173,6 +173,7 @@ struct irq_data {
173 * IRQD_IRQ_DISABLED - Disabled state of the interrupt 173 * IRQD_IRQ_DISABLED - Disabled state of the interrupt
174 * IRQD_IRQ_MASKED - Masked state of the interrupt 174 * IRQD_IRQ_MASKED - Masked state of the interrupt
175 * IRQD_IRQ_INPROGRESS - In progress state of the interrupt 175 * IRQD_IRQ_INPROGRESS - In progress state of the interrupt
176 * IRQD_WAKEUP_ARMED - Wakeup mode armed
176 */ 177 */
177enum { 178enum {
178 IRQD_TRIGGER_MASK = 0xf, 179 IRQD_TRIGGER_MASK = 0xf,
@@ -186,6 +187,7 @@ enum {
186 IRQD_IRQ_DISABLED = (1 << 16), 187 IRQD_IRQ_DISABLED = (1 << 16),
187 IRQD_IRQ_MASKED = (1 << 17), 188 IRQD_IRQ_MASKED = (1 << 17),
188 IRQD_IRQ_INPROGRESS = (1 << 18), 189 IRQD_IRQ_INPROGRESS = (1 << 18),
190 IRQD_WAKEUP_ARMED = (1 << 19),
189}; 191};
190 192
191static inline bool irqd_is_setaffinity_pending(struct irq_data *d) 193static inline bool irqd_is_setaffinity_pending(struct irq_data *d)
@@ -257,6 +259,12 @@ static inline bool irqd_irq_inprogress(struct irq_data *d)
257 return d->state_use_accessors & IRQD_IRQ_INPROGRESS; 259 return d->state_use_accessors & IRQD_IRQ_INPROGRESS;
258} 260}
259 261
262static inline bool irqd_is_wakeup_armed(struct irq_data *d)
263{
264 return d->state_use_accessors & IRQD_WAKEUP_ARMED;
265}
266
267
260/* 268/*
261 * Functions for chained handlers which can be enabled/disabled by the 269 * Functions for chained handlers which can be enabled/disabled by the
262 * standard disable_irq/enable_irq calls. Must be called with 270 * standard disable_irq/enable_irq calls. Must be called with
diff --git a/include/linux/irqdesc.h b/include/linux/irqdesc.h
index 472c021a2d4f..cb1a31e448ae 100644
--- a/include/linux/irqdesc.h
+++ b/include/linux/irqdesc.h
@@ -36,6 +36,11 @@ struct irq_desc;
36 * @threads_oneshot: bitfield to handle shared oneshot threads 36 * @threads_oneshot: bitfield to handle shared oneshot threads
37 * @threads_active: number of irqaction threads currently running 37 * @threads_active: number of irqaction threads currently running
38 * @wait_for_threads: wait queue for sync_irq to wait for threaded handlers 38 * @wait_for_threads: wait queue for sync_irq to wait for threaded handlers
39 * @nr_actions: number of installed actions on this descriptor
40 * @no_suspend_depth: number of irqactions on an irq descriptor with
41 * IRQF_NO_SUSPEND set
42 * @force_resume_depth: number of irqactions on an irq descriptor with
43 * IRQF_FORCE_RESUME set
39 * @dir: /proc/irq/ procfs entry 44 * @dir: /proc/irq/ procfs entry
40 * @name: flow handler name for /proc/interrupts output 45 * @name: flow handler name for /proc/interrupts output
41 */ 46 */
@@ -68,6 +73,11 @@ struct irq_desc {
68 unsigned long threads_oneshot; 73 unsigned long threads_oneshot;
69 atomic_t threads_active; 74 atomic_t threads_active;
70 wait_queue_head_t wait_for_threads; 75 wait_queue_head_t wait_for_threads;
76#ifdef CONFIG_PM_SLEEP
77 unsigned int nr_actions;
78 unsigned int no_suspend_depth;
79 unsigned int force_resume_depth;
80#endif
71#ifdef CONFIG_PROC_FS 81#ifdef CONFIG_PROC_FS
72 struct proc_dir_entry *dir; 82 struct proc_dir_entry *dir;
73#endif 83#endif
diff --git a/include/linux/suspend.h b/include/linux/suspend.h
index 519064e0c943..06a9910827c2 100644
--- a/include/linux/suspend.h
+++ b/include/linux/suspend.h
@@ -371,6 +371,8 @@ extern int unregister_pm_notifier(struct notifier_block *nb);
371extern bool events_check_enabled; 371extern bool events_check_enabled;
372 372
373extern bool pm_wakeup_pending(void); 373extern bool pm_wakeup_pending(void);
374extern void pm_system_wakeup(void);
375extern void pm_wakeup_clear(void);
374extern bool pm_get_wakeup_count(unsigned int *count, bool block); 376extern bool pm_get_wakeup_count(unsigned int *count, bool block);
375extern bool pm_save_wakeup_count(unsigned int count); 377extern bool pm_save_wakeup_count(unsigned int count);
376extern void pm_wakep_autosleep_enabled(bool set); 378extern void pm_wakep_autosleep_enabled(bool set);
@@ -418,6 +420,8 @@ static inline int unregister_pm_notifier(struct notifier_block *nb)
418#define pm_notifier(fn, pri) do { (void)(fn); } while (0) 420#define pm_notifier(fn, pri) do { (void)(fn); } while (0)
419 421
420static inline bool pm_wakeup_pending(void) { return false; } 422static inline bool pm_wakeup_pending(void) { return false; }
423static inline void pm_system_wakeup(void) {}
424static inline void pm_wakeup_clear(void) {}
421 425
422static inline void lock_system_sleep(void) {} 426static inline void lock_system_sleep(void) {}
423static inline void unlock_system_sleep(void) {} 427static inline void unlock_system_sleep(void) {}
diff --git a/include/net/ip6_fib.h b/include/net/ip6_fib.h
index 9bcb220bd4ad..cf485f9aa563 100644
--- a/include/net/ip6_fib.h
+++ b/include/net/ip6_fib.h
@@ -114,16 +114,13 @@ struct rt6_info {
114 u32 rt6i_flags; 114 u32 rt6i_flags;
115 struct rt6key rt6i_src; 115 struct rt6key rt6i_src;
116 struct rt6key rt6i_prefsrc; 116 struct rt6key rt6i_prefsrc;
117 u32 rt6i_metric;
118 117
119 struct inet6_dev *rt6i_idev; 118 struct inet6_dev *rt6i_idev;
120 unsigned long _rt6i_peer; 119 unsigned long _rt6i_peer;
121 120
122 u32 rt6i_genid; 121 u32 rt6i_metric;
123
124 /* more non-fragment space at head required */ 122 /* more non-fragment space at head required */
125 unsigned short rt6i_nfheader_len; 123 unsigned short rt6i_nfheader_len;
126
127 u8 rt6i_protocol; 124 u8 rt6i_protocol;
128}; 125};
129 126
diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h
index 361d26077196..e0d64667a4b3 100644
--- a/include/net/net_namespace.h
+++ b/include/net/net_namespace.h
@@ -352,26 +352,12 @@ static inline void rt_genid_bump_ipv4(struct net *net)
352 atomic_inc(&net->ipv4.rt_genid); 352 atomic_inc(&net->ipv4.rt_genid);
353} 353}
354 354
355#if IS_ENABLED(CONFIG_IPV6) 355extern void (*__fib6_flush_trees)(struct net *net);
356static inline int rt_genid_ipv6(struct net *net)
357{
358 return atomic_read(&net->ipv6.rt_genid);
359}
360
361static inline void rt_genid_bump_ipv6(struct net *net)
362{
363 atomic_inc(&net->ipv6.rt_genid);
364}
365#else
366static inline int rt_genid_ipv6(struct net *net)
367{
368 return 0;
369}
370
371static inline void rt_genid_bump_ipv6(struct net *net) 356static inline void rt_genid_bump_ipv6(struct net *net)
372{ 357{
358 if (__fib6_flush_trees)
359 __fib6_flush_trees(net);
373} 360}
374#endif
375 361
376#if IS_ENABLED(CONFIG_IEEE802154_6LOWPAN) 362#if IS_ENABLED(CONFIG_IEEE802154_6LOWPAN)
377static inline struct netns_ieee802154_lowpan * 363static inline struct netns_ieee802154_lowpan *
diff --git a/init/Kconfig b/init/Kconfig
index e84c6423a2e5..80a6907f91c5 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -811,6 +811,7 @@ config LOG_BUF_SHIFT
811 int "Kernel log buffer size (16 => 64KB, 17 => 128KB)" 811 int "Kernel log buffer size (16 => 64KB, 17 => 128KB)"
812 range 12 21 812 range 12 21
813 default 17 813 default 17
814 depends on PRINTK
814 help 815 help
815 Select the minimal kernel log buffer size as a power of 2. 816 Select the minimal kernel log buffer size as a power of 2.
816 The final size is affected by LOG_CPU_MAX_BUF_SHIFT config 817 The final size is affected by LOG_CPU_MAX_BUF_SHIFT config
@@ -830,6 +831,7 @@ config LOG_CPU_MAX_BUF_SHIFT
830 range 0 21 831 range 0 21
831 default 12 if !BASE_SMALL 832 default 12 if !BASE_SMALL
832 default 0 if BASE_SMALL 833 default 0 if BASE_SMALL
834 depends on PRINTK
833 help 835 help
834 This option allows to increase the default ring buffer size 836 This option allows to increase the default ring buffer size
835 according to the number of CPUs. The value defines the contribution 837 according to the number of CPUs. The value defines the contribution
@@ -1475,6 +1477,7 @@ config FUTEX
1475 1477
1476config HAVE_FUTEX_CMPXCHG 1478config HAVE_FUTEX_CMPXCHG
1477 bool 1479 bool
1480 depends on FUTEX
1478 help 1481 help
1479 Architectures should select this if futex_atomic_cmpxchg_inatomic() 1482 Architectures should select this if futex_atomic_cmpxchg_inatomic()
1480 is implemented and always working. This removes a couple of runtime 1483 is implemented and always working. This removes a couple of runtime
diff --git a/kernel/events/core.c b/kernel/events/core.c
index d640a8b4dcbc..963bf139e2b2 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -7948,8 +7948,10 @@ int perf_event_init_task(struct task_struct *child)
7948 7948
7949 for_each_task_context_nr(ctxn) { 7949 for_each_task_context_nr(ctxn) {
7950 ret = perf_event_init_context(child, ctxn); 7950 ret = perf_event_init_context(child, ctxn);
7951 if (ret) 7951 if (ret) {
7952 perf_event_free_task(child);
7952 return ret; 7953 return ret;
7954 }
7953 } 7955 }
7954 7956
7955 return 0; 7957 return 0;
diff --git a/kernel/fork.c b/kernel/fork.c
index 0cf9cdb6e491..a91e47d86de2 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -1360,7 +1360,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
1360 goto bad_fork_cleanup_policy; 1360 goto bad_fork_cleanup_policy;
1361 retval = audit_alloc(p); 1361 retval = audit_alloc(p);
1362 if (retval) 1362 if (retval)
1363 goto bad_fork_cleanup_policy; 1363 goto bad_fork_cleanup_perf;
1364 /* copy all the process information */ 1364 /* copy all the process information */
1365 shm_init_task(p); 1365 shm_init_task(p);
1366 retval = copy_semundo(clone_flags, p); 1366 retval = copy_semundo(clone_flags, p);
@@ -1566,8 +1566,9 @@ bad_fork_cleanup_semundo:
1566 exit_sem(p); 1566 exit_sem(p);
1567bad_fork_cleanup_audit: 1567bad_fork_cleanup_audit:
1568 audit_free(p); 1568 audit_free(p);
1569bad_fork_cleanup_policy: 1569bad_fork_cleanup_perf:
1570 perf_event_free_task(p); 1570 perf_event_free_task(p);
1571bad_fork_cleanup_policy:
1571#ifdef CONFIG_NUMA 1572#ifdef CONFIG_NUMA
1572 mpol_put(p->mempolicy); 1573 mpol_put(p->mempolicy);
1573bad_fork_cleanup_threadgroup_lock: 1574bad_fork_cleanup_threadgroup_lock:
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
index 6223fab9a9d2..8fb52e9bddc1 100644
--- a/kernel/irq/chip.c
+++ b/kernel/irq/chip.c
@@ -342,6 +342,31 @@ static bool irq_check_poll(struct irq_desc *desc)
342 return irq_wait_for_poll(desc); 342 return irq_wait_for_poll(desc);
343} 343}
344 344
345static bool irq_may_run(struct irq_desc *desc)
346{
347 unsigned int mask = IRQD_IRQ_INPROGRESS | IRQD_WAKEUP_ARMED;
348
349 /*
350 * If the interrupt is not in progress and is not an armed
351 * wakeup interrupt, proceed.
352 */
353 if (!irqd_has_set(&desc->irq_data, mask))
354 return true;
355
356 /*
357 * If the interrupt is an armed wakeup source, mark it pending
358 * and suspended, disable it and notify the pm core about the
359 * event.
360 */
361 if (irq_pm_check_wakeup(desc))
362 return false;
363
364 /*
365 * Handle a potential concurrent poll on a different core.
366 */
367 return irq_check_poll(desc);
368}
369
345/** 370/**
346 * handle_simple_irq - Simple and software-decoded IRQs. 371 * handle_simple_irq - Simple and software-decoded IRQs.
347 * @irq: the interrupt number 372 * @irq: the interrupt number
@@ -359,9 +384,8 @@ handle_simple_irq(unsigned int irq, struct irq_desc *desc)
359{ 384{
360 raw_spin_lock(&desc->lock); 385 raw_spin_lock(&desc->lock);
361 386
362 if (unlikely(irqd_irq_inprogress(&desc->irq_data))) 387 if (!irq_may_run(desc))
363 if (!irq_check_poll(desc)) 388 goto out_unlock;
364 goto out_unlock;
365 389
366 desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING); 390 desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
367 kstat_incr_irqs_this_cpu(irq, desc); 391 kstat_incr_irqs_this_cpu(irq, desc);
@@ -412,9 +436,8 @@ handle_level_irq(unsigned int irq, struct irq_desc *desc)
412 raw_spin_lock(&desc->lock); 436 raw_spin_lock(&desc->lock);
413 mask_ack_irq(desc); 437 mask_ack_irq(desc);
414 438
415 if (unlikely(irqd_irq_inprogress(&desc->irq_data))) 439 if (!irq_may_run(desc))
416 if (!irq_check_poll(desc)) 440 goto out_unlock;
417 goto out_unlock;
418 441
419 desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING); 442 desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
420 kstat_incr_irqs_this_cpu(irq, desc); 443 kstat_incr_irqs_this_cpu(irq, desc);
@@ -485,9 +508,8 @@ handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc)
485 508
486 raw_spin_lock(&desc->lock); 509 raw_spin_lock(&desc->lock);
487 510
488 if (unlikely(irqd_irq_inprogress(&desc->irq_data))) 511 if (!irq_may_run(desc))
489 if (!irq_check_poll(desc)) 512 goto out;
490 goto out;
491 513
492 desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING); 514 desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
493 kstat_incr_irqs_this_cpu(irq, desc); 515 kstat_incr_irqs_this_cpu(irq, desc);
@@ -541,19 +563,23 @@ handle_edge_irq(unsigned int irq, struct irq_desc *desc)
541 raw_spin_lock(&desc->lock); 563 raw_spin_lock(&desc->lock);
542 564
543 desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING); 565 desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
566
567 if (!irq_may_run(desc)) {
568 desc->istate |= IRQS_PENDING;
569 mask_ack_irq(desc);
570 goto out_unlock;
571 }
572
544 /* 573 /*
545 * If we're currently running this IRQ, or its disabled, 574 * If it's disabled or no action available then mask it and get
546 * we shouldn't process the IRQ. Mark it pending, handle 575 * out of here.
547 * the necessary masking and go out
548 */ 576 */
549 if (unlikely(irqd_irq_disabled(&desc->irq_data) || 577 if (irqd_irq_disabled(&desc->irq_data) || !desc->action) {
550 irqd_irq_inprogress(&desc->irq_data) || !desc->action)) { 578 desc->istate |= IRQS_PENDING;
551 if (!irq_check_poll(desc)) { 579 mask_ack_irq(desc);
552 desc->istate |= IRQS_PENDING; 580 goto out_unlock;
553 mask_ack_irq(desc);
554 goto out_unlock;
555 }
556 } 581 }
582
557 kstat_incr_irqs_this_cpu(irq, desc); 583 kstat_incr_irqs_this_cpu(irq, desc);
558 584
559 /* Start handling the irq */ 585 /* Start handling the irq */
@@ -602,18 +628,21 @@ void handle_edge_eoi_irq(unsigned int irq, struct irq_desc *desc)
602 raw_spin_lock(&desc->lock); 628 raw_spin_lock(&desc->lock);
603 629
604 desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING); 630 desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
631
632 if (!irq_may_run(desc)) {
633 desc->istate |= IRQS_PENDING;
634 goto out_eoi;
635 }
636
605 /* 637 /*
606 * If we're currently running this IRQ, or its disabled, 638 * If it's disabled or no action available then mask it and get
607 * we shouldn't process the IRQ. Mark it pending, handle 639 * out of here.
608 * the necessary masking and go out
609 */ 640 */
610 if (unlikely(irqd_irq_disabled(&desc->irq_data) || 641 if (irqd_irq_disabled(&desc->irq_data) || !desc->action) {
611 irqd_irq_inprogress(&desc->irq_data) || !desc->action)) { 642 desc->istate |= IRQS_PENDING;
612 if (!irq_check_poll(desc)) { 643 goto out_eoi;
613 desc->istate |= IRQS_PENDING;
614 goto out_eoi;
615 }
616 } 644 }
645
617 kstat_incr_irqs_this_cpu(irq, desc); 646 kstat_incr_irqs_this_cpu(irq, desc);
618 647
619 do { 648 do {
diff --git a/kernel/irq/internals.h b/kernel/irq/internals.h
index 099ea2e0eb88..4332d766619d 100644
--- a/kernel/irq/internals.h
+++ b/kernel/irq/internals.h
@@ -63,8 +63,8 @@ enum {
63 63
64extern int __irq_set_trigger(struct irq_desc *desc, unsigned int irq, 64extern int __irq_set_trigger(struct irq_desc *desc, unsigned int irq,
65 unsigned long flags); 65 unsigned long flags);
66extern void __disable_irq(struct irq_desc *desc, unsigned int irq, bool susp); 66extern void __disable_irq(struct irq_desc *desc, unsigned int irq);
67extern void __enable_irq(struct irq_desc *desc, unsigned int irq, bool resume); 67extern void __enable_irq(struct irq_desc *desc, unsigned int irq);
68 68
69extern int irq_startup(struct irq_desc *desc, bool resend); 69extern int irq_startup(struct irq_desc *desc, bool resend);
70extern void irq_shutdown(struct irq_desc *desc); 70extern void irq_shutdown(struct irq_desc *desc);
@@ -194,3 +194,15 @@ static inline void kstat_incr_irqs_this_cpu(unsigned int irq, struct irq_desc *d
194 __this_cpu_inc(*desc->kstat_irqs); 194 __this_cpu_inc(*desc->kstat_irqs);
195 __this_cpu_inc(kstat.irqs_sum); 195 __this_cpu_inc(kstat.irqs_sum);
196} 196}
197
198#ifdef CONFIG_PM_SLEEP
199bool irq_pm_check_wakeup(struct irq_desc *desc);
200void irq_pm_install_action(struct irq_desc *desc, struct irqaction *action);
201void irq_pm_remove_action(struct irq_desc *desc, struct irqaction *action);
202#else
203static inline bool irq_pm_check_wakeup(struct irq_desc *desc) { return false; }
204static inline void
205irq_pm_install_action(struct irq_desc *desc, struct irqaction *action) { }
206static inline void
207irq_pm_remove_action(struct irq_desc *desc, struct irqaction *action) { }
208#endif
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 3dc6a61bf06a..0a9104b4608b 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -382,14 +382,8 @@ setup_affinity(unsigned int irq, struct irq_desc *desc, struct cpumask *mask)
382} 382}
383#endif 383#endif
384 384
385void __disable_irq(struct irq_desc *desc, unsigned int irq, bool suspend) 385void __disable_irq(struct irq_desc *desc, unsigned int irq)
386{ 386{
387 if (suspend) {
388 if (!desc->action || (desc->action->flags & IRQF_NO_SUSPEND))
389 return;
390 desc->istate |= IRQS_SUSPENDED;
391 }
392
393 if (!desc->depth++) 387 if (!desc->depth++)
394 irq_disable(desc); 388 irq_disable(desc);
395} 389}
@@ -401,7 +395,7 @@ static int __disable_irq_nosync(unsigned int irq)
401 395
402 if (!desc) 396 if (!desc)
403 return -EINVAL; 397 return -EINVAL;
404 __disable_irq(desc, irq, false); 398 __disable_irq(desc, irq);
405 irq_put_desc_busunlock(desc, flags); 399 irq_put_desc_busunlock(desc, flags);
406 return 0; 400 return 0;
407} 401}
@@ -442,20 +436,8 @@ void disable_irq(unsigned int irq)
442} 436}
443EXPORT_SYMBOL(disable_irq); 437EXPORT_SYMBOL(disable_irq);
444 438
445void __enable_irq(struct irq_desc *desc, unsigned int irq, bool resume) 439void __enable_irq(struct irq_desc *desc, unsigned int irq)
446{ 440{
447 if (resume) {
448 if (!(desc->istate & IRQS_SUSPENDED)) {
449 if (!desc->action)
450 return;
451 if (!(desc->action->flags & IRQF_FORCE_RESUME))
452 return;
453 /* Pretend that it got disabled ! */
454 desc->depth++;
455 }
456 desc->istate &= ~IRQS_SUSPENDED;
457 }
458
459 switch (desc->depth) { 441 switch (desc->depth) {
460 case 0: 442 case 0:
461 err_out: 443 err_out:
@@ -497,7 +479,7 @@ void enable_irq(unsigned int irq)
497 KERN_ERR "enable_irq before setup/request_irq: irq %u\n", irq)) 479 KERN_ERR "enable_irq before setup/request_irq: irq %u\n", irq))
498 goto out; 480 goto out;
499 481
500 __enable_irq(desc, irq, false); 482 __enable_irq(desc, irq);
501out: 483out:
502 irq_put_desc_busunlock(desc, flags); 484 irq_put_desc_busunlock(desc, flags);
503} 485}
@@ -1218,6 +1200,8 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
1218 new->irq = irq; 1200 new->irq = irq;
1219 *old_ptr = new; 1201 *old_ptr = new;
1220 1202
1203 irq_pm_install_action(desc, new);
1204
1221 /* Reset broken irq detection when installing new handler */ 1205 /* Reset broken irq detection when installing new handler */
1222 desc->irq_count = 0; 1206 desc->irq_count = 0;
1223 desc->irqs_unhandled = 0; 1207 desc->irqs_unhandled = 0;
@@ -1228,7 +1212,7 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
1228 */ 1212 */
1229 if (shared && (desc->istate & IRQS_SPURIOUS_DISABLED)) { 1213 if (shared && (desc->istate & IRQS_SPURIOUS_DISABLED)) {
1230 desc->istate &= ~IRQS_SPURIOUS_DISABLED; 1214 desc->istate &= ~IRQS_SPURIOUS_DISABLED;
1231 __enable_irq(desc, irq, false); 1215 __enable_irq(desc, irq);
1232 } 1216 }
1233 1217
1234 raw_spin_unlock_irqrestore(&desc->lock, flags); 1218 raw_spin_unlock_irqrestore(&desc->lock, flags);
@@ -1336,6 +1320,8 @@ static struct irqaction *__free_irq(unsigned int irq, void *dev_id)
1336 /* Found it - now remove it from the list of entries: */ 1320 /* Found it - now remove it from the list of entries: */
1337 *action_ptr = action->next; 1321 *action_ptr = action->next;
1338 1322
1323 irq_pm_remove_action(desc, action);
1324
1339 /* If this was the last handler, shut down the IRQ line: */ 1325 /* If this was the last handler, shut down the IRQ line: */
1340 if (!desc->action) { 1326 if (!desc->action) {
1341 irq_shutdown(desc); 1327 irq_shutdown(desc);
diff --git a/kernel/irq/pm.c b/kernel/irq/pm.c
index abcd6ca86cb7..3ca532592704 100644
--- a/kernel/irq/pm.c
+++ b/kernel/irq/pm.c
@@ -9,17 +9,105 @@
9#include <linux/irq.h> 9#include <linux/irq.h>
10#include <linux/module.h> 10#include <linux/module.h>
11#include <linux/interrupt.h> 11#include <linux/interrupt.h>
12#include <linux/suspend.h>
12#include <linux/syscore_ops.h> 13#include <linux/syscore_ops.h>
13 14
14#include "internals.h" 15#include "internals.h"
15 16
17bool irq_pm_check_wakeup(struct irq_desc *desc)
18{
19 if (irqd_is_wakeup_armed(&desc->irq_data)) {
20 irqd_clear(&desc->irq_data, IRQD_WAKEUP_ARMED);
21 desc->istate |= IRQS_SUSPENDED | IRQS_PENDING;
22 desc->depth++;
23 irq_disable(desc);
24 pm_system_wakeup();
25 return true;
26 }
27 return false;
28}
29
30/*
31 * Called from __setup_irq() with desc->lock held after @action has
32 * been installed in the action chain.
33 */
34void irq_pm_install_action(struct irq_desc *desc, struct irqaction *action)
35{
36 desc->nr_actions++;
37
38 if (action->flags & IRQF_FORCE_RESUME)
39 desc->force_resume_depth++;
40
41 WARN_ON_ONCE(desc->force_resume_depth &&
42 desc->force_resume_depth != desc->nr_actions);
43
44 if (action->flags & IRQF_NO_SUSPEND)
45 desc->no_suspend_depth++;
46
47 WARN_ON_ONCE(desc->no_suspend_depth &&
48 desc->no_suspend_depth != desc->nr_actions);
49}
50
51/*
52 * Called from __free_irq() with desc->lock held after @action has
53 * been removed from the action chain.
54 */
55void irq_pm_remove_action(struct irq_desc *desc, struct irqaction *action)
56{
57 desc->nr_actions--;
58
59 if (action->flags & IRQF_FORCE_RESUME)
60 desc->force_resume_depth--;
61
62 if (action->flags & IRQF_NO_SUSPEND)
63 desc->no_suspend_depth--;
64}
65
66static bool suspend_device_irq(struct irq_desc *desc, int irq)
67{
68 if (!desc->action || desc->no_suspend_depth)
69 return false;
70
71 if (irqd_is_wakeup_set(&desc->irq_data)) {
72 irqd_set(&desc->irq_data, IRQD_WAKEUP_ARMED);
73 /*
74 * We return true here to force the caller to issue
75 * synchronize_irq(). We need to make sure that the
76 * IRQD_WAKEUP_ARMED is visible before we return from
77 * suspend_device_irqs().
78 */
79 return true;
80 }
81
82 desc->istate |= IRQS_SUSPENDED;
83 __disable_irq(desc, irq);
84
85 /*
86 * Hardware which has no wakeup source configuration facility
87 * requires that the non wakeup interrupts are masked at the
88 * chip level. The chip implementation indicates that with
89 * IRQCHIP_MASK_ON_SUSPEND.
90 */
91 if (irq_desc_get_chip(desc)->flags & IRQCHIP_MASK_ON_SUSPEND)
92 mask_irq(desc);
93 return true;
94}
95
16/** 96/**
17 * suspend_device_irqs - disable all currently enabled interrupt lines 97 * suspend_device_irqs - disable all currently enabled interrupt lines
18 * 98 *
19 * During system-wide suspend or hibernation device drivers need to be prevented 99 * During system-wide suspend or hibernation device drivers need to be
20 * from receiving interrupts and this function is provided for this purpose. 100 * prevented from receiving interrupts and this function is provided
21 * It marks all interrupt lines in use, except for the timer ones, as disabled 101 * for this purpose.
22 * and sets the IRQS_SUSPENDED flag for each of them. 102 *
103 * So we disable all interrupts and mark them IRQS_SUSPENDED except
104 * for those which are unused, those which are marked as not
105 * suspendable via an interrupt request with the flag IRQF_NO_SUSPEND
106 * set and those which are marked as active wakeup sources.
107 *
108 * The active wakeup sources are handled by the flow handler entry
109 * code which checks for the IRQD_WAKEUP_ARMED flag, suspends the
110 * interrupt and notifies the pm core about the wakeup.
23 */ 111 */
24void suspend_device_irqs(void) 112void suspend_device_irqs(void)
25{ 113{
@@ -28,18 +116,36 @@ void suspend_device_irqs(void)
28 116
29 for_each_irq_desc(irq, desc) { 117 for_each_irq_desc(irq, desc) {
30 unsigned long flags; 118 unsigned long flags;
119 bool sync;
31 120
32 raw_spin_lock_irqsave(&desc->lock, flags); 121 raw_spin_lock_irqsave(&desc->lock, flags);
33 __disable_irq(desc, irq, true); 122 sync = suspend_device_irq(desc, irq);
34 raw_spin_unlock_irqrestore(&desc->lock, flags); 123 raw_spin_unlock_irqrestore(&desc->lock, flags);
35 }
36 124
37 for_each_irq_desc(irq, desc) 125 if (sync)
38 if (desc->istate & IRQS_SUSPENDED)
39 synchronize_irq(irq); 126 synchronize_irq(irq);
127 }
40} 128}
41EXPORT_SYMBOL_GPL(suspend_device_irqs); 129EXPORT_SYMBOL_GPL(suspend_device_irqs);
42 130
131static void resume_irq(struct irq_desc *desc, int irq)
132{
133 irqd_clear(&desc->irq_data, IRQD_WAKEUP_ARMED);
134
135 if (desc->istate & IRQS_SUSPENDED)
136 goto resume;
137
138 /* Force resume the interrupt? */
139 if (!desc->force_resume_depth)
140 return;
141
142 /* Pretend that it got disabled ! */
143 desc->depth++;
144resume:
145 desc->istate &= ~IRQS_SUSPENDED;
146 __enable_irq(desc, irq);
147}
148
43static void resume_irqs(bool want_early) 149static void resume_irqs(bool want_early)
44{ 150{
45 struct irq_desc *desc; 151 struct irq_desc *desc;
@@ -54,7 +160,7 @@ static void resume_irqs(bool want_early)
54 continue; 160 continue;
55 161
56 raw_spin_lock_irqsave(&desc->lock, flags); 162 raw_spin_lock_irqsave(&desc->lock, flags);
57 __enable_irq(desc, irq, true); 163 resume_irq(desc, irq);
58 raw_spin_unlock_irqrestore(&desc->lock, flags); 164 raw_spin_unlock_irqrestore(&desc->lock, flags);
59 } 165 }
60} 166}
@@ -93,38 +199,3 @@ void resume_device_irqs(void)
93 resume_irqs(false); 199 resume_irqs(false);
94} 200}
95EXPORT_SYMBOL_GPL(resume_device_irqs); 201EXPORT_SYMBOL_GPL(resume_device_irqs);
96
97/**
98 * check_wakeup_irqs - check if any wake-up interrupts are pending
99 */
100int check_wakeup_irqs(void)
101{
102 struct irq_desc *desc;
103 int irq;
104
105 for_each_irq_desc(irq, desc) {
106 /*
107 * Only interrupts which are marked as wakeup source
108 * and have not been disabled before the suspend check
109 * can abort suspend.
110 */
111 if (irqd_is_wakeup_set(&desc->irq_data)) {
112 if (desc->depth == 1 && desc->istate & IRQS_PENDING)
113 return -EBUSY;
114 continue;
115 }
116 /*
117 * Check the non wakeup interrupts whether they need
118 * to be masked before finally going into suspend
119 * state. That's for hardware which has no wakeup
120 * source configuration facility. The chip
121 * implementation indicates that with
122 * IRQCHIP_MASK_ON_SUSPEND.
123 */
124 if (desc->istate & IRQS_SUSPENDED &&
125 irq_desc_get_chip(desc)->flags & IRQCHIP_MASK_ON_SUSPEND)
126 mask_irq(desc);
127 }
128
129 return 0;
130}
diff --git a/kernel/power/process.c b/kernel/power/process.c
index 4ee194eb524b..7b323221b9ee 100644
--- a/kernel/power/process.c
+++ b/kernel/power/process.c
@@ -129,6 +129,7 @@ int freeze_processes(void)
129 if (!pm_freezing) 129 if (!pm_freezing)
130 atomic_inc(&system_freezing_cnt); 130 atomic_inc(&system_freezing_cnt);
131 131
132 pm_wakeup_clear();
132 printk("Freezing user space processes ... "); 133 printk("Freezing user space processes ... ");
133 pm_freezing = true; 134 pm_freezing = true;
134 error = try_to_freeze_tasks(true); 135 error = try_to_freeze_tasks(true);
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index b38fb2b9e237..2d75c94ae87d 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -3359,7 +3359,7 @@ static void rb_iter_reset(struct ring_buffer_iter *iter)
3359 iter->head = cpu_buffer->reader_page->read; 3359 iter->head = cpu_buffer->reader_page->read;
3360 3360
3361 iter->cache_reader_page = iter->head_page; 3361 iter->cache_reader_page = iter->head_page;
3362 iter->cache_read = iter->head; 3362 iter->cache_read = cpu_buffer->read;
3363 3363
3364 if (iter->head) 3364 if (iter->head)
3365 iter->read_stamp = cpu_buffer->read_stamp; 3365 iter->read_stamp = cpu_buffer->read_stamp;
diff --git a/lib/rhashtable.c b/lib/rhashtable.c
index 7b36e4d40ed7..16d02639d334 100644
--- a/lib/rhashtable.c
+++ b/lib/rhashtable.c
@@ -588,13 +588,13 @@ EXPORT_SYMBOL_GPL(rhashtable_init);
588 * rhashtable_destroy - destroy hash table 588 * rhashtable_destroy - destroy hash table
589 * @ht: the hash table to destroy 589 * @ht: the hash table to destroy
590 * 590 *
591 * Frees the bucket array. 591 * Frees the bucket array. This function is not rcu safe, therefore the caller
592 * has to make sure that no resizing may happen by unpublishing the hashtable
593 * and waiting for the quiescent cycle before releasing the bucket array.
592 */ 594 */
593void rhashtable_destroy(const struct rhashtable *ht) 595void rhashtable_destroy(const struct rhashtable *ht)
594{ 596{
595 const struct bucket_table *tbl = rht_dereference(ht->tbl, ht); 597 bucket_table_free(ht->tbl);
596
597 bucket_table_free(tbl);
598} 598}
599EXPORT_SYMBOL_GPL(rhashtable_destroy); 599EXPORT_SYMBOL_GPL(rhashtable_destroy);
600 600
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index d9a21d06b862..f8ffd9412ec5 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1795,14 +1795,17 @@ static int __split_huge_page_map(struct page *page,
1795 for (i = 0; i < HPAGE_PMD_NR; i++, haddr += PAGE_SIZE) { 1795 for (i = 0; i < HPAGE_PMD_NR; i++, haddr += PAGE_SIZE) {
1796 pte_t *pte, entry; 1796 pte_t *pte, entry;
1797 BUG_ON(PageCompound(page+i)); 1797 BUG_ON(PageCompound(page+i));
1798 /*
1799 * Note that pmd_numa is not transferred deliberately
1800 * to avoid any possibility that pte_numa leaks to
1801 * a PROT_NONE VMA by accident.
1802 */
1798 entry = mk_pte(page + i, vma->vm_page_prot); 1803 entry = mk_pte(page + i, vma->vm_page_prot);
1799 entry = maybe_mkwrite(pte_mkdirty(entry), vma); 1804 entry = maybe_mkwrite(pte_mkdirty(entry), vma);
1800 if (!pmd_write(*pmd)) 1805 if (!pmd_write(*pmd))
1801 entry = pte_wrprotect(entry); 1806 entry = pte_wrprotect(entry);
1802 if (!pmd_young(*pmd)) 1807 if (!pmd_young(*pmd))
1803 entry = pte_mkold(entry); 1808 entry = pte_mkold(entry);
1804 if (pmd_numa(*pmd))
1805 entry = pte_mknuma(entry);
1806 pte = pte_offset_map(&_pmd, haddr); 1809 pte = pte_offset_map(&_pmd, haddr);
1807 BUG_ON(!pte_none(*pte)); 1810 BUG_ON(!pte_none(*pte));
1808 set_pte_at(mm, haddr, pte, entry); 1811 set_pte_at(mm, haddr, pte, entry);
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 085dc6d2f876..28928ce9b07f 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -292,6 +292,9 @@ struct mem_cgroup {
292 /* vmpressure notifications */ 292 /* vmpressure notifications */
293 struct vmpressure vmpressure; 293 struct vmpressure vmpressure;
294 294
295 /* css_online() has been completed */
296 int initialized;
297
295 /* 298 /*
296 * the counter to account for mem+swap usage. 299 * the counter to account for mem+swap usage.
297 */ 300 */
@@ -1099,10 +1102,21 @@ skip_node:
1099 * skipping css reference should be safe. 1102 * skipping css reference should be safe.
1100 */ 1103 */
1101 if (next_css) { 1104 if (next_css) {
1102 if ((next_css == &root->css) || 1105 struct mem_cgroup *memcg = mem_cgroup_from_css(next_css);
1103 ((next_css->flags & CSS_ONLINE) && 1106
1104 css_tryget_online(next_css))) 1107 if (next_css == &root->css)
1105 return mem_cgroup_from_css(next_css); 1108 return memcg;
1109
1110 if (css_tryget_online(next_css)) {
1111 /*
1112 * Make sure the memcg is initialized:
1113 * mem_cgroup_css_online() orders the
1114 * initialization against setting the flag.
1115 */
1116 if (smp_load_acquire(&memcg->initialized))
1117 return memcg;
1118 css_put(next_css);
1119 }
1106 1120
1107 prev_css = next_css; 1121 prev_css = next_css;
1108 goto skip_node; 1122 goto skip_node;
@@ -5549,6 +5563,7 @@ mem_cgroup_css_online(struct cgroup_subsys_state *css)
5549{ 5563{
5550 struct mem_cgroup *memcg = mem_cgroup_from_css(css); 5564 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5551 struct mem_cgroup *parent = mem_cgroup_from_css(css->parent); 5565 struct mem_cgroup *parent = mem_cgroup_from_css(css->parent);
5566 int ret;
5552 5567
5553 if (css->id > MEM_CGROUP_ID_MAX) 5568 if (css->id > MEM_CGROUP_ID_MAX)
5554 return -ENOSPC; 5569 return -ENOSPC;
@@ -5585,7 +5600,18 @@ mem_cgroup_css_online(struct cgroup_subsys_state *css)
5585 } 5600 }
5586 mutex_unlock(&memcg_create_mutex); 5601 mutex_unlock(&memcg_create_mutex);
5587 5602
5588 return memcg_init_kmem(memcg, &memory_cgrp_subsys); 5603 ret = memcg_init_kmem(memcg, &memory_cgrp_subsys);
5604 if (ret)
5605 return ret;
5606
5607 /*
5608 * Make sure the memcg is initialized: mem_cgroup_iter()
5609 * orders reading memcg->initialized against its callers
5610 * reading the memcg members.
5611 */
5612 smp_store_release(&memcg->initialized, 1);
5613
5614 return 0;
5589} 5615}
5590 5616
5591/* 5617/*
diff --git a/mm/migrate.c b/mm/migrate.c
index f78ec9bd454d..2740360cd216 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -146,8 +146,11 @@ static int remove_migration_pte(struct page *new, struct vm_area_struct *vma,
 	pte = pte_mkold(mk_pte(new, vma->vm_page_prot));
 	if (pte_swp_soft_dirty(*ptep))
 		pte = pte_mksoft_dirty(pte);
+
+	/* Recheck VMA as permissions can change since migration started */
 	if (is_write_migration_entry(entry))
-		pte = pte_mkwrite(pte);
+		pte = maybe_mkwrite(pte, vma);
+
 #ifdef CONFIG_HUGETLB_PAGE
 	if (PageHuge(new)) {
 		pte = pte_mkhuge(pte);
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 18cee0d4c8a2..eee961958021 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1612,7 +1612,7 @@ again:
 	}
 
 	__mod_zone_page_state(zone, NR_ALLOC_BATCH, -(1 << order));
-	if (zone_page_state(zone, NR_ALLOC_BATCH) == 0 &&
+	if (atomic_long_read(&zone->vm_stat[NR_ALLOC_BATCH]) <= 0 &&
 	    !zone_is_fair_depleted(zone))
 		zone_set_flag(zone, ZONE_FAIR_DEPLETED);
 
@@ -5701,9 +5701,8 @@ static void __setup_per_zone_wmarks(void)
 		zone->watermark[WMARK_HIGH] = min_wmark_pages(zone) + (tmp >> 1);
 
 		__mod_zone_page_state(zone, NR_ALLOC_BATCH,
-				      high_wmark_pages(zone) -
-				      low_wmark_pages(zone) -
-				      zone_page_state(zone, NR_ALLOC_BATCH));
+				      high_wmark_pages(zone) - low_wmark_pages(zone) -
+				      atomic_long_read(&zone->vm_stat[NR_ALLOC_BATCH]));
 
 		setup_zone_migrate_reserve(zone);
 		spin_unlock_irqrestore(&zone->lock, flags);
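The NR_ALLOC_BATCH test changes because the batch counter is decremented in 1 << order steps: it can jump from a small positive value straight to a negative one without ever being exactly zero, so an exact "== 0" check on the accessor may never fire. Reading the raw signed value and testing "<= 0" is robust. A stand-alone sketch of the failure mode (plain C, names are placeholders for the zone counter):

/* Illustrative only: a batch counter decremented in page-order chunks can
 * step straight from positive to negative, so an exact "== 0" test may
 * never fire; "<= 0" on the signed value always catches depletion. */
#include <stdio.h>

int main(void)
{
	long batch = 3;		/* stand-in for the zone's NR_ALLOC_BATCH */
	int order = 2;		/* a 4-page allocation */

	batch -= 1 << order;	/* 3 -> -1: zero is skipped entirely */

	if (batch == 0)
		printf("== 0: depleted\n");	/* never reached here */
	if (batch <= 0)
		printf("<= 0: depleted (batch=%ld)\n", batch);
	return 0;
}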
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index da1378a3e2c7..8d289697cc7a 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -3152,6 +3152,9 @@ int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb)
 		NAPI_GRO_CB(skb)->free = NAPI_GRO_FREE_STOLEN_HEAD;
 		goto done;
 	}
+	/* switch back to head shinfo */
+	pinfo = skb_shinfo(p);
+
 	if (pinfo->frag_list)
 		goto merge;
 	if (skb_gro_len(p) != pinfo->gso_size)
diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c
index bd41dd1948b6..bda4bb8ae260 100644
--- a/net/ipv4/ip_tunnel.c
+++ b/net/ipv4/ip_tunnel.c
@@ -764,9 +764,14 @@ int ip_tunnel_ioctl(struct net_device *dev, struct ip_tunnel_parm *p, int cmd)
 
 		t = ip_tunnel_find(itn, p, itn->fb_tunnel_dev->type);
 
-		if (!t && (cmd == SIOCADDTUNNEL)) {
-			t = ip_tunnel_create(net, itn, p);
-			err = PTR_ERR_OR_ZERO(t);
+		if (cmd == SIOCADDTUNNEL) {
+			if (!t) {
+				t = ip_tunnel_create(net, itn, p);
+				err = PTR_ERR_OR_ZERO(t);
+				break;
+			}
+
+			err = -EEXIST;
 			break;
 		}
 		if (dev != itn->fb_tunnel_dev && cmd == SIOCCHGTUNNEL) {
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 173e7ea54c70..cbadb942c332 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -746,7 +746,7 @@ static void __ip_do_redirect(struct rtable *rt, struct sk_buff *skb, struct flow
 	}
 
 	n = ipv4_neigh_lookup(&rt->dst, NULL, &new_gw);
-	if (n) {
+	if (!IS_ERR(n)) {
 		if (!(n->nud_state & NUD_VALID)) {
 			neigh_event_send(n, NULL);
 		} else {
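ipv4_neigh_lookup() reports failure with an ERR_PTR()-encoded pointer rather than NULL, so a plain "if (n)" treats the error cookie as a valid neighbour and dereferences it. A small user-space re-implementation of the convention (the macros below are simplified stand-ins for the kernel's err.h helpers, not the real ones):

/* Illustrative only: why "if (ptr)" is the wrong test for an
 * ERR_PTR()-style return value; an IS_ERR()-style check is needed. */
#include <errno.h>
#include <stdint.h>
#include <stdio.h>

/* Simplified stand-ins for the kernel's <linux/err.h> helpers. */
#define MAX_ERRNO	4095
#define ERR_PTR(err)	((void *)(intptr_t)(err))
#define PTR_ERR(ptr)	((long)(intptr_t)(ptr))
#define IS_ERR(ptr)	((uintptr_t)(ptr) >= (uintptr_t)-MAX_ERRNO)

static int the_neighbour = 42;

static int *neigh_lookup(int want_failure)
{
	if (want_failure)
		return ERR_PTR(-EINVAL);	/* error encoded in the pointer */
	return &the_neighbour;
}

int main(void)
{
	int *n = neigh_lookup(1);

	if (n)		/* WRONG: the error cookie is non-NULL */
		printf("plain NULL check: looks valid (bug)\n");

	if (!IS_ERR(n))
		printf("IS_ERR check: valid neighbour %d\n", *n);
	else
		printf("IS_ERR check: lookup failed, err=%ld\n", PTR_ERR(n));
	return 0;
}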
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 3342ee64f2e3..3e118dfddd02 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -4780,10 +4780,11 @@ static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp)
 
 		if (ip6_del_rt(ifp->rt))
 			dst_free(&ifp->rt->dst);
+
+		rt_genid_bump_ipv6(net);
 		break;
 	}
 	atomic_inc(&net->ipv6.dev_addr_genid);
-	rt_genid_bump_ipv6(net);
 }
 
 static void ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp)
diff --git a/net/ipv6/addrconf_core.c b/net/ipv6/addrconf_core.c
index e6960457f625..98cc4cd570e2 100644
--- a/net/ipv6/addrconf_core.c
+++ b/net/ipv6/addrconf_core.c
@@ -8,6 +8,13 @@
 #include <net/addrconf.h>
 #include <net/ip.h>
 
+/* if ipv6 module registers this function is used by xfrm to force all
+ * sockets to relookup their nodes - this is fairly expensive, be
+ * careful
+ */
+void (*__fib6_flush_trees)(struct net *);
+EXPORT_SYMBOL(__fib6_flush_trees);
+
 #define IPV6_ADDR_SCOPE_TYPE(scope)	((scope) << 16)
 
 static inline unsigned int ipv6_addr_scope2type(unsigned int scope)
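The exported __fib6_flush_trees pointer is an optional hook: always-built-in code can call into the IPv6 module only if that module has been loaded and registered an implementation. A minimal sketch of the pattern in plain C, with one translation unit standing in for both sides (function names here are placeholders):

/* Illustrative only: an optional hook exposed as a global function
 * pointer; the "core" calls it only when a "module" has filled it in. */
#include <stdio.h>

/* Core side: NULL until an optional component registers itself. */
void (*flush_trees_hook)(int net_id);

static void core_flush(int net_id)
{
	if (flush_trees_hook)		/* hook present: use it */
		flush_trees_hook(net_id);
	else				/* hook absent: nothing to do */
		printf("net %d: ipv6 not loaded, nothing to flush\n", net_id);
}

/* "Module" side: the real implementation plus its registration. */
static void fib6_flush_trees_impl(int net_id)
{
	printf("net %d: bumping sernum on all fib6 trees\n", net_id);
}

static void module_register(void)
{
	flush_trees_hook = fib6_flush_trees_impl;
}

int main(void)
{
	core_flush(1);		/* before the module registers: no-op */
	module_register();
	core_flush(1);		/* after: the hook is invoked */
	return 0;
}

In the patch itself the registration happens in fib6_init(), shown in the ip6_fib.c hunk below.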
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
index 76b7f5ee8f4c..97b9fa8de377 100644
--- a/net/ipv6/ip6_fib.c
+++ b/net/ipv6/ip6_fib.c
@@ -1605,6 +1605,24 @@ static void fib6_prune_clones(struct net *net, struct fib6_node *fn)
 	fib6_clean_tree(net, fn, fib6_prune_clone, 1, NULL);
 }
 
+static int fib6_update_sernum(struct rt6_info *rt, void *arg)
+{
+	__u32 sernum = *(__u32 *)arg;
+
+	if (rt->rt6i_node &&
+	    rt->rt6i_node->fn_sernum != sernum)
+		rt->rt6i_node->fn_sernum = sernum;
+
+	return 0;
+}
+
+static void fib6_flush_trees(struct net *net)
+{
+	__u32 new_sernum = fib6_new_sernum();
+
+	fib6_clean_all(net, fib6_update_sernum, &new_sernum);
+}
+
 /*
  *	Garbage collection
  */
@@ -1788,6 +1806,8 @@ int __init fib6_init(void)
 			      NULL);
 	if (ret)
 		goto out_unregister_subsys;
+
+	__fib6_flush_trees = fib6_flush_trees;
 out:
 	return ret;
 
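fib6_flush_trees() invalidates every cached IPv6 route in one step by bumping fn_sernum on all tree nodes: each cached dst remembers the serial number it was created under (its cookie), and the ip6_dst_check() change further down relies on that comparison alone once the rt6i_genid check is gone. A compact, self-contained sketch of serial-number invalidation (not kernel code; the names are illustrative):

/* Illustrative only: lazy cache invalidation with a serial number.
 * Cached entries record the sernum at creation time; bumping the
 * sernum makes every existing entry fail its next validity check. */
#include <stdio.h>

struct tree {
	unsigned int sernum;	/* bumped whenever the tree changes */
};

struct cached_route {
	unsigned int cookie;	/* sernum captured when the entry was made */
	int dest;
};

static struct cached_route make_route(struct tree *t, int dest)
{
	struct cached_route rt = { .cookie = t->sernum, .dest = dest };
	return rt;
}

static int route_is_valid(const struct tree *t, const struct cached_route *rt)
{
	return rt->cookie == t->sernum;
}

static void flush_trees(struct tree *t)
{
	t->sernum++;	/* one increment invalidates all cached routes */
}

int main(void)
{
	struct tree t = { .sernum = 1 };
	struct cached_route rt = make_route(&t, 10);

	printf("before flush: valid=%d\n", route_is_valid(&t, &rt));
	flush_trees(&t);
	printf("after flush:  valid=%d\n", route_is_valid(&t, &rt));
	return 0;
}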
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
index 5f19dfbc4c6a..f304471477dc 100644
--- a/net/ipv6/ip6_gre.c
+++ b/net/ipv6/ip6_gre.c
@@ -314,6 +314,8 @@ static struct ip6_tnl *ip6gre_tunnel_locate(struct net *net,
 	struct ip6gre_net *ign = net_generic(net, ip6gre_net_id);
 
 	t = ip6gre_tunnel_find(net, parms, ARPHRD_IP6GRE);
+	if (t && create)
+		return NULL;
 	if (t || !create)
 		return t;
 
@@ -1724,4 +1726,5 @@ MODULE_LICENSE("GPL");
 MODULE_AUTHOR("D. Kozlov (xeb@mail.ru)");
 MODULE_DESCRIPTION("GRE over IPv6 tunneling device");
 MODULE_ALIAS_RTNL_LINK("ip6gre");
+MODULE_ALIAS_RTNL_LINK("ip6gretap");
 MODULE_ALIAS_NETDEV("ip6gre0");
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index f9de5a695072..69a84b464009 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -364,8 +364,12 @@ static struct ip6_tnl *ip6_tnl_locate(struct net *net,
 	     (t = rtnl_dereference(*tp)) != NULL;
 	     tp = &t->next) {
 		if (ipv6_addr_equal(local, &t->parms.laddr) &&
-		    ipv6_addr_equal(remote, &t->parms.raddr))
+		    ipv6_addr_equal(remote, &t->parms.raddr)) {
+			if (create)
+				return NULL;
+
 			return t;
+		}
 	}
 	if (!create)
 		return NULL;
diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c
index 7f52fd9fa7b0..5833a2244467 100644
--- a/net/ipv6/ip6_vti.c
+++ b/net/ipv6/ip6_vti.c
@@ -253,8 +253,12 @@ static struct ip6_tnl *vti6_locate(struct net *net, struct __ip6_tnl_parm *p,
 	     (t = rtnl_dereference(*tp)) != NULL;
 	     tp = &t->next) {
 		if (ipv6_addr_equal(local, &t->parms.laddr) &&
-		    ipv6_addr_equal(remote, &t->parms.raddr))
+		    ipv6_addr_equal(remote, &t->parms.raddr)) {
+			if (create)
+				return NULL;
+
 			return t;
+		}
 	}
 	if (!create)
 		return NULL;
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index f23fbd28a501..bafde82324c5 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -314,7 +314,6 @@ static inline struct rt6_info *ip6_dst_alloc(struct net *net,
 
 		memset(dst + 1, 0, sizeof(*rt) - sizeof(*dst));
 		rt6_init_peer(rt, table ? &table->tb6_peers : net->ipv6.peers);
-		rt->rt6i_genid = rt_genid_ipv6(net);
 		INIT_LIST_HEAD(&rt->rt6i_siblings);
 	}
 	return rt;
@@ -1098,9 +1097,6 @@ static struct dst_entry *ip6_dst_check(struct dst_entry *dst, u32 cookie)
 	 * DST_OBSOLETE_FORCE_CHK which forces validation calls down
 	 * into this function always.
 	 */
-	if (rt->rt6i_genid != rt_genid_ipv6(dev_net(rt->dst.dev)))
-		return NULL;
-
 	if (!rt->rt6i_node || (rt->rt6i_node->fn_sernum != cookie))
 		return NULL;
 
diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
index b5c1d3aadb41..6d77cce481d5 100644
--- a/net/netfilter/Kconfig
+++ b/net/netfilter/Kconfig
@@ -847,6 +847,7 @@ config NETFILTER_XT_TARGET_TPROXY
 	tristate '"TPROXY" target transparent proxying support'
 	depends on NETFILTER_XTABLES
 	depends on NETFILTER_ADVANCED
+	depends on (IPV6 || IPV6=n)
 	depends on IP_NF_MANGLE
 	select NF_DEFRAG_IPV4
 	select NF_DEFRAG_IPV6 if IP6_NF_IPTABLES
diff --git a/net/netfilter/nfnetlink.c b/net/netfilter/nfnetlink.c
index c138b8fbe280..f37f0716a9fc 100644
--- a/net/netfilter/nfnetlink.c
+++ b/net/netfilter/nfnetlink.c
@@ -222,6 +222,51 @@ replay:
 	}
 }
 
+struct nfnl_err {
+	struct list_head head;
+	struct nlmsghdr *nlh;
+	int err;
+};
+
+static int nfnl_err_add(struct list_head *list, struct nlmsghdr *nlh, int err)
+{
+	struct nfnl_err *nfnl_err;
+
+	nfnl_err = kmalloc(sizeof(struct nfnl_err), GFP_KERNEL);
+	if (nfnl_err == NULL)
+		return -ENOMEM;
+
+	nfnl_err->nlh = nlh;
+	nfnl_err->err = err;
+	list_add_tail(&nfnl_err->head, list);
+
+	return 0;
+}
+
+static void nfnl_err_del(struct nfnl_err *nfnl_err)
+{
+	list_del(&nfnl_err->head);
+	kfree(nfnl_err);
+}
+
+static void nfnl_err_reset(struct list_head *err_list)
+{
+	struct nfnl_err *nfnl_err, *next;
+
+	list_for_each_entry_safe(nfnl_err, next, err_list, head)
+		nfnl_err_del(nfnl_err);
+}
+
+static void nfnl_err_deliver(struct list_head *err_list, struct sk_buff *skb)
+{
+	struct nfnl_err *nfnl_err, *next;
+
+	list_for_each_entry_safe(nfnl_err, next, err_list, head) {
+		netlink_ack(skb, nfnl_err->nlh, nfnl_err->err);
+		nfnl_err_del(nfnl_err);
+	}
+}
+
 static void nfnetlink_rcv_batch(struct sk_buff *skb, struct nlmsghdr *nlh,
 				u_int16_t subsys_id)
 {
@@ -230,6 +275,7 @@ static void nfnetlink_rcv_batch(struct sk_buff *skb, struct nlmsghdr *nlh,
 	const struct nfnetlink_subsystem *ss;
 	const struct nfnl_callback *nc;
 	bool success = true, done = false;
+	static LIST_HEAD(err_list);
 	int err;
 
 	if (subsys_id >= NFNL_SUBSYS_COUNT)
@@ -287,6 +333,7 @@ replay:
 	type = nlh->nlmsg_type;
 	if (type == NFNL_MSG_BATCH_BEGIN) {
 		/* Malformed: Batch begin twice */
+		nfnl_err_reset(&err_list);
 		success = false;
 		goto done;
 	} else if (type == NFNL_MSG_BATCH_END) {
@@ -333,6 +380,7 @@ replay:
 		 * original skb.
 		 */
 		if (err == -EAGAIN) {
+			nfnl_err_reset(&err_list);
 			ss->abort(skb);
 			nfnl_unlock(subsys_id);
 			kfree_skb(nskb);
@@ -341,11 +389,24 @@ replay:
 		}
 ack:
 	if (nlh->nlmsg_flags & NLM_F_ACK || err) {
+		/* Errors are delivered once the full batch has been
+		 * processed, this avoids that the same error is
+		 * reported several times when replaying the batch.
+		 */
+		if (nfnl_err_add(&err_list, nlh, err) < 0) {
+			/* We failed to enqueue an error, reset the
+			 * list of errors and send OOM to userspace
+			 * pointing to the batch header.
+			 */
+			nfnl_err_reset(&err_list);
+			netlink_ack(skb, nlmsg_hdr(oskb), -ENOMEM);
+			success = false;
+			goto done;
+		}
 		/* We don't stop processing the batch on errors, thus,
 		 * userspace gets all the errors that the batch
 		 * triggers.
 		 */
-		netlink_ack(skb, nlh, err);
 		if (err)
 			success = false;
 	}
@@ -361,6 +422,7 @@ done:
 	else
 		ss->abort(skb);
 
+	nfnl_err_deliver(&err_list, oskb);
 	nfnl_unlock(subsys_id);
 	kfree_skb(nskb);
 }
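The batch change buffers every would-be ACK/error on a list while the batch is being processed and only delivers them once the batch has committed or aborted, so a replayed batch does not report the same error twice. The same buffering pattern in stand-alone C (a plain malloc'd list instead of the kernel's list_head API; all names here are placeholders):

/* Illustrative only: queue per-message results during a batch, deliver
 * them exactly once at the end, and drop them all if the batch is
 * going to be replayed. */
#include <stdio.h>
#include <stdlib.h>

struct pending_err {
	struct pending_err *next;
	int seq;	/* which message in the batch */
	int err;	/* its result code */
};

static struct pending_err *err_list;

static int err_add(int seq, int err)
{
	struct pending_err *e = malloc(sizeof(*e));

	if (!e)
		return -1;
	e->seq = seq;
	e->err = err;
	e->next = err_list;	/* LIFO order is fine for the sketch */
	err_list = e;
	return 0;
}

static void err_reset(void)	/* drop everything, e.g. before a replay */
{
	while (err_list) {
		struct pending_err *e = err_list;

		err_list = e->next;
		free(e);
	}
}

static void err_deliver(void)	/* send all acks once, at the end */
{
	while (err_list) {
		struct pending_err *e = err_list;

		printf("ack seq=%d err=%d\n", e->seq, e->err);
		err_list = e->next;
		free(e);
	}
}

int main(void)
{
	err_add(1, 0);
	err_add(2, -22);	/* one message in the batch failed */
	err_deliver();		/* each result reported exactly once */
	err_reset();		/* nothing left over */
	return 0;
}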
diff --git a/net/netfilter/nft_hash.c b/net/netfilter/nft_hash.c
index 28fb8f38e6ba..8892b7b6184a 100644
--- a/net/netfilter/nft_hash.c
+++ b/net/netfilter/nft_hash.c
@@ -180,15 +180,17 @@ static int nft_hash_init(const struct nft_set *set,
 static void nft_hash_destroy(const struct nft_set *set)
 {
 	const struct rhashtable *priv = nft_set_priv(set);
-	const struct bucket_table *tbl;
+	const struct bucket_table *tbl = priv->tbl;
 	struct nft_hash_elem *he, *next;
 	unsigned int i;
 
-	tbl = rht_dereference(priv->tbl, priv);
-	for (i = 0; i < tbl->size; i++)
-		rht_for_each_entry_safe(he, next, tbl->buckets[i], priv, node)
+	for (i = 0; i < tbl->size; i++) {
+		for (he = rht_entry(tbl->buckets[i], struct nft_hash_elem, node);
+		     he != NULL; he = next) {
+			next = rht_entry(he->node.next, struct nft_hash_elem, node);
 			nft_hash_elem_destroy(set, he);
-
+		}
+	}
 	rhashtable_destroy(priv);
 }
 
diff --git a/net/netfilter/nft_rbtree.c b/net/netfilter/nft_rbtree.c
index e1836ff88199..46214f245665 100644
--- a/net/netfilter/nft_rbtree.c
+++ b/net/netfilter/nft_rbtree.c
@@ -234,13 +234,11 @@ static void nft_rbtree_destroy(const struct nft_set *set)
 	struct nft_rbtree_elem *rbe;
 	struct rb_node *node;
 
-	spin_lock_bh(&nft_rbtree_lock);
 	while ((node = priv->root.rb_node) != NULL) {
 		rb_erase(node, &priv->root);
 		rbe = rb_entry(node, struct nft_rbtree_elem, node);
 		nft_rbtree_elem_destroy(set, rbe);
 	}
-	spin_unlock_bh(&nft_rbtree_lock);
 }
 
 static bool nft_rbtree_estimate(const struct nft_set_desc *desc, u32 features,
diff --git a/net/sched/ematch.c b/net/sched/ematch.c
index 3a633debb6df..ad57f4444b9c 100644
--- a/net/sched/ematch.c
+++ b/net/sched/ematch.c
@@ -526,9 +526,11 @@ pop_stack:
 	match_idx = stack[--stackp];
 	cur_match = tcf_em_get_match(tree, match_idx);
 
-	if (tcf_em_early_end(cur_match, res))
+	if (tcf_em_early_end(cur_match, res)) {
+		if (tcf_em_is_inverted(cur_match))
+			res = !res;
 		goto pop_stack;
-	else {
+	} else {
 		match_idx++;
 		goto proceed;
 	}
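When the evaluator pops back to a parent match that allowed an early end, the fix flips the accumulated result if that parent is an inverted (NOT) match, instead of propagating the un-negated inner value upwards. A tiny recursive sketch of the same idea, evaluating a boolean tree where a node may be marked inverted (purely illustrative; this is not the ematch stack machine and the names are made up):

/* Illustrative only: a subtree's result must be flipped when it bubbles
 * up through an inverted (NOT) node, even if the subtree ended early. */
#include <stdio.h>

struct node {
	int leaf;		/* 1: use 'value'; 0: AND of both children */
	int inverted;		/* apply NOT to this node's result */
	int value;
	struct node *left, *right;
};

static int eval(const struct node *n)
{
	int res;

	if (n->leaf)
		res = n->value;
	else
		res = eval(n->left) && eval(n->right);	/* may end "early" */

	return n->inverted ? !res : res;	/* the flip the fix restores */
}

int main(void)
{
	struct node b = { .leaf = 1, .value = 0 };
	struct node c = { .leaf = 1, .value = 1 };
	/* NOT (b AND c): the inner AND short-circuits to 0, NOT makes it 1 */
	struct node not_bc = { .inverted = 1, .left = &b, .right = &c };
	struct node a = { .leaf = 1, .value = 1 };
	struct node root = { .left = &a, .right = &not_bc };

	printf("match result: %d\n", eval(&root));	/* prints 1 */
	return 0;
}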
diff --git a/sound/soc/codecs/rt286.c b/sound/soc/codecs/rt286.c
index e4f6102efc1a..b86b426f159d 100644
--- a/sound/soc/codecs/rt286.c
+++ b/sound/soc/codecs/rt286.c
@@ -51,7 +51,7 @@ static struct reg_default rt286_index_def[] = {
 	{ 0x04, 0xaf01 },
 	{ 0x08, 0x000d },
 	{ 0x09, 0xd810 },
-	{ 0x0a, 0x0060 },
+	{ 0x0a, 0x0120 },
 	{ 0x0b, 0x0000 },
 	{ 0x0d, 0x2800 },
 	{ 0x0f, 0x0000 },
@@ -60,7 +60,7 @@ static struct reg_default rt286_index_def[] = {
 	{ 0x33, 0x0208 },
 	{ 0x49, 0x0004 },
 	{ 0x4f, 0x50e9 },
-	{ 0x50, 0x2c00 },
+	{ 0x50, 0x2000 },
 	{ 0x63, 0x2902 },
 	{ 0x67, 0x1111 },
 	{ 0x68, 0x1016 },
@@ -104,7 +104,6 @@ static const struct reg_default rt286_reg[] = {
 	{ 0x02170700, 0x00000000 },
 	{ 0x02270100, 0x00000000 },
 	{ 0x02370100, 0x00000000 },
-	{ 0x02040000, 0x00004002 },
 	{ 0x01870700, 0x00000020 },
 	{ 0x00830000, 0x000000c3 },
 	{ 0x00930000, 0x000000c3 },
@@ -192,7 +191,6 @@ static int rt286_hw_write(void *context, unsigned int reg, unsigned int value)
 	/*handle index registers*/
 	if (reg <= 0xff) {
 		rt286_hw_write(client, RT286_COEF_INDEX, reg);
-		reg = RT286_PROC_COEF;
 		for (i = 0; i < INDEX_CACHE_SIZE; i++) {
 			if (reg == rt286->index_cache[i].reg) {
 				rt286->index_cache[i].def = value;
@@ -200,6 +198,7 @@ static int rt286_hw_write(void *context, unsigned int reg, unsigned int value)
 			}
 
 		}
+		reg = RT286_PROC_COEF;
 	}
 
 	data[0] = (reg >> 24) & 0xff;
diff --git a/sound/soc/codecs/ssm2602.c b/sound/soc/codecs/ssm2602.c
index 484b3bbe8624..4021cd435740 100644
--- a/sound/soc/codecs/ssm2602.c
+++ b/sound/soc/codecs/ssm2602.c
@@ -647,7 +647,7 @@ int ssm2602_probe(struct device *dev, enum ssm2602_type type,
 		return -ENOMEM;
 
 	dev_set_drvdata(dev, ssm2602);
-	ssm2602->type = SSM2602;
+	ssm2602->type = type;
 	ssm2602->regmap = regmap;
 
 	return snd_soc_register_codec(dev, &soc_codec_dev_ssm2602,
diff --git a/sound/soc/fsl/fsl_ssi.c b/sound/soc/fsl/fsl_ssi.c
index 87eb5776a39b..de6ab06f58a5 100644
--- a/sound/soc/fsl/fsl_ssi.c
+++ b/sound/soc/fsl/fsl_ssi.c
@@ -748,8 +748,9 @@ static int fsl_ssi_hw_free(struct snd_pcm_substream *substream,
 	return 0;
 }
 
-static int _fsl_ssi_set_dai_fmt(struct fsl_ssi_private *ssi_private,
-				unsigned int fmt)
+static int _fsl_ssi_set_dai_fmt(struct device *dev,
+				struct fsl_ssi_private *ssi_private,
+				unsigned int fmt)
 {
 	struct regmap *regs = ssi_private->regs;
 	u32 strcr = 0, stcr, srcr, scr, mask;
@@ -758,7 +759,7 @@ static int _fsl_ssi_set_dai_fmt(struct fsl_ssi_private *ssi_private,
 	ssi_private->dai_fmt = fmt;
 
 	if (fsl_ssi_is_i2s_master(ssi_private) && IS_ERR(ssi_private->baudclk)) {
-		dev_err(&ssi_private->pdev->dev, "baudclk is missing which is necessary for master mode\n");
+		dev_err(dev, "baudclk is missing which is necessary for master mode\n");
 		return -EINVAL;
 	}
 
@@ -913,7 +914,7 @@ static int fsl_ssi_set_dai_fmt(struct snd_soc_dai *cpu_dai, unsigned int fmt)
 {
 	struct fsl_ssi_private *ssi_private = snd_soc_dai_get_drvdata(cpu_dai);
 
-	return _fsl_ssi_set_dai_fmt(ssi_private, fmt);
+	return _fsl_ssi_set_dai_fmt(cpu_dai->dev, ssi_private, fmt);
 }
 
 /**
@@ -1387,7 +1388,8 @@ static int fsl_ssi_probe(struct platform_device *pdev)
 
 done:
 	if (ssi_private->dai_fmt)
-		_fsl_ssi_set_dai_fmt(ssi_private, ssi_private->dai_fmt);
+		_fsl_ssi_set_dai_fmt(&pdev->dev, ssi_private,
+				     ssi_private->dai_fmt);
 
 	return 0;
 
diff --git a/sound/soc/soc-compress.c b/sound/soc/soc-compress.c
index 3092b58fede6..cecfab3cc948 100644
--- a/sound/soc/soc-compress.c
+++ b/sound/soc/soc-compress.c
@@ -102,13 +102,11 @@ static int soc_compr_open_fe(struct snd_compr_stream *cstream)
 	fe->dpcm[stream].runtime = fe_substream->runtime;
 
 	ret = dpcm_path_get(fe, stream, &list);
-	if (ret < 0) {
-		mutex_unlock(&fe->card->mutex);
+	if (ret < 0)
 		goto fe_err;
-	} else if (ret == 0) {
+	else if (ret == 0)
 		dev_dbg(fe->dev, "ASoC: %s no valid %s route\n",
 			fe->dai_link->name, stream ? "capture" : "playback");
-	}
 
 	/* calculate valid and active FE <-> BE dpcms */
 	dpcm_process_paths(fe, stream, &list, 1);
diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c
index 889f4e3d35dc..d074aa91b023 100644
--- a/sound/soc/soc-core.c
+++ b/sound/soc/soc-core.c
@@ -3203,7 +3203,7 @@ int snd_soc_bytes_put(struct snd_kcontrol *kcontrol,
 	unsigned int val, mask;
 	void *data;
 
-	if (!component->regmap)
+	if (!component->regmap || !params->num_regs)
 		return -EINVAL;
 
 	len = params->num_regs * component->val_bytes;