Diffstat:
-rw-r--r--  Documentation/laptops/thinkpad-acpi.txt | 48
-rw-r--r--  arch/alpha/kernel/vmlinux.lds.S | 1
-rw-r--r--  arch/arm/kernel/entry-header.S | 2
-rw-r--r--  arch/mips/kernel/Makefile | 2
-rw-r--r--  arch/mips/kernel/vmlinux.lds.S | 12
-rw-r--r--  arch/sparc/include/asm/vio.h | 2
-rw-r--r--  arch/x86/boot/compressed/head_32.S | 3
-rw-r--r--  arch/x86/boot/compressed/head_64.S | 3
-rw-r--r--  arch/x86/boot/compressed/vmlinux.lds.S | 6
-rw-r--r--  arch/x86/include/asm/pgtable_types.h | 1
-rw-r--r--  arch/x86/include/asm/topology.h | 10
-rw-r--r--  arch/x86/kernel/cpu/mcheck/mce-inject.c | 7
-rw-r--r--  arch/x86/kernel/cpu/mcheck/mce.c | 23
-rw-r--r--  arch/x86/kernel/cpu/mcheck/therm_throt.c | 67
-rw-r--r--  arch/x86/kernel/cpu/perf_event.c | 3
-rw-r--r--  arch/x86/kernel/early_printk.c | 5
-rw-r--r--  arch/x86/kernel/head_32.S | 2
-rw-r--r--  arch/x86/kernel/head_64.S | 2
-rw-r--r--  arch/x86/kernel/traps.c | 6
-rw-r--r--  arch/x86/kernel/tsc_sync.c | 2
-rw-r--r--  arch/x86/kernel/vmlinux.lds.S | 79
-rw-r--r--  arch/x86/mm/Makefile | 3
-rw-r--r--  arch/x86/mm/init.c | 63
-rw-r--r--  arch/x86/mm/pat.c | 7
-rw-r--r--  arch/x86/mm/setup_nx.c | 69
-rw-r--r--  arch/x86/xen/enlighten.c | 10
-rw-r--r--  drivers/acpi/bus.c | 49
-rw-r--r--  drivers/acpi/scan.c | 705
-rw-r--r--  drivers/acpi/video.c | 4
-rw-r--r--  drivers/i2c/busses/i2c-scmi.c | 5
-rw-r--r--  drivers/leds/Kconfig | 13
-rw-r--r--  drivers/leds/Makefile | 1
-rw-r--r--  drivers/leds/leds-clevo-mail.c | 2
-rw-r--r--  drivers/leds/leds-cobalt-qube.c | 2
-rw-r--r--  drivers/leds/leds-cobalt-raq.c | 4
-rw-r--r--  drivers/leds/leds-gpio.c | 2
-rw-r--r--  drivers/leds/leds-pca9532.c | 12
-rw-r--r--  drivers/leds/leds-wm831x-status.c | 341
-rw-r--r--  drivers/leds/ledtrig-gpio.c | 32
-rw-r--r--  drivers/macintosh/via-pmu-led.c | 2
-rw-r--r--  drivers/net/can/at91_can.c | 1186
-rw-r--r--  drivers/net/davinci_emac.c | 9
-rw-r--r--  drivers/net/irda/kingsun-sir.c | 1
-rw-r--r--  drivers/net/irda/ks959-sir.c | 1
-rw-r--r--  drivers/net/irda/ksdazzle-sir.c | 1
-rw-r--r--  drivers/net/irda/mcs7780.c | 1
-rw-r--r--  drivers/net/virtio_net.c | 229
-rw-r--r--  drivers/pci/hotplug/acpiphp_ibm.c | 1
-rw-r--r--  drivers/platform/x86/eeepc-laptop.c | 2
-rw-r--r--  drivers/platform/x86/thinkpad_acpi.c | 632
-rw-r--r--  drivers/pnp/pnpacpi/core.c | 21
-rw-r--r--  drivers/scsi/pmcraid.h | 1
-rw-r--r--  drivers/usb/misc/sisusbvga/sisusb_init.c | 1
-rw-r--r--  drivers/video/backlight/Kconfig | 33
-rw-r--r--  drivers/video/backlight/Makefile | 4
-rw-r--r--  drivers/video/backlight/adp5520_bl.c | 377
-rw-r--r--  drivers/video/backlight/adx_bl.c | 178
-rw-r--r--  drivers/video/backlight/backlight.c | 42
-rw-r--r--  drivers/video/backlight/hp680_bl.c | 2
-rw-r--r--  drivers/video/backlight/lms283gf05.c | 242
-rw-r--r--  drivers/video/backlight/mbp_nvidia_bl.c | 36
-rw-r--r--  drivers/video/backlight/wm831x_bl.c | 250
-rw-r--r--  fs/buffer.c | 10
-rw-r--r--  fs/cifs/Kconfig | 1
-rw-r--r--  fs/cifs/cifsfs.c | 93
-rw-r--r--  fs/cifs/cifsglob.h | 21
-rw-r--r--  fs/cifs/cifsproto.h | 11
-rw-r--r--  fs/cifs/cifssmb.c | 1
-rw-r--r--  fs/cifs/connect.c | 1
-rw-r--r--  fs/cifs/dir.c | 64
-rw-r--r--  fs/cifs/file.c | 137
-rw-r--r--  fs/cifs/misc.c | 34
-rw-r--r--  fs/cifs/readdir.c | 4
-rw-r--r--  fs/cifs/transport.c | 50
-rw-r--r--  fs/fs-writeback.c | 165
-rw-r--r--  include/acpi/acpi_bus.h | 22
-rw-r--r--  include/linux/backing-dev.h | 3
-rw-r--r--  include/linux/backlight.h | 7
-rw-r--r--  include/linux/ipc.h | 2
-rw-r--r--  include/linux/kref.h | 1
-rw-r--r--  include/linux/mfd/wm831x/status.h | 34
-rw-r--r--  include/linux/nfs_fs.h | 1
-rw-r--r--  include/linux/perf_counter.h | 2
-rw-r--r--  include/linux/perf_event.h | 2
-rw-r--r--  include/linux/spi/lms283gf05.h | 28
-rw-r--r--  include/linux/tracepoint.h | 2
-rw-r--r--  include/trace/events/workqueue.h | 4
-rw-r--r--  kernel/futex.c | 137
-rw-r--r--  kernel/module.c | 1
-rw-r--r--  kernel/time/clocksource.c | 4
-rw-r--r--  kernel/trace/ftrace.c | 23
-rw-r--r--  kernel/trace/trace.c | 2
-rw-r--r--  kernel/trace/trace_events.c | 7
-rw-r--r--  mm/page-writeback.c | 30
-rw-r--r--  mm/shmem.c | 5
-rw-r--r--  mm/vmscan.c | 8
-rw-r--r--  net/ax25/af_ax25.c | 27
-rw-r--r--  tools/perf/.gitignore | 1
-rw-r--r--  tools/perf/builtin-stat.c | 18
-rw-r--r--  tools/perf/util/module.c | 96
-rw-r--r--  tools/perf/util/parse-events.c | 49
-rw-r--r--  tools/perf/util/symbol.c | 17
102 files changed, 4398 insertions, 1589 deletions
diff --git a/Documentation/laptops/thinkpad-acpi.txt b/Documentation/laptops/thinkpad-acpi.txt
index 6d03487ef1c7..aafcaa634191 100644
--- a/Documentation/laptops/thinkpad-acpi.txt
+++ b/Documentation/laptops/thinkpad-acpi.txt
@@ -199,18 +199,22 @@ kind to allow it (and it often doesn't!).
199 199
200Not all bits in the mask can be modified. Not all bits that can be 200Not all bits in the mask can be modified. Not all bits that can be
201modified do anything. Not all hot keys can be individually controlled 201modified do anything. Not all hot keys can be individually controlled
202by the mask. Some models do not support the mask at all, and in those 202by the mask. Some models do not support the mask at all. The behaviour
203models, hot keys cannot be controlled individually. The behaviour of 203of the mask is, therefore, highly dependent on the ThinkPad model.
204the mask is, therefore, highly dependent on the ThinkPad model. 204
205The driver will filter out any unmasked hotkeys, so even if the firmware
206doesn't allow disabling an specific hotkey, the driver will not report
207events for unmasked hotkeys.
205 208
206Note that unmasking some keys prevents their default behavior. For 209Note that unmasking some keys prevents their default behavior. For
207example, if Fn+F5 is unmasked, that key will no longer enable/disable 210example, if Fn+F5 is unmasked, that key will no longer enable/disable
208Bluetooth by itself. 211Bluetooth by itself in firmware.
209 212
210Note also that not all Fn key combinations are supported through ACPI. 213Note also that not all Fn key combinations are supported through ACPI
211For example, on the X40, the brightness, volume and "Access IBM" buttons 214depending on the ThinkPad model and firmware version. On those
212do not generate ACPI events even with this driver. They *can* be used 215ThinkPads, it is still possible to support some extra hotkeys by
213through the "ThinkPad Buttons" utility, see http://www.nongnu.org/tpb/ 216polling the "CMOS NVRAM" at least 10 times per second. The driver
217attempts to enables this functionality automatically when required.
214 218
215procfs notes: 219procfs notes:
216 220
@@ -255,18 +259,11 @@ sysfs notes:
255 1: does nothing 259 1: does nothing
256 260
257 hotkey_mask: 261 hotkey_mask:
258 bit mask to enable driver-handling (and depending on 262 bit mask to enable reporting (and depending on
259 the firmware, ACPI event generation) for each hot key 263 the firmware, ACPI event generation) for each hot key
260 (see above). Returns the current status of the hot keys 264 (see above). Returns the current status of the hot keys
261 mask, and allows one to modify it. 265 mask, and allows one to modify it.
262 266
263 Note: when NVRAM polling is active, the firmware mask
264 will be different from the value returned by
265 hotkey_mask. The driver will retain enabled bits for
266 hotkeys that are under NVRAM polling even if the
267 firmware refuses them, and will not set these bits on
268 the firmware hot key mask.
269
270 hotkey_all_mask: 267 hotkey_all_mask:
271 bit mask that should enable event reporting for all 268 bit mask that should enable event reporting for all
272 supported hot keys, when echoed to hotkey_mask above. 269 supported hot keys, when echoed to hotkey_mask above.
@@ -279,7 +276,8 @@ sysfs notes:
279 bit mask that should enable event reporting for all 276 bit mask that should enable event reporting for all
280 supported hot keys, except those which are always 277 supported hot keys, except those which are always
281 handled by the firmware anyway. Echo it to 278 handled by the firmware anyway. Echo it to
282 hotkey_mask above, to use. 279 hotkey_mask above, to use. This is the default mask
280 used by the driver.
283 281
284 hotkey_source_mask: 282 hotkey_source_mask:
285 bit mask that selects which hot keys will the driver 283 bit mask that selects which hot keys will the driver
@@ -287,9 +285,10 @@ sysfs notes:
287 based on the capabilities reported by the ACPI firmware, 285 based on the capabilities reported by the ACPI firmware,
288 but it can be overridden at runtime. 286 but it can be overridden at runtime.
289 287
290 Hot keys whose bits are set in both hotkey_source_mask 288 Hot keys whose bits are set in hotkey_source_mask are
291 and also on hotkey_mask are polled for in NVRAM. Only a 289 polled for in NVRAM, and reported as hotkey events if
292 few hot keys are available through CMOS NVRAM polling. 290 enabled in hotkey_mask. Only a few hot keys are
291 available through CMOS NVRAM polling.
293 292
294 Warning: when in NVRAM mode, the volume up/down/mute 293 Warning: when in NVRAM mode, the volume up/down/mute
295 keys are synthesized according to changes in the mixer, 294 keys are synthesized according to changes in the mixer,
@@ -525,6 +524,7 @@ compatibility purposes when hotkey_report_mode is set to 1.
5250x2305 System is waking up from suspend to eject bay 5240x2305 System is waking up from suspend to eject bay
5260x2404 System is waking up from hibernation to undock 5250x2404 System is waking up from hibernation to undock
5270x2405 System is waking up from hibernation to eject bay 5260x2405 System is waking up from hibernation to eject bay
5270x5010 Brightness level changed/control event
528 528
529The above events are never propagated by the driver. 529The above events are never propagated by the driver.
530 530
@@ -532,7 +532,6 @@ The above events are never propagated by the driver.
5320x4003 Undocked (see 0x2x04), can sleep again 5320x4003 Undocked (see 0x2x04), can sleep again
5330x500B Tablet pen inserted into its storage bay 5330x500B Tablet pen inserted into its storage bay
5340x500C Tablet pen removed from its storage bay 5340x500C Tablet pen removed from its storage bay
5350x5010 Brightness level changed (newer Lenovo BIOSes)
536 535
537The above events are propagated by the driver. 536The above events are propagated by the driver.
538 537
@@ -621,6 +620,8 @@ For Lenovo models *with* ACPI backlight control:
6212. Do *NOT* load up ACPI video, enable the hotkeys in thinkpad-acpi, 6202. Do *NOT* load up ACPI video, enable the hotkeys in thinkpad-acpi,
622 and map them to KEY_BRIGHTNESS_UP and KEY_BRIGHTNESS_DOWN. Process 621 and map them to KEY_BRIGHTNESS_UP and KEY_BRIGHTNESS_DOWN. Process
623 these keys on userspace somehow (e.g. by calling xbacklight). 622 these keys on userspace somehow (e.g. by calling xbacklight).
623 The driver will do this automatically if it detects that ACPI video
624 has been disabled.
624 625
625 626
626Bluetooth 627Bluetooth
@@ -1459,3 +1460,8 @@ Sysfs interface changelog:
14590x020400: Marker for 16 LEDs support. Also, LEDs that are known 14600x020400: Marker for 16 LEDs support. Also, LEDs that are known
1460 to not exist in a given model are not registered with 1461 to not exist in a given model are not registered with
1461 the LED sysfs class anymore. 1462 the LED sysfs class anymore.
1463
14640x020500: Updated hotkey driver, hotkey_mask is always available
1465 and it is always able to disable hot keys. Very old
1466 thinkpads are properly supported. hotkey_bios_mask
1467 is deprecated and marked for removal.
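
The documentation hunks above describe how hotkey_mask, hotkey_all_mask and hotkey_source_mask interact through sysfs. As a rough userspace sketch of that interface (an illustration, not part of this patch), the snippet below reads the current mask and the all-keys mask, then echoes the latter back into hotkey_mask to enable reporting for every supported hot key. The sysfs directory is the one thinkpad_acpi conventionally registers; treat the exact path, and the assumption that both attributes print a single hex value, as unverified here.

/*
 * Illustrative sketch only -- not part of this patch.  The sysfs path and
 * attribute formats are assumptions based on the documentation above.
 */
#include <stdio.h>
#include <stdlib.h>

#define TP_DIR "/sys/devices/platform/thinkpad_acpi"

static unsigned long read_mask(const char *attr)
{
	char path[256];
	unsigned long mask = 0;
	FILE *f;

	snprintf(path, sizeof(path), TP_DIR "/%s", attr);
	f = fopen(path, "r");
	if (!f) {
		perror(path);
		exit(EXIT_FAILURE);
	}
	/* "%lx" accepts an optional 0x prefix on the value */
	if (fscanf(f, "%lx", &mask) != 1)
		mask = 0;
	fclose(f);
	return mask;
}

int main(void)
{
	unsigned long cur = read_mask("hotkey_mask");
	unsigned long all = read_mask("hotkey_all_mask");
	FILE *f;

	printf("hotkey_mask: 0x%08lx, hotkey_all_mask: 0x%08lx\n", cur, all);

	/* Echo hotkey_all_mask into hotkey_mask to enable all supported keys. */
	f = fopen(TP_DIR "/hotkey_mask", "w");
	if (!f) {
		perror(TP_DIR "/hotkey_mask");
		return EXIT_FAILURE;
	}
	fprintf(f, "0x%lx\n", all);
	fclose(f);
	return 0;
}

Writing hotkey_mask normally requires root, and per the changelog entry above (0x020500) the attribute is always available and always able to disable hot keys on this driver version.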
diff --git a/arch/alpha/kernel/vmlinux.lds.S b/arch/alpha/kernel/vmlinux.lds.S
index 2906665b1c10..ecf4d488333d 100644
--- a/arch/alpha/kernel/vmlinux.lds.S
+++ b/arch/alpha/kernel/vmlinux.lds.S
@@ -1,4 +1,5 @@
1#include <asm-generic/vmlinux.lds.h> 1#include <asm-generic/vmlinux.lds.h>
2#include <asm/thread_info.h>
2#include <asm/page.h> 3#include <asm/page.h>
3#include <asm/thread_info.h> 4#include <asm/thread_info.h>
4 5
diff --git a/arch/arm/kernel/entry-header.S b/arch/arm/kernel/entry-header.S
index e17e3c30d957..ac34c0d9384b 100644
--- a/arch/arm/kernel/entry-header.S
+++ b/arch/arm/kernel/entry-header.S
@@ -83,6 +83,8 @@
83 ldr r0, [sp] 83 ldr r0, [sp]
84 strex r1, r2, [sp] @ clear the exclusive monitor 84 strex r1, r2, [sp] @ clear the exclusive monitor
85 ldmib sp, {r1 - pc}^ @ load r1 - pc, cpsr 85 ldmib sp, {r1 - pc}^ @ load r1 - pc, cpsr
86#else
87 ldmia sp, {r0 - pc}^ @ load r0 - pc, cpsr
86#endif 88#endif
87 .endm 89 .endm
88 90
diff --git a/arch/mips/kernel/Makefile b/arch/mips/kernel/Makefile
index e96122159928..eecd2a9f155c 100644
--- a/arch/mips/kernel/Makefile
+++ b/arch/mips/kernel/Makefile
@@ -2,6 +2,8 @@
2# Makefile for the Linux/MIPS kernel. 2# Makefile for the Linux/MIPS kernel.
3# 3#
4 4
5CPPFLAGS_vmlinux.lds := $(KBUILD_CFLAGS)
6
5extra-y := head.o init_task.o vmlinux.lds 7extra-y := head.o init_task.o vmlinux.lds
6 8
7obj-y += cpu-probe.o branch.o entry.o genex.o irq.o process.o \ 9obj-y += cpu-probe.o branch.o entry.o genex.o irq.o process.o \
diff --git a/arch/mips/kernel/vmlinux.lds.S b/arch/mips/kernel/vmlinux.lds.S
index 9bf0e3df7c5a..162b29954baa 100644
--- a/arch/mips/kernel/vmlinux.lds.S
+++ b/arch/mips/kernel/vmlinux.lds.S
@@ -11,15 +11,15 @@ PHDRS {
11 note PT_NOTE FLAGS(4); /* R__ */ 11 note PT_NOTE FLAGS(4); /* R__ */
12} 12}
13 13
14ifdef CONFIG_32BIT 14#ifdef CONFIG_32BIT
15 ifdef CONFIG_CPU_LITTLE_ENDIAN 15 #ifdef CONFIG_CPU_LITTLE_ENDIAN
16 jiffies = jiffies_64; 16 jiffies = jiffies_64;
17 else 17 #else
18 jiffies = jiffies_64 + 4; 18 jiffies = jiffies_64 + 4;
19 endif 19 #endif
20else 20#else
21 jiffies = jiffies_64; 21 jiffies = jiffies_64;
22endif 22#endif
23 23
24SECTIONS 24SECTIONS
25{ 25{
diff --git a/arch/sparc/include/asm/vio.h b/arch/sparc/include/asm/vio.h
index 6cdbf7e7351d..9d83d3bcb494 100644
--- a/arch/sparc/include/asm/vio.h
+++ b/arch/sparc/include/asm/vio.h
@@ -258,8 +258,6 @@ static inline void *vio_dring_entry(struct vio_dring_state *dr,
258static inline u32 vio_dring_avail(struct vio_dring_state *dr, 258static inline u32 vio_dring_avail(struct vio_dring_state *dr,
259 unsigned int ring_size) 259 unsigned int ring_size)
260{ 260{
261 MAYBE_BUILD_BUG_ON(!is_power_of_2(ring_size));
262
263 return (dr->pending - 261 return (dr->pending -
264 ((dr->prod - dr->cons) & (ring_size - 1))); 262 ((dr->prod - dr->cons) & (ring_size - 1)));
265} 263}
diff --git a/arch/x86/boot/compressed/head_32.S b/arch/x86/boot/compressed/head_32.S
index 75e4f001e706..f543b70ffae2 100644
--- a/arch/x86/boot/compressed/head_32.S
+++ b/arch/x86/boot/compressed/head_32.S
@@ -23,13 +23,14 @@
23 */ 23 */
24 .text 24 .text
25 25
26#include <linux/init.h>
26#include <linux/linkage.h> 27#include <linux/linkage.h>
27#include <asm/segment.h> 28#include <asm/segment.h>
28#include <asm/page_types.h> 29#include <asm/page_types.h>
29#include <asm/boot.h> 30#include <asm/boot.h>
30#include <asm/asm-offsets.h> 31#include <asm/asm-offsets.h>
31 32
32 .section ".text.head","ax",@progbits 33 __HEAD
33ENTRY(startup_32) 34ENTRY(startup_32)
34 cld 35 cld
35 /* 36 /*
diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S
index f62c284db9eb..077e1b69198e 100644
--- a/arch/x86/boot/compressed/head_64.S
+++ b/arch/x86/boot/compressed/head_64.S
@@ -24,6 +24,7 @@
24 .code32 24 .code32
25 .text 25 .text
26 26
27#include <linux/init.h>
27#include <linux/linkage.h> 28#include <linux/linkage.h>
28#include <asm/segment.h> 29#include <asm/segment.h>
29#include <asm/pgtable_types.h> 30#include <asm/pgtable_types.h>
@@ -33,7 +34,7 @@
33#include <asm/processor-flags.h> 34#include <asm/processor-flags.h>
34#include <asm/asm-offsets.h> 35#include <asm/asm-offsets.h>
35 36
36 .section ".text.head" 37 __HEAD
37 .code32 38 .code32
38ENTRY(startup_32) 39ENTRY(startup_32)
39 cld 40 cld
diff --git a/arch/x86/boot/compressed/vmlinux.lds.S b/arch/x86/boot/compressed/vmlinux.lds.S
index cc353e1b3ffd..f4193bb48782 100644
--- a/arch/x86/boot/compressed/vmlinux.lds.S
+++ b/arch/x86/boot/compressed/vmlinux.lds.S
@@ -1,3 +1,5 @@
1#include <asm-generic/vmlinux.lds.h>
2
1OUTPUT_FORMAT(CONFIG_OUTPUT_FORMAT, CONFIG_OUTPUT_FORMAT, CONFIG_OUTPUT_FORMAT) 3OUTPUT_FORMAT(CONFIG_OUTPUT_FORMAT, CONFIG_OUTPUT_FORMAT, CONFIG_OUTPUT_FORMAT)
2 4
3#undef i386 5#undef i386
@@ -18,9 +20,9 @@ SECTIONS
18 * address 0. 20 * address 0.
19 */ 21 */
20 . = 0; 22 . = 0;
21 .text.head : { 23 .head.text : {
22 _head = . ; 24 _head = . ;
23 *(.text.head) 25 HEAD_TEXT
24 _ehead = . ; 26 _ehead = . ;
25 } 27 }
26 .rodata.compressed : { 28 .rodata.compressed : {
diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
index 7b467bf3c680..d1f4a760be23 100644
--- a/arch/x86/include/asm/pgtable_types.h
+++ b/arch/x86/include/asm/pgtable_types.h
@@ -277,6 +277,7 @@ static inline pteval_t pte_flags(pte_t pte)
277typedef struct page *pgtable_t; 277typedef struct page *pgtable_t;
278 278
279extern pteval_t __supported_pte_mask; 279extern pteval_t __supported_pte_mask;
280extern void set_nx(void);
280extern int nx_enabled; 281extern int nx_enabled;
281 282
282#define pgprot_writecombine pgprot_writecombine 283#define pgprot_writecombine pgprot_writecombine
diff --git a/arch/x86/include/asm/topology.h b/arch/x86/include/asm/topology.h
index 6f0695d744bf..25a92842dd99 100644
--- a/arch/x86/include/asm/topology.h
+++ b/arch/x86/include/asm/topology.h
@@ -165,21 +165,11 @@ static inline int numa_node_id(void)
165 return 0; 165 return 0;
166} 166}
167 167
168static inline int cpu_to_node(int cpu)
169{
170 return 0;
171}
172
173static inline int early_cpu_to_node(int cpu) 168static inline int early_cpu_to_node(int cpu)
174{ 169{
175 return 0; 170 return 0;
176} 171}
177 172
178static inline const struct cpumask *cpumask_of_node(int node)
179{
180 return cpu_online_mask;
181}
182
183static inline void setup_node_to_cpumask_map(void) { } 173static inline void setup_node_to_cpumask_map(void) { }
184 174
185#endif 175#endif
diff --git a/arch/x86/kernel/cpu/mcheck/mce-inject.c b/arch/x86/kernel/cpu/mcheck/mce-inject.c
index 7029f0e2acad..472763d92098 100644
--- a/arch/x86/kernel/cpu/mcheck/mce-inject.c
+++ b/arch/x86/kernel/cpu/mcheck/mce-inject.c
@@ -98,8 +98,9 @@ static struct notifier_block mce_raise_nb = {
98}; 98};
99 99
100/* Inject mce on current CPU */ 100/* Inject mce on current CPU */
101static int raise_local(struct mce *m) 101static int raise_local(void)
102{ 102{
103 struct mce *m = &__get_cpu_var(injectm);
103 int context = MCJ_CTX(m->inject_flags); 104 int context = MCJ_CTX(m->inject_flags);
104 int ret = 0; 105 int ret = 0;
105 int cpu = m->extcpu; 106 int cpu = m->extcpu;
@@ -167,12 +168,12 @@ static void raise_mce(struct mce *m)
167 } 168 }
168 cpu_relax(); 169 cpu_relax();
169 } 170 }
170 raise_local(m); 171 raise_local();
171 put_cpu(); 172 put_cpu();
172 put_online_cpus(); 173 put_online_cpus();
173 } else 174 } else
174#endif 175#endif
175 raise_local(m); 176 raise_local();
176} 177}
177 178
178/* Error injection interface */ 179/* Error injection interface */
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
index 2f5aab26320e..4b2af86e3e8d 100644
--- a/arch/x86/kernel/cpu/mcheck/mce.c
+++ b/arch/x86/kernel/cpu/mcheck/mce.c
@@ -305,13 +305,25 @@ static int msr_to_offset(u32 msr)
305static u64 mce_rdmsrl(u32 msr) 305static u64 mce_rdmsrl(u32 msr)
306{ 306{
307 u64 v; 307 u64 v;
308
308 if (__get_cpu_var(injectm).finished) { 309 if (__get_cpu_var(injectm).finished) {
309 int offset = msr_to_offset(msr); 310 int offset = msr_to_offset(msr);
311
310 if (offset < 0) 312 if (offset < 0)
311 return 0; 313 return 0;
312 return *(u64 *)((char *)&__get_cpu_var(injectm) + offset); 314 return *(u64 *)((char *)&__get_cpu_var(injectm) + offset);
313 } 315 }
314 rdmsrl(msr, v); 316
317 if (rdmsrl_safe(msr, &v)) {
318 WARN_ONCE(1, "mce: Unable to read msr %d!\n", msr);
319 /*
320 * Return zero in case the access faulted. This should
321 * not happen normally but can happen if the CPU does
322 * something weird, or if the code is buggy.
323 */
324 v = 0;
325 }
326
315 return v; 327 return v;
316} 328}
317 329
@@ -319,6 +331,7 @@ static void mce_wrmsrl(u32 msr, u64 v)
319{ 331{
320 if (__get_cpu_var(injectm).finished) { 332 if (__get_cpu_var(injectm).finished) {
321 int offset = msr_to_offset(msr); 333 int offset = msr_to_offset(msr);
334
322 if (offset >= 0) 335 if (offset >= 0)
323 *(u64 *)((char *)&__get_cpu_var(injectm) + offset) = v; 336 *(u64 *)((char *)&__get_cpu_var(injectm) + offset) = v;
324 return; 337 return;
@@ -415,7 +428,7 @@ static inline void mce_get_rip(struct mce *m, struct pt_regs *regs)
415 m->ip = mce_rdmsrl(rip_msr); 428 m->ip = mce_rdmsrl(rip_msr);
416} 429}
417 430
418#ifdef CONFIG_X86_LOCAL_APIC 431#ifdef CONFIG_X86_LOCAL_APIC
419/* 432/*
420 * Called after interrupts have been reenabled again 433 * Called after interrupts have been reenabled again
421 * when a MCE happened during an interrupts off region 434 * when a MCE happened during an interrupts off region
@@ -1172,6 +1185,7 @@ static int mce_banks_init(void)
1172 return -ENOMEM; 1185 return -ENOMEM;
1173 for (i = 0; i < banks; i++) { 1186 for (i = 0; i < banks; i++) {
1174 struct mce_bank *b = &mce_banks[i]; 1187 struct mce_bank *b = &mce_banks[i];
1188
1175 b->ctl = -1ULL; 1189 b->ctl = -1ULL;
1176 b->init = 1; 1190 b->init = 1;
1177 } 1191 }
@@ -1203,6 +1217,7 @@ static int __cpuinit mce_cap_init(void)
1203 banks = b; 1217 banks = b;
1204 if (!mce_banks) { 1218 if (!mce_banks) {
1205 int err = mce_banks_init(); 1219 int err = mce_banks_init();
1220
1206 if (err) 1221 if (err)
1207 return err; 1222 return err;
1208 } 1223 }
@@ -1237,6 +1252,7 @@ static void mce_init(void)
1237 1252
1238 for (i = 0; i < banks; i++) { 1253 for (i = 0; i < banks; i++) {
1239 struct mce_bank *b = &mce_banks[i]; 1254 struct mce_bank *b = &mce_banks[i];
1255
1240 if (!b->init) 1256 if (!b->init)
1241 continue; 1257 continue;
1242 wrmsrl(MSR_IA32_MCx_CTL(i), b->ctl); 1258 wrmsrl(MSR_IA32_MCx_CTL(i), b->ctl);
@@ -1626,6 +1642,7 @@ static int mce_disable(void)
1626 1642
1627 for (i = 0; i < banks; i++) { 1643 for (i = 0; i < banks; i++) {
1628 struct mce_bank *b = &mce_banks[i]; 1644 struct mce_bank *b = &mce_banks[i];
1645
1629 if (b->init) 1646 if (b->init)
1630 wrmsrl(MSR_IA32_MCx_CTL(i), 0); 1647 wrmsrl(MSR_IA32_MCx_CTL(i), 0);
1631 } 1648 }
@@ -1911,6 +1928,7 @@ static void mce_disable_cpu(void *h)
1911 cmci_clear(); 1928 cmci_clear();
1912 for (i = 0; i < banks; i++) { 1929 for (i = 0; i < banks; i++) {
1913 struct mce_bank *b = &mce_banks[i]; 1930 struct mce_bank *b = &mce_banks[i];
1931
1914 if (b->init) 1932 if (b->init)
1915 wrmsrl(MSR_IA32_MCx_CTL(i), 0); 1933 wrmsrl(MSR_IA32_MCx_CTL(i), 0);
1916 } 1934 }
@@ -1928,6 +1946,7 @@ static void mce_reenable_cpu(void *h)
1928 cmci_reenable(); 1946 cmci_reenable();
1929 for (i = 0; i < banks; i++) { 1947 for (i = 0; i < banks; i++) {
1930 struct mce_bank *b = &mce_banks[i]; 1948 struct mce_bank *b = &mce_banks[i];
1949
1931 if (b->init) 1950 if (b->init)
1932 wrmsrl(MSR_IA32_MCx_CTL(i), b->ctl); 1951 wrmsrl(MSR_IA32_MCx_CTL(i), b->ctl);
1933 } 1952 }
diff --git a/arch/x86/kernel/cpu/mcheck/therm_throt.c b/arch/x86/kernel/cpu/mcheck/therm_throt.c
index 63a56d147e4a..b3a1dba75330 100644
--- a/arch/x86/kernel/cpu/mcheck/therm_throt.c
+++ b/arch/x86/kernel/cpu/mcheck/therm_throt.c
@@ -34,20 +34,31 @@
34/* How long to wait between reporting thermal events */ 34/* How long to wait between reporting thermal events */
35#define CHECK_INTERVAL (300 * HZ) 35#define CHECK_INTERVAL (300 * HZ)
36 36
37static DEFINE_PER_CPU(__u64, next_check) = INITIAL_JIFFIES; 37/*
38static DEFINE_PER_CPU(unsigned long, thermal_throttle_count); 38 * Current thermal throttling state:
39static DEFINE_PER_CPU(bool, thermal_throttle_active); 39 */
40struct thermal_state {
41 bool is_throttled;
42
43 u64 next_check;
44 unsigned long throttle_count;
45 unsigned long last_throttle_count;
46};
47
48static DEFINE_PER_CPU(struct thermal_state, thermal_state);
40 49
41static atomic_t therm_throt_en = ATOMIC_INIT(0); 50static atomic_t therm_throt_en = ATOMIC_INIT(0);
42 51
43#ifdef CONFIG_SYSFS 52#ifdef CONFIG_SYSFS
44#define define_therm_throt_sysdev_one_ro(_name) \ 53#define define_therm_throt_sysdev_one_ro(_name) \
45 static SYSDEV_ATTR(_name, 0444, therm_throt_sysdev_show_##_name, NULL) 54 static SYSDEV_ATTR(_name, 0444, therm_throt_sysdev_show_##_name, NULL)
46 55
47#define define_therm_throt_sysdev_show_func(name) \ 56#define define_therm_throt_sysdev_show_func(name) \
48static ssize_t therm_throt_sysdev_show_##name(struct sys_device *dev, \ 57 \
49 struct sysdev_attribute *attr, \ 58static ssize_t therm_throt_sysdev_show_##name( \
50 char *buf) \ 59 struct sys_device *dev, \
60 struct sysdev_attribute *attr, \
61 char *buf) \
51{ \ 62{ \
52 unsigned int cpu = dev->id; \ 63 unsigned int cpu = dev->id; \
53 ssize_t ret; \ 64 ssize_t ret; \
@@ -55,7 +66,7 @@ static ssize_t therm_throt_sysdev_show_##name(struct sys_device *dev, \
55 preempt_disable(); /* CPU hotplug */ \ 66 preempt_disable(); /* CPU hotplug */ \
56 if (cpu_online(cpu)) \ 67 if (cpu_online(cpu)) \
57 ret = sprintf(buf, "%lu\n", \ 68 ret = sprintf(buf, "%lu\n", \
58 per_cpu(thermal_throttle_##name, cpu)); \ 69 per_cpu(thermal_state, cpu).name); \
59 else \ 70 else \
60 ret = 0; \ 71 ret = 0; \
61 preempt_enable(); \ 72 preempt_enable(); \
@@ -63,11 +74,11 @@ static ssize_t therm_throt_sysdev_show_##name(struct sys_device *dev, \
63 return ret; \ 74 return ret; \
64} 75}
65 76
66define_therm_throt_sysdev_show_func(count); 77define_therm_throt_sysdev_show_func(throttle_count);
67define_therm_throt_sysdev_one_ro(count); 78define_therm_throt_sysdev_one_ro(throttle_count);
68 79
69static struct attribute *thermal_throttle_attrs[] = { 80static struct attribute *thermal_throttle_attrs[] = {
70 &attr_count.attr, 81 &attr_throttle_count.attr,
71 NULL 82 NULL
72}; 83};
73 84
@@ -93,33 +104,39 @@ static struct attribute_group thermal_throttle_attr_group = {
93 * 1 : Event should be logged further, and a message has been 104 * 1 : Event should be logged further, and a message has been
94 * printed to the syslog. 105 * printed to the syslog.
95 */ 106 */
96static int therm_throt_process(int curr) 107static int therm_throt_process(bool is_throttled)
97{ 108{
98 unsigned int cpu = smp_processor_id(); 109 struct thermal_state *state;
99 __u64 tmp_jiffs = get_jiffies_64(); 110 unsigned int this_cpu;
100 bool was_throttled = __get_cpu_var(thermal_throttle_active); 111 bool was_throttled;
101 bool is_throttled = __get_cpu_var(thermal_throttle_active) = curr; 112 u64 now;
113
114 this_cpu = smp_processor_id();
115 now = get_jiffies_64();
116 state = &per_cpu(thermal_state, this_cpu);
117
118 was_throttled = state->is_throttled;
119 state->is_throttled = is_throttled;
102 120
103 if (is_throttled) 121 if (is_throttled)
104 __get_cpu_var(thermal_throttle_count)++; 122 state->throttle_count++;
105 123
106 if (!(was_throttled ^ is_throttled) && 124 if (time_before64(now, state->next_check) &&
107 time_before64(tmp_jiffs, __get_cpu_var(next_check))) 125 state->throttle_count != state->last_throttle_count)
108 return 0; 126 return 0;
109 127
110 __get_cpu_var(next_check) = tmp_jiffs + CHECK_INTERVAL; 128 state->next_check = now + CHECK_INTERVAL;
129 state->last_throttle_count = state->throttle_count;
111 130
112 /* if we just entered the thermal event */ 131 /* if we just entered the thermal event */
113 if (is_throttled) { 132 if (is_throttled) {
114 printk(KERN_CRIT "CPU%d: Temperature above threshold, " 133 printk(KERN_CRIT "CPU%d: Temperature above threshold, cpu clock throttled (total events = %lu)\n", this_cpu, state->throttle_count);
115 "cpu clock throttled (total events = %lu)\n",
116 cpu, __get_cpu_var(thermal_throttle_count));
117 134
118 add_taint(TAINT_MACHINE_CHECK); 135 add_taint(TAINT_MACHINE_CHECK);
119 return 1; 136 return 1;
120 } 137 }
121 if (was_throttled) { 138 if (was_throttled) {
122 printk(KERN_INFO "CPU%d: Temperature/speed normal\n", cpu); 139 printk(KERN_INFO "CPU%d: Temperature/speed normal\n", this_cpu);
123 return 1; 140 return 1;
124 } 141 }
125 142
@@ -213,7 +230,7 @@ static void intel_thermal_interrupt(void)
213 __u64 msr_val; 230 __u64 msr_val;
214 231
215 rdmsrl(MSR_IA32_THERM_STATUS, msr_val); 232 rdmsrl(MSR_IA32_THERM_STATUS, msr_val);
216 if (therm_throt_process(msr_val & THERM_STATUS_PROCHOT)) 233 if (therm_throt_process((msr_val & THERM_STATUS_PROCHOT) != 0))
217 mce_log_therm_throt_event(msr_val); 234 mce_log_therm_throt_event(msr_val);
218} 235}
219 236
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index a3c7adb06b78..b5801c311846 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -1790,6 +1790,9 @@ void smp_perf_pending_interrupt(struct pt_regs *regs)
1790void set_perf_event_pending(void) 1790void set_perf_event_pending(void)
1791{ 1791{
1792#ifdef CONFIG_X86_LOCAL_APIC 1792#ifdef CONFIG_X86_LOCAL_APIC
1793 if (!x86_pmu.apic || !x86_pmu_initialized())
1794 return;
1795
1793 apic->send_IPI_self(LOCAL_PENDING_VECTOR); 1796 apic->send_IPI_self(LOCAL_PENDING_VECTOR);
1794#endif 1797#endif
1795} 1798}
diff --git a/arch/x86/kernel/early_printk.c b/arch/x86/kernel/early_printk.c
index 2acfd3fdc0cc..41fd965c80c6 100644
--- a/arch/x86/kernel/early_printk.c
+++ b/arch/x86/kernel/early_printk.c
@@ -178,6 +178,11 @@ asmlinkage void early_printk(const char *fmt, ...)
178 178
179static inline void early_console_register(struct console *con, int keep_early) 179static inline void early_console_register(struct console *con, int keep_early)
180{ 180{
181 if (early_console->index != -1) {
182 printk(KERN_CRIT "ERROR: earlyprintk= %s already used\n",
183 con->name);
184 return;
185 }
181 early_console = con; 186 early_console = con;
182 if (keep_early) 187 if (keep_early)
183 early_console->flags &= ~CON_BOOT; 188 early_console->flags &= ~CON_BOOT;
diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
index 218aad7ee76e..050c278481b1 100644
--- a/arch/x86/kernel/head_32.S
+++ b/arch/x86/kernel/head_32.S
@@ -79,7 +79,7 @@ RESERVE_BRK(pagetables, INIT_MAP_SIZE)
79 * any particular GDT layout, because we load our own as soon as we 79 * any particular GDT layout, because we load our own as soon as we
80 * can. 80 * can.
81 */ 81 */
82.section .text.head,"ax",@progbits 82__HEAD
83ENTRY(startup_32) 83ENTRY(startup_32)
84 /* test KEEP_SEGMENTS flag to see if the bootloader is asking 84 /* test KEEP_SEGMENTS flag to see if the bootloader is asking
85 us to not reload segments */ 85 us to not reload segments */
diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
index d0bc0a13a437..780cd928fcd5 100644
--- a/arch/x86/kernel/head_64.S
+++ b/arch/x86/kernel/head_64.S
@@ -40,7 +40,7 @@ L4_START_KERNEL = pgd_index(__START_KERNEL_map)
40L3_START_KERNEL = pud_index(__START_KERNEL_map) 40L3_START_KERNEL = pud_index(__START_KERNEL_map)
41 41
42 .text 42 .text
43 .section .text.head 43 __HEAD
44 .code64 44 .code64
45 .globl startup_64 45 .globl startup_64
46startup_64: 46startup_64:
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
index a665c71352b8..7e37dcee0cc3 100644
--- a/arch/x86/kernel/traps.c
+++ b/arch/x86/kernel/traps.c
@@ -72,11 +72,9 @@ char ignore_fpu_irq;
72 72
73/* 73/*
74 * The IDT has to be page-aligned to simplify the Pentium 74 * The IDT has to be page-aligned to simplify the Pentium
75 * F0 0F bug workaround.. We have a special link segment 75 * F0 0F bug workaround.
76 * for this.
77 */ 76 */
78gate_desc idt_table[NR_VECTORS] 77gate_desc idt_table[NR_VECTORS] __page_aligned_data = { { { { 0, 0 } } }, };
79 __attribute__((__section__(".data.idt"))) = { { { { 0, 0 } } }, };
80#endif 78#endif
81 79
82DECLARE_BITMAP(used_vectors, NR_VECTORS); 80DECLARE_BITMAP(used_vectors, NR_VECTORS);
diff --git a/arch/x86/kernel/tsc_sync.c b/arch/x86/kernel/tsc_sync.c
index 027b5b498993..f37930954d15 100644
--- a/arch/x86/kernel/tsc_sync.c
+++ b/arch/x86/kernel/tsc_sync.c
@@ -114,7 +114,7 @@ void __cpuinit check_tsc_sync_source(int cpu)
114 return; 114 return;
115 115
116 if (boot_cpu_has(X86_FEATURE_TSC_RELIABLE)) { 116 if (boot_cpu_has(X86_FEATURE_TSC_RELIABLE)) {
117 pr_info("Skipping synchronization checks as TSC is reliable.\n"); 117 printk_once(KERN_INFO "Skipping synchronization checks as TSC is reliable.\n");
118 return; 118 return;
119 } 119 }
120 120
diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
index a46acccec38a..92929fb3f9fa 100644
--- a/arch/x86/kernel/vmlinux.lds.S
+++ b/arch/x86/kernel/vmlinux.lds.S
@@ -65,17 +65,11 @@ SECTIONS
65#endif 65#endif
66 66
67 /* Text and read-only data */ 67 /* Text and read-only data */
68
69 /* bootstrapping code */
70 .text.head : AT(ADDR(.text.head) - LOAD_OFFSET) {
71 _text = .;
72 *(.text.head)
73 } :text = 0x9090
74
75 /* The rest of the text */
76 .text : AT(ADDR(.text) - LOAD_OFFSET) { 68 .text : AT(ADDR(.text) - LOAD_OFFSET) {
69 _text = .;
70 /* bootstrapping code */
71 HEAD_TEXT
77#ifdef CONFIG_X86_32 72#ifdef CONFIG_X86_32
78 /* not really needed, already page aligned */
79 . = ALIGN(PAGE_SIZE); 73 . = ALIGN(PAGE_SIZE);
80 *(.text.page_aligned) 74 *(.text.page_aligned)
81#endif 75#endif
@@ -94,13 +88,7 @@ SECTIONS
94 88
95 NOTES :text :note 89 NOTES :text :note
96 90
97 /* Exception table */ 91 EXCEPTION_TABLE(16) :text = 0x9090
98 . = ALIGN(16);
99 __ex_table : AT(ADDR(__ex_table) - LOAD_OFFSET) {
100 __start___ex_table = .;
101 *(__ex_table)
102 __stop___ex_table = .;
103 } :text = 0x9090
104 92
105 RO_DATA(PAGE_SIZE) 93 RO_DATA(PAGE_SIZE)
106 94
@@ -118,7 +106,6 @@ SECTIONS
118#endif 106#endif
119 107
120 PAGE_ALIGNED_DATA(PAGE_SIZE) 108 PAGE_ALIGNED_DATA(PAGE_SIZE)
121 *(.data.idt)
122 109
123 CACHELINE_ALIGNED_DATA(CONFIG_X86_L1_CACHE_BYTES) 110 CACHELINE_ALIGNED_DATA(CONFIG_X86_L1_CACHE_BYTES)
124 111
@@ -135,24 +122,21 @@ SECTIONS
135#ifdef CONFIG_X86_64 122#ifdef CONFIG_X86_64
136 123
137#define VSYSCALL_ADDR (-10*1024*1024) 124#define VSYSCALL_ADDR (-10*1024*1024)
138#define VSYSCALL_PHYS_ADDR ((LOADADDR(.data) + SIZEOF(.data) + \
139 PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))
140#define VSYSCALL_VIRT_ADDR ((ADDR(.data) + SIZEOF(.data) + \
141 PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))
142 125
143#define VLOAD_OFFSET (VSYSCALL_ADDR - VSYSCALL_PHYS_ADDR) 126#define VLOAD_OFFSET (VSYSCALL_ADDR - __vsyscall_0 + LOAD_OFFSET)
144#define VLOAD(x) (ADDR(x) - VLOAD_OFFSET) 127#define VLOAD(x) (ADDR(x) - VLOAD_OFFSET)
145 128
146#define VVIRT_OFFSET (VSYSCALL_ADDR - VSYSCALL_VIRT_ADDR) 129#define VVIRT_OFFSET (VSYSCALL_ADDR - __vsyscall_0)
147#define VVIRT(x) (ADDR(x) - VVIRT_OFFSET) 130#define VVIRT(x) (ADDR(x) - VVIRT_OFFSET)
148 131
132 . = ALIGN(4096);
133 __vsyscall_0 = .;
134
149 . = VSYSCALL_ADDR; 135 . = VSYSCALL_ADDR;
150 .vsyscall_0 : AT(VSYSCALL_PHYS_ADDR) { 136 .vsyscall_0 : AT(VLOAD(.vsyscall_0)) {
151 *(.vsyscall_0) 137 *(.vsyscall_0)
152 } :user 138 } :user
153 139
154 __vsyscall_0 = VSYSCALL_VIRT_ADDR;
155
156 . = ALIGN(CONFIG_X86_L1_CACHE_BYTES); 140 . = ALIGN(CONFIG_X86_L1_CACHE_BYTES);
157 .vsyscall_fn : AT(VLOAD(.vsyscall_fn)) { 141 .vsyscall_fn : AT(VLOAD(.vsyscall_fn)) {
158 *(.vsyscall_fn) 142 *(.vsyscall_fn)
@@ -192,11 +176,9 @@ SECTIONS
192 *(.vsyscall_3) 176 *(.vsyscall_3)
193 } 177 }
194 178
195 . = VSYSCALL_VIRT_ADDR + PAGE_SIZE; 179 . = __vsyscall_0 + PAGE_SIZE;
196 180
197#undef VSYSCALL_ADDR 181#undef VSYSCALL_ADDR
198#undef VSYSCALL_PHYS_ADDR
199#undef VSYSCALL_VIRT_ADDR
200#undef VLOAD_OFFSET 182#undef VLOAD_OFFSET
201#undef VLOAD 183#undef VLOAD
202#undef VVIRT_OFFSET 184#undef VVIRT_OFFSET
@@ -219,36 +201,12 @@ SECTIONS
219 PERCPU_VADDR(0, :percpu) 201 PERCPU_VADDR(0, :percpu)
220#endif 202#endif
221 203
222 .init.text : AT(ADDR(.init.text) - LOAD_OFFSET) { 204 INIT_TEXT_SECTION(PAGE_SIZE)
223 _sinittext = .;
224 INIT_TEXT
225 _einittext = .;
226 }
227#ifdef CONFIG_X86_64 205#ifdef CONFIG_X86_64
228 :init 206 :init
229#endif 207#endif
230 208
231 .init.data : AT(ADDR(.init.data) - LOAD_OFFSET) { 209 INIT_DATA_SECTION(16)
232 INIT_DATA
233 }
234
235 . = ALIGN(16);
236 .init.setup : AT(ADDR(.init.setup) - LOAD_OFFSET) {
237 __setup_start = .;
238 *(.init.setup)
239 __setup_end = .;
240 }
241 .initcall.init : AT(ADDR(.initcall.init) - LOAD_OFFSET) {
242 __initcall_start = .;
243 INITCALLS
244 __initcall_end = .;
245 }
246
247 .con_initcall.init : AT(ADDR(.con_initcall.init) - LOAD_OFFSET) {
248 __con_initcall_start = .;
249 *(.con_initcall.init)
250 __con_initcall_end = .;
251 }
252 210
253 .x86_cpu_dev.init : AT(ADDR(.x86_cpu_dev.init) - LOAD_OFFSET) { 211 .x86_cpu_dev.init : AT(ADDR(.x86_cpu_dev.init) - LOAD_OFFSET) {
254 __x86_cpu_dev_start = .; 212 __x86_cpu_dev_start = .;
@@ -256,8 +214,6 @@ SECTIONS
256 __x86_cpu_dev_end = .; 214 __x86_cpu_dev_end = .;
257 } 215 }
258 216
259 SECURITY_INIT
260
261 . = ALIGN(8); 217 . = ALIGN(8);
262 .parainstructions : AT(ADDR(.parainstructions) - LOAD_OFFSET) { 218 .parainstructions : AT(ADDR(.parainstructions) - LOAD_OFFSET) {
263 __parainstructions = .; 219 __parainstructions = .;
@@ -288,15 +244,6 @@ SECTIONS
288 EXIT_DATA 244 EXIT_DATA
289 } 245 }
290 246
291#ifdef CONFIG_BLK_DEV_INITRD
292 . = ALIGN(PAGE_SIZE);
293 .init.ramfs : AT(ADDR(.init.ramfs) - LOAD_OFFSET) {
294 __initramfs_start = .;
295 *(.init.ramfs)
296 __initramfs_end = .;
297 }
298#endif
299
300#if !defined(CONFIG_X86_64) || !defined(CONFIG_SMP) 247#if !defined(CONFIG_X86_64) || !defined(CONFIG_SMP)
301 PERCPU(PAGE_SIZE) 248 PERCPU(PAGE_SIZE)
302#endif 249#endif
diff --git a/arch/x86/mm/Makefile b/arch/x86/mm/Makefile
index 9b5a9f59a478..06630d26e56d 100644
--- a/arch/x86/mm/Makefile
+++ b/arch/x86/mm/Makefile
@@ -1,9 +1,10 @@
1obj-y := init.o init_$(BITS).o fault.o ioremap.o extable.o pageattr.o mmap.o \ 1obj-y := init.o init_$(BITS).o fault.o ioremap.o extable.o pageattr.o mmap.o \
2 pat.o pgtable.o physaddr.o gup.o 2 pat.o pgtable.o physaddr.o gup.o setup_nx.o
3 3
4# Make sure __phys_addr has no stackprotector 4# Make sure __phys_addr has no stackprotector
5nostackp := $(call cc-option, -fno-stack-protector) 5nostackp := $(call cc-option, -fno-stack-protector)
6CFLAGS_physaddr.o := $(nostackp) 6CFLAGS_physaddr.o := $(nostackp)
7CFLAGS_setup_nx.o := $(nostackp)
7 8
8obj-$(CONFIG_SMP) += tlb.o 9obj-$(CONFIG_SMP) += tlb.o
9 10
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index 0607119cef94..73ffd5536f62 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -28,69 +28,6 @@ int direct_gbpages
28#endif 28#endif
29; 29;
30 30
31int nx_enabled;
32
33#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
34static int disable_nx __cpuinitdata;
35
36/*
37 * noexec = on|off
38 *
39 * Control non-executable mappings for processes.
40 *
41 * on Enable
42 * off Disable
43 */
44static int __init noexec_setup(char *str)
45{
46 if (!str)
47 return -EINVAL;
48 if (!strncmp(str, "on", 2)) {
49 __supported_pte_mask |= _PAGE_NX;
50 disable_nx = 0;
51 } else if (!strncmp(str, "off", 3)) {
52 disable_nx = 1;
53 __supported_pte_mask &= ~_PAGE_NX;
54 }
55 return 0;
56}
57early_param("noexec", noexec_setup);
58#endif
59
60#ifdef CONFIG_X86_PAE
61static void __init set_nx(void)
62{
63 unsigned int v[4], l, h;
64
65 if (cpu_has_pae && (cpuid_eax(0x80000000) > 0x80000001)) {
66 cpuid(0x80000001, &v[0], &v[1], &v[2], &v[3]);
67
68 if ((v[3] & (1 << 20)) && !disable_nx) {
69 rdmsr(MSR_EFER, l, h);
70 l |= EFER_NX;
71 wrmsr(MSR_EFER, l, h);
72 nx_enabled = 1;
73 __supported_pte_mask |= _PAGE_NX;
74 }
75 }
76}
77#else
78static inline void set_nx(void)
79{
80}
81#endif
82
83#ifdef CONFIG_X86_64
84void __cpuinit check_efer(void)
85{
86 unsigned long efer;
87
88 rdmsrl(MSR_EFER, efer);
89 if (!(efer & EFER_NX) || disable_nx)
90 __supported_pte_mask &= ~_PAGE_NX;
91}
92#endif
93
94static void __init find_early_table_space(unsigned long end, int use_pse, 31static void __init find_early_table_space(unsigned long end, int use_pse,
95 int use_gbpages) 32 int use_gbpages)
96{ 33{
diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
index 7257cf3decf9..e78cd0ec2bcf 100644
--- a/arch/x86/mm/pat.c
+++ b/arch/x86/mm/pat.c
@@ -81,6 +81,7 @@ enum {
81void pat_init(void) 81void pat_init(void)
82{ 82{
83 u64 pat; 83 u64 pat;
84 bool boot_cpu = !boot_pat_state;
84 85
85 if (!pat_enabled) 86 if (!pat_enabled)
86 return; 87 return;
@@ -122,8 +123,10 @@ void pat_init(void)
122 rdmsrl(MSR_IA32_CR_PAT, boot_pat_state); 123 rdmsrl(MSR_IA32_CR_PAT, boot_pat_state);
123 124
124 wrmsrl(MSR_IA32_CR_PAT, pat); 125 wrmsrl(MSR_IA32_CR_PAT, pat);
125 printk(KERN_INFO "x86 PAT enabled: cpu %d, old 0x%Lx, new 0x%Lx\n", 126
126 smp_processor_id(), boot_pat_state, pat); 127 if (boot_cpu)
128 printk(KERN_INFO "x86 PAT enabled: cpu %d, old 0x%Lx, new 0x%Lx\n",
129 smp_processor_id(), boot_pat_state, pat);
127} 130}
128 131
129#undef PAT 132#undef PAT
diff --git a/arch/x86/mm/setup_nx.c b/arch/x86/mm/setup_nx.c
new file mode 100644
index 000000000000..513d8ed5d2ec
--- /dev/null
+++ b/arch/x86/mm/setup_nx.c
@@ -0,0 +1,69 @@
1#include <linux/spinlock.h>
2#include <linux/errno.h>
3#include <linux/init.h>
4
5#include <asm/pgtable.h>
6
7int nx_enabled;
8
9#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
10static int disable_nx __cpuinitdata;
11
12/*
13 * noexec = on|off
14 *
15 * Control non-executable mappings for processes.
16 *
17 * on Enable
18 * off Disable
19 */
20static int __init noexec_setup(char *str)
21{
22 if (!str)
23 return -EINVAL;
24 if (!strncmp(str, "on", 2)) {
25 __supported_pte_mask |= _PAGE_NX;
26 disable_nx = 0;
27 } else if (!strncmp(str, "off", 3)) {
28 disable_nx = 1;
29 __supported_pte_mask &= ~_PAGE_NX;
30 }
31 return 0;
32}
33early_param("noexec", noexec_setup);
34#endif
35
36#ifdef CONFIG_X86_PAE
37void __init set_nx(void)
38{
39 unsigned int v[4], l, h;
40
41 if (cpu_has_pae && (cpuid_eax(0x80000000) > 0x80000001)) {
42 cpuid(0x80000001, &v[0], &v[1], &v[2], &v[3]);
43
44 if ((v[3] & (1 << 20)) && !disable_nx) {
45 rdmsr(MSR_EFER, l, h);
46 l |= EFER_NX;
47 wrmsr(MSR_EFER, l, h);
48 nx_enabled = 1;
49 __supported_pte_mask |= _PAGE_NX;
50 }
51 }
52}
53#else
54void set_nx(void)
55{
56}
57#endif
58
59#ifdef CONFIG_X86_64
60void __cpuinit check_efer(void)
61{
62 unsigned long efer;
63
64 rdmsrl(MSR_EFER, efer);
65 if (!(efer & EFER_NX) || disable_nx)
66 __supported_pte_mask &= ~_PAGE_NX;
67}
68#endif
69
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index 544eb7496531..3439616d69f1 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -1082,6 +1082,11 @@ asmlinkage void __init xen_start_kernel(void)
1082 1082
1083 __supported_pte_mask |= _PAGE_IOMAP; 1083 __supported_pte_mask |= _PAGE_IOMAP;
1084 1084
1085#ifdef CONFIG_X86_64
1086 /* Work out if we support NX */
1087 check_efer();
1088#endif
1089
1085 xen_setup_features(); 1090 xen_setup_features();
1086 1091
1087 /* Get mfn list */ 1092 /* Get mfn list */
@@ -1123,11 +1128,6 @@ asmlinkage void __init xen_start_kernel(void)
1123 1128
1124 pgd = (pgd_t *)xen_start_info->pt_base; 1129 pgd = (pgd_t *)xen_start_info->pt_base;
1125 1130
1126#ifdef CONFIG_X86_64
1127 /* Work out if we support NX */
1128 check_efer();
1129#endif
1130
1131 /* Don't do the full vcpu_info placement stuff until we have a 1131 /* Don't do the full vcpu_info placement stuff until we have a
1132 possible map and a non-dummy shared_info. */ 1132 possible map and a non-dummy shared_info. */
1133 per_cpu(xen_vcpu, 0) = &HYPERVISOR_shared_info->vcpu_info[0]; 1133 per_cpu(xen_vcpu, 0) = &HYPERVISOR_shared_info->vcpu_info[0];
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
index 135fbfe1825c..741191524353 100644
--- a/drivers/acpi/bus.c
+++ b/drivers/acpi/bus.c
@@ -94,36 +94,33 @@ int acpi_bus_get_device(acpi_handle handle, struct acpi_device **device)
94 94
95EXPORT_SYMBOL(acpi_bus_get_device); 95EXPORT_SYMBOL(acpi_bus_get_device);
96 96
97int acpi_bus_get_status(struct acpi_device *device) 97acpi_status acpi_bus_get_status_handle(acpi_handle handle,
98 unsigned long long *sta)
98{ 99{
99 acpi_status status = AE_OK; 100 acpi_status status;
100 unsigned long long sta = 0;
101
102 101
103 if (!device) 102 status = acpi_evaluate_integer(handle, "_STA", NULL, sta);
104 return -EINVAL; 103 if (ACPI_SUCCESS(status))
104 return AE_OK;
105 105
106 /* 106 if (status == AE_NOT_FOUND) {
107 * Evaluate _STA if present. 107 *sta = ACPI_STA_DEVICE_PRESENT | ACPI_STA_DEVICE_ENABLED |
108 */ 108 ACPI_STA_DEVICE_UI | ACPI_STA_DEVICE_FUNCTIONING;
109 if (device->flags.dynamic_status) { 109 return AE_OK;
110 status =
111 acpi_evaluate_integer(device->handle, "_STA", NULL, &sta);
112 if (ACPI_FAILURE(status))
113 return -ENODEV;
114 STRUCT_TO_INT(device->status) = (int)sta;
115 } 110 }
111 return status;
112}
116 113
117 /* 114int acpi_bus_get_status(struct acpi_device *device)
118 * According to ACPI spec some device can be present and functional 115{
119 * even if the parent is not present but functional. 116 acpi_status status;
120 * In such conditions the child device should not inherit the status 117 unsigned long long sta;
121 * from the parent. 118
122 */ 119 status = acpi_bus_get_status_handle(device->handle, &sta);
123 else 120 if (ACPI_FAILURE(status))
124 STRUCT_TO_INT(device->status) = 121 return -ENODEV;
125 ACPI_STA_DEVICE_PRESENT | ACPI_STA_DEVICE_ENABLED | 122
126 ACPI_STA_DEVICE_UI | ACPI_STA_DEVICE_FUNCTIONING; 123 STRUCT_TO_INT(device->status) = (int) sta;
127 124
128 if (device->status.functional && !device->status.present) { 125 if (device->status.functional && !device->status.present) {
129 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Device [%s] status [%08x]: " 126 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Device [%s] status [%08x]: "
@@ -135,10 +132,8 @@ int acpi_bus_get_status(struct acpi_device *device)
135 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Device [%s] status [%08x]\n", 132 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Device [%s] status [%08x]\n",
136 device->pnp.bus_id, 133 device->pnp.bus_id,
137 (u32) STRUCT_TO_INT(device->status))); 134 (u32) STRUCT_TO_INT(device->status)));
138
139 return 0; 135 return 0;
140} 136}
141
142EXPORT_SYMBOL(acpi_bus_get_status); 137EXPORT_SYMBOL(acpi_bus_get_status);
143 138
144void acpi_bus_private_data_handler(acpi_handle handle, 139void acpi_bus_private_data_handler(acpi_handle handle,
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index 408ebde18986..468921bed22f 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -22,6 +22,8 @@ extern struct acpi_device *acpi_root;
22#define ACPI_BUS_HID "LNXSYBUS" 22#define ACPI_BUS_HID "LNXSYBUS"
23#define ACPI_BUS_DEVICE_NAME "System Bus" 23#define ACPI_BUS_DEVICE_NAME "System Bus"
24 24
25#define ACPI_IS_ROOT_DEVICE(device) (!(device)->parent)
26
25static LIST_HEAD(acpi_device_list); 27static LIST_HEAD(acpi_device_list);
26static LIST_HEAD(acpi_bus_id_list); 28static LIST_HEAD(acpi_bus_id_list);
27DEFINE_MUTEX(acpi_device_lock); 29DEFINE_MUTEX(acpi_device_lock);
@@ -43,40 +45,19 @@ static int create_modalias(struct acpi_device *acpi_dev, char *modalias,
43{ 45{
44 int len; 46 int len;
45 int count; 47 int count;
46 48 struct acpi_hardware_id *id;
47 if (!acpi_dev->flags.hardware_id && !acpi_dev->flags.compatible_ids)
48 return -ENODEV;
49 49
50 len = snprintf(modalias, size, "acpi:"); 50 len = snprintf(modalias, size, "acpi:");
51 size -= len; 51 size -= len;
52 52
53 if (acpi_dev->flags.hardware_id) { 53 list_for_each_entry(id, &acpi_dev->pnp.ids, list) {
54 count = snprintf(&modalias[len], size, "%s:", 54 count = snprintf(&modalias[len], size, "%s:", id->id);
55 acpi_dev->pnp.hardware_id);
56 if (count < 0 || count >= size) 55 if (count < 0 || count >= size)
57 return -EINVAL; 56 return -EINVAL;
58 len += count; 57 len += count;
59 size -= count; 58 size -= count;
60 } 59 }
61 60
62 if (acpi_dev->flags.compatible_ids) {
63 struct acpica_device_id_list *cid_list;
64 int i;
65
66 cid_list = acpi_dev->pnp.cid_list;
67 for (i = 0; i < cid_list->count; i++) {
68 count = snprintf(&modalias[len], size, "%s:",
69 cid_list->ids[i].string);
70 if (count < 0 || count >= size) {
71 printk(KERN_ERR PREFIX "%s cid[%i] exceeds event buffer size",
72 acpi_dev->pnp.device_name, i);
73 break;
74 }
75 len += count;
76 size -= count;
77 }
78 }
79
80 modalias[len] = '\0'; 61 modalias[len] = '\0';
81 return len; 62 return len;
82} 63}
@@ -183,7 +164,7 @@ static ssize_t
183acpi_device_hid_show(struct device *dev, struct device_attribute *attr, char *buf) { 164acpi_device_hid_show(struct device *dev, struct device_attribute *attr, char *buf) {
184 struct acpi_device *acpi_dev = to_acpi_device(dev); 165 struct acpi_device *acpi_dev = to_acpi_device(dev);
185 166
186 return sprintf(buf, "%s\n", acpi_dev->pnp.hardware_id); 167 return sprintf(buf, "%s\n", acpi_device_hid(acpi_dev));
187} 168}
188static DEVICE_ATTR(hid, 0444, acpi_device_hid_show, NULL); 169static DEVICE_ATTR(hid, 0444, acpi_device_hid_show, NULL);
189 170
@@ -219,17 +200,13 @@ static int acpi_device_setup_files(struct acpi_device *dev)
219 goto end; 200 goto end;
220 } 201 }
221 202
222 if (dev->flags.hardware_id) { 203 result = device_create_file(&dev->dev, &dev_attr_hid);
223 result = device_create_file(&dev->dev, &dev_attr_hid); 204 if (result)
224 if (result) 205 goto end;
225 goto end;
226 }
227 206
228 if (dev->flags.hardware_id || dev->flags.compatible_ids) { 207 result = device_create_file(&dev->dev, &dev_attr_modalias);
229 result = device_create_file(&dev->dev, &dev_attr_modalias); 208 if (result)
230 if (result) 209 goto end;
231 goto end;
232 }
233 210
234 /* 211 /*
235 * If device has _EJ0, 'eject' file is created that is used to trigger 212 * If device has _EJ0, 'eject' file is created that is used to trigger
@@ -255,11 +232,8 @@ static void acpi_device_remove_files(struct acpi_device *dev)
255 if (ACPI_SUCCESS(status)) 232 if (ACPI_SUCCESS(status))
256 device_remove_file(&dev->dev, &dev_attr_eject); 233 device_remove_file(&dev->dev, &dev_attr_eject);
257 234
258 if (dev->flags.hardware_id || dev->flags.compatible_ids) 235 device_remove_file(&dev->dev, &dev_attr_modalias);
259 device_remove_file(&dev->dev, &dev_attr_modalias); 236 device_remove_file(&dev->dev, &dev_attr_hid);
260
261 if (dev->flags.hardware_id)
262 device_remove_file(&dev->dev, &dev_attr_hid);
263 if (dev->handle) 237 if (dev->handle)
264 device_remove_file(&dev->dev, &dev_attr_path); 238 device_remove_file(&dev->dev, &dev_attr_path);
265} 239}
@@ -271,6 +245,7 @@ int acpi_match_device_ids(struct acpi_device *device,
271 const struct acpi_device_id *ids) 245 const struct acpi_device_id *ids)
272{ 246{
273 const struct acpi_device_id *id; 247 const struct acpi_device_id *id;
248 struct acpi_hardware_id *hwid;
274 249
275 /* 250 /*
276 * If the device is not present, it is unnecessary to load device 251 * If the device is not present, it is unnecessary to load device
@@ -279,40 +254,30 @@ int acpi_match_device_ids(struct acpi_device *device,
279 if (!device->status.present) 254 if (!device->status.present)
280 return -ENODEV; 255 return -ENODEV;
281 256
282 if (device->flags.hardware_id) { 257 for (id = ids; id->id[0]; id++)
283 for (id = ids; id->id[0]; id++) { 258 list_for_each_entry(hwid, &device->pnp.ids, list)
284 if (!strcmp((char*)id->id, device->pnp.hardware_id)) 259 if (!strcmp((char *) id->id, hwid->id))
285 return 0; 260 return 0;
286 }
287 }
288
289 if (device->flags.compatible_ids) {
290 struct acpica_device_id_list *cid_list = device->pnp.cid_list;
291 int i;
292
293 for (id = ids; id->id[0]; id++) {
294 /* compare multiple _CID entries against driver ids */
295 for (i = 0; i < cid_list->count; i++) {
296 if (!strcmp((char*)id->id,
297 cid_list->ids[i].string))
298 return 0;
299 }
300 }
301 }
302 261
303 return -ENOENT; 262 return -ENOENT;
304} 263}
305EXPORT_SYMBOL(acpi_match_device_ids); 264EXPORT_SYMBOL(acpi_match_device_ids);
306 265
266static void acpi_free_ids(struct acpi_device *device)
267{
268 struct acpi_hardware_id *id, *tmp;
269
270 list_for_each_entry_safe(id, tmp, &device->pnp.ids, list) {
271 kfree(id->id);
272 kfree(id);
273 }
274}
275
307static void acpi_device_release(struct device *dev) 276static void acpi_device_release(struct device *dev)
308{ 277{
309 struct acpi_device *acpi_dev = to_acpi_device(dev); 278 struct acpi_device *acpi_dev = to_acpi_device(dev);
310 279
311 kfree(acpi_dev->pnp.cid_list); 280 acpi_free_ids(acpi_dev);
312 if (acpi_dev->flags.hardware_id)
313 kfree(acpi_dev->pnp.hardware_id);
314 if (acpi_dev->flags.unique_id)
315 kfree(acpi_dev->pnp.unique_id);
316 kfree(acpi_dev); 281 kfree(acpi_dev);
317} 282}
318 283
@@ -378,15 +343,13 @@ static acpi_status acpi_device_notify_fixed(void *data)
378static int acpi_device_install_notify_handler(struct acpi_device *device) 343static int acpi_device_install_notify_handler(struct acpi_device *device)
379{ 344{
380 acpi_status status; 345 acpi_status status;
381 char *hid;
382 346
383 hid = acpi_device_hid(device); 347 if (device->device_type == ACPI_BUS_TYPE_POWER_BUTTON)
384 if (!strcmp(hid, ACPI_BUTTON_HID_POWERF))
385 status = 348 status =
386 acpi_install_fixed_event_handler(ACPI_EVENT_POWER_BUTTON, 349 acpi_install_fixed_event_handler(ACPI_EVENT_POWER_BUTTON,
387 acpi_device_notify_fixed, 350 acpi_device_notify_fixed,
388 device); 351 device);
389 else if (!strcmp(hid, ACPI_BUTTON_HID_SLEEPF)) 352 else if (device->device_type == ACPI_BUS_TYPE_SLEEP_BUTTON)
390 status = 353 status =
391 acpi_install_fixed_event_handler(ACPI_EVENT_SLEEP_BUTTON, 354 acpi_install_fixed_event_handler(ACPI_EVENT_SLEEP_BUTTON,
392 acpi_device_notify_fixed, 355 acpi_device_notify_fixed,
@@ -404,10 +367,10 @@ static int acpi_device_install_notify_handler(struct acpi_device *device)
404 367
405static void acpi_device_remove_notify_handler(struct acpi_device *device) 368static void acpi_device_remove_notify_handler(struct acpi_device *device)
406{ 369{
407 if (!strcmp(acpi_device_hid(device), ACPI_BUTTON_HID_POWERF)) 370 if (device->device_type == ACPI_BUS_TYPE_POWER_BUTTON)
408 acpi_remove_fixed_event_handler(ACPI_EVENT_POWER_BUTTON, 371 acpi_remove_fixed_event_handler(ACPI_EVENT_POWER_BUTTON,
409 acpi_device_notify_fixed); 372 acpi_device_notify_fixed);
410 else if (!strcmp(acpi_device_hid(device), ACPI_BUTTON_HID_SLEEPF)) 373 else if (device->device_type == ACPI_BUS_TYPE_SLEEP_BUTTON)
411 acpi_remove_fixed_event_handler(ACPI_EVENT_SLEEP_BUTTON, 374 acpi_remove_fixed_event_handler(ACPI_EVENT_SLEEP_BUTTON,
412 acpi_device_notify_fixed); 375 acpi_device_notify_fixed);
413 else 376 else
@@ -474,12 +437,12 @@ struct bus_type acpi_bus_type = {
474 .uevent = acpi_device_uevent, 437 .uevent = acpi_device_uevent,
475}; 438};
476 439
477static int acpi_device_register(struct acpi_device *device, 440static int acpi_device_register(struct acpi_device *device)
478 struct acpi_device *parent)
479{ 441{
480 int result; 442 int result;
481 struct acpi_device_bus_id *acpi_device_bus_id, *new_bus_id; 443 struct acpi_device_bus_id *acpi_device_bus_id, *new_bus_id;
482 int found = 0; 444 int found = 0;
445
483 /* 446 /*
484 * Linkage 447 * Linkage
485 * ------- 448 * -------
@@ -501,8 +464,9 @@ static int acpi_device_register(struct acpi_device *device,
501 * If failed, create one and link it into acpi_bus_id_list 464 * If failed, create one and link it into acpi_bus_id_list
502 */ 465 */
503 list_for_each_entry(acpi_device_bus_id, &acpi_bus_id_list, node) { 466 list_for_each_entry(acpi_device_bus_id, &acpi_bus_id_list, node) {
504 if(!strcmp(acpi_device_bus_id->bus_id, device->flags.hardware_id? device->pnp.hardware_id : "device")) { 467 if (!strcmp(acpi_device_bus_id->bus_id,
505 acpi_device_bus_id->instance_no ++; 468 acpi_device_hid(device))) {
469 acpi_device_bus_id->instance_no++;
506 found = 1; 470 found = 1;
507 kfree(new_bus_id); 471 kfree(new_bus_id);
508 break; 472 break;
@@ -510,7 +474,7 @@ static int acpi_device_register(struct acpi_device *device,
510 } 474 }
511 if (!found) { 475 if (!found) {
512 acpi_device_bus_id = new_bus_id; 476 acpi_device_bus_id = new_bus_id;
513 strcpy(acpi_device_bus_id->bus_id, device->flags.hardware_id ? device->pnp.hardware_id : "device"); 477 strcpy(acpi_device_bus_id->bus_id, acpi_device_hid(device));
514 acpi_device_bus_id->instance_no = 0; 478 acpi_device_bus_id->instance_no = 0;
515 list_add_tail(&acpi_device_bus_id->node, &acpi_bus_id_list); 479 list_add_tail(&acpi_device_bus_id->node, &acpi_bus_id_list);
516 } 480 }
@@ -524,7 +488,7 @@ static int acpi_device_register(struct acpi_device *device,
524 mutex_unlock(&acpi_device_lock); 488 mutex_unlock(&acpi_device_lock);
525 489
526 if (device->parent) 490 if (device->parent)
527 device->dev.parent = &parent->dev; 491 device->dev.parent = &device->parent->dev;
528 device->dev.bus = &acpi_bus_type; 492 device->dev.bus = &acpi_bus_type;
529 device->dev.release = &acpi_device_release; 493 device->dev.release = &acpi_device_release;
530 result = device_register(&device->dev); 494 result = device_register(&device->dev);
@@ -664,6 +628,33 @@ EXPORT_SYMBOL(acpi_bus_unregister_driver);
664/* -------------------------------------------------------------------------- 628/* --------------------------------------------------------------------------
665 Device Enumeration 629 Device Enumeration
666 -------------------------------------------------------------------------- */ 630 -------------------------------------------------------------------------- */
631static struct acpi_device *acpi_bus_get_parent(acpi_handle handle)
632{
633 acpi_status status;
634 int ret;
635 struct acpi_device *device;
636
637 /*
638 * Fixed hardware devices do not appear in the namespace and do not
639 * have handles, but we fabricate acpi_devices for them, so we have
640 * to deal with them specially.
641 */
642 if (handle == NULL)
643 return acpi_root;
644
645 do {
646 status = acpi_get_parent(handle, &handle);
647 if (status == AE_NULL_ENTRY)
648 return NULL;
649 if (ACPI_FAILURE(status))
650 return acpi_root;
651
652 ret = acpi_bus_get_device(handle, &device);
653 if (ret == 0)
654 return device;
655 } while (1);
656}
657
667acpi_status 658acpi_status
668acpi_bus_get_ejd(acpi_handle handle, acpi_handle *ejd) 659acpi_bus_get_ejd(acpi_handle handle, acpi_handle *ejd)
669{ 660{
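acpi_bus_get_parent() above walks handle by handle towards the namespace root until it finds an object that already has a struct acpi_device attached, and falls back to acpi_root (fixed-hardware devices have no handle at all). The self-contained sketch below models the same lookup with a toy node type in place of ACPI handles; the real code uses acpi_get_parent() and acpi_bus_get_device().

#include <stdio.h>
#include <stddef.h>

/* Toy namespace node: parent link plus an optional attached device. */
struct node {
	struct node *parent;
	const char *device;	/* non-NULL once a struct acpi_device exists */
};

static const char *root_device = "acpi_root";

/*
 * Walk towards the root until a node with a device attached is found;
 * otherwise fall back to the root device, as acpi_bus_get_parent() does.
 */
static const char *get_parent_device(struct node *n)
{
	if (!n)				/* fixed-hardware case: no handle */
		return root_device;

	for (n = n->parent; n; n = n->parent) {
		if (n->device)
			return n->device;
	}
	return root_device;
}

int main(void)
{
	struct node sb   = { .parent = NULL, .device = "LNXSYBUS:00" };
	struct node pci0 = { .parent = &sb,  .device = NULL };	/* not enumerated yet */
	struct node gpp  = { .parent = &pci0 };

	printf("%s\n", get_parent_device(&gpp));	/* LNXSYBUS:00 */
	printf("%s\n", get_parent_device(NULL));	/* acpi_root */
	return 0;
}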
@@ -876,11 +867,6 @@ static int acpi_bus_get_flags(struct acpi_device *device)
876 if (ACPI_SUCCESS(status)) 867 if (ACPI_SUCCESS(status))
877 device->flags.dynamic_status = 1; 868 device->flags.dynamic_status = 1;
878 869
879 /* Presence of _CID indicates 'compatible_ids' */
880 status = acpi_get_handle(device->handle, "_CID", &temp);
881 if (ACPI_SUCCESS(status))
882 device->flags.compatible_ids = 1;
883
884 /* Presence of _RMV indicates 'removable' */ 870 /* Presence of _RMV indicates 'removable' */
885 status = acpi_get_handle(device->handle, "_RMV", &temp); 871 status = acpi_get_handle(device->handle, "_RMV", &temp);
886 if (ACPI_SUCCESS(status)) 872 if (ACPI_SUCCESS(status))
@@ -918,8 +904,7 @@ static int acpi_bus_get_flags(struct acpi_device *device)
918 return 0; 904 return 0;
919} 905}
920 906
921static void acpi_device_get_busid(struct acpi_device *device, 907static void acpi_device_get_busid(struct acpi_device *device)
922 acpi_handle handle, int type)
923{ 908{
924 char bus_id[5] = { '?', 0 }; 909 char bus_id[5] = { '?', 0 };
925 struct acpi_buffer buffer = { sizeof(bus_id), bus_id }; 910 struct acpi_buffer buffer = { sizeof(bus_id), bus_id };
@@ -931,10 +916,12 @@ static void acpi_device_get_busid(struct acpi_device *device,
931 * The device's Bus ID is simply the object name. 916 * The device's Bus ID is simply the object name.
932 * TBD: Shouldn't this value be unique (within the ACPI namespace)? 917 * TBD: Shouldn't this value be unique (within the ACPI namespace)?
933 */ 918 */
934 switch (type) { 919 if (ACPI_IS_ROOT_DEVICE(device)) {
935 case ACPI_BUS_TYPE_SYSTEM:
936 strcpy(device->pnp.bus_id, "ACPI"); 920 strcpy(device->pnp.bus_id, "ACPI");
937 break; 921 return;
922 }
923
924 switch (device->device_type) {
938 case ACPI_BUS_TYPE_POWER_BUTTON: 925 case ACPI_BUS_TYPE_POWER_BUTTON:
939 strcpy(device->pnp.bus_id, "PWRF"); 926 strcpy(device->pnp.bus_id, "PWRF");
940 break; 927 break;
@@ -942,7 +929,7 @@ static void acpi_device_get_busid(struct acpi_device *device,
942 strcpy(device->pnp.bus_id, "SLPF"); 929 strcpy(device->pnp.bus_id, "SLPF");
943 break; 930 break;
944 default: 931 default:
945 acpi_get_name(handle, ACPI_SINGLE_NAME, &buffer); 932 acpi_get_name(device->handle, ACPI_SINGLE_NAME, &buffer);
946 /* Clean up trailing underscores (if any) */ 933 /* Clean up trailing underscores (if any) */
947 for (i = 3; i > 1; i--) { 934 for (i = 3; i > 1; i--) {
948 if (bus_id[i] == '_') 935 if (bus_id[i] == '_')
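The default branch above takes the raw four-character ACPI object name and strips its '_' padding down to no fewer than two characters. The loop below is the same cleanup in isolation, runnable on its own.

#include <stdio.h>

/*
 * ACPI object names are fixed four-character strings padded with '_'
 * (e.g. "EC0_", "SB__").  Strip the padding the same way the bus_id
 * cleanup loop above does, always keeping at least two characters.
 */
static void trim_bus_id(char bus_id[5])
{
	int i;

	for (i = 3; i > 1; i--) {
		if (bus_id[i] == '_')
			bus_id[i] = '\0';
		else
			break;
	}
}

int main(void)
{
	char a[5] = "EC0_";
	char b[5] = "SB__";

	trim_bus_id(a);
	trim_bus_id(b);
	printf("%s %s\n", a, b);	/* "EC0 SB" */
	return 0;
}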
@@ -1000,204 +987,132 @@ static int acpi_dock_match(struct acpi_device *device)
1000 return acpi_get_handle(device->handle, "_DCK", &tmp); 987 return acpi_get_handle(device->handle, "_DCK", &tmp);
1001} 988}
1002 989
1003static struct acpica_device_id_list* 990char *acpi_device_hid(struct acpi_device *device)
1004acpi_add_cid(
1005 struct acpi_device_info *info,
1006 struct acpica_device_id *new_cid)
1007{ 991{
1008 struct acpica_device_id_list *cid; 992 struct acpi_hardware_id *hid;
1009 char *next_id_string;
1010 acpi_size cid_length;
1011 acpi_size new_cid_length;
1012 u32 i;
1013
1014
1015 /* Allocate new CID list with room for the new CID */
1016
1017 if (!new_cid)
1018 new_cid_length = info->compatible_id_list.list_size;
1019 else if (info->compatible_id_list.list_size)
1020 new_cid_length = info->compatible_id_list.list_size +
1021 new_cid->length + sizeof(struct acpica_device_id);
1022 else
1023 new_cid_length = sizeof(struct acpica_device_id_list) + new_cid->length;
1024
1025 cid = ACPI_ALLOCATE_ZEROED(new_cid_length);
1026 if (!cid) {
1027 return NULL;
1028 }
1029
1030 cid->list_size = new_cid_length;
1031 cid->count = info->compatible_id_list.count;
1032 if (new_cid)
1033 cid->count++;
1034 next_id_string = (char *) cid->ids + (cid->count * sizeof(struct acpica_device_id));
1035
1036 /* Copy all existing CIDs */
1037
1038 for (i = 0; i < info->compatible_id_list.count; i++) {
1039 cid_length = info->compatible_id_list.ids[i].length;
1040 cid->ids[i].string = next_id_string;
1041 cid->ids[i].length = cid_length;
1042 993
1043 ACPI_MEMCPY(next_id_string, info->compatible_id_list.ids[i].string, 994 hid = list_first_entry(&device->pnp.ids, struct acpi_hardware_id, list);
1044 cid_length); 995 return hid->id;
1045 996}
1046 next_id_string += cid_length; 997EXPORT_SYMBOL(acpi_device_hid);
1047 }
1048 998
1049 /* Append the new CID */ 999static void acpi_add_id(struct acpi_device *device, const char *dev_id)
1000{
1001 struct acpi_hardware_id *id;
1050 1002
1051 if (new_cid) { 1003 id = kmalloc(sizeof(*id), GFP_KERNEL);
1052 cid->ids[i].string = next_id_string; 1004 if (!id)
1053 cid->ids[i].length = new_cid->length; 1005 return;
1054 1006
1055 ACPI_MEMCPY(next_id_string, new_cid->string, new_cid->length); 1007 id->id = kmalloc(strlen(dev_id) + 1, GFP_KERNEL);
1008 if (!id->id) {
1009 kfree(id);
1010 return;
1056 } 1011 }
1057 1012
1058 return cid; 1013 strcpy(id->id, dev_id);
1014 list_add_tail(&id->list, &device->pnp.ids);
1059} 1015}
1060 1016
1061static void acpi_device_set_id(struct acpi_device *device, 1017static void acpi_device_set_id(struct acpi_device *device)
1062 struct acpi_device *parent, acpi_handle handle,
1063 int type)
1064{ 1018{
1065 struct acpi_device_info *info = NULL;
1066 char *hid = NULL;
1067 char *uid = NULL;
1068 struct acpica_device_id_list *cid_list = NULL;
1069 char *cid_add = NULL;
1070 acpi_status status; 1019 acpi_status status;
1020 struct acpi_device_info *info;
1021 struct acpica_device_id_list *cid_list;
1022 int i;
1071 1023
1072 switch (type) { 1024 switch (device->device_type) {
1073 case ACPI_BUS_TYPE_DEVICE: 1025 case ACPI_BUS_TYPE_DEVICE:
1074 status = acpi_get_object_info(handle, &info); 1026 if (ACPI_IS_ROOT_DEVICE(device)) {
1027 acpi_add_id(device, ACPI_SYSTEM_HID);
1028 break;
1029 } else if (ACPI_IS_ROOT_DEVICE(device->parent)) {
1030 /* \_SB_, the only root-level namespace device */
1031 acpi_add_id(device, ACPI_BUS_HID);
1032 strcpy(device->pnp.device_name, ACPI_BUS_DEVICE_NAME);
1033 strcpy(device->pnp.device_class, ACPI_BUS_CLASS);
1034 break;
1035 }
1036
1037 status = acpi_get_object_info(device->handle, &info);
1075 if (ACPI_FAILURE(status)) { 1038 if (ACPI_FAILURE(status)) {
1076 printk(KERN_ERR PREFIX "%s: Error reading device info\n", __func__); 1039 printk(KERN_ERR PREFIX "%s: Error reading device info\n", __func__);
1077 return; 1040 return;
1078 } 1041 }
1079 1042
1080 if (info->valid & ACPI_VALID_HID) 1043 if (info->valid & ACPI_VALID_HID)
1081 hid = info->hardware_id.string; 1044 acpi_add_id(device, info->hardware_id.string);
1082 if (info->valid & ACPI_VALID_UID) 1045 if (info->valid & ACPI_VALID_CID) {
1083 uid = info->unique_id.string;
1084 if (info->valid & ACPI_VALID_CID)
1085 cid_list = &info->compatible_id_list; 1046 cid_list = &info->compatible_id_list;
1047 for (i = 0; i < cid_list->count; i++)
1048 acpi_add_id(device, cid_list->ids[i].string);
1049 }
1086 if (info->valid & ACPI_VALID_ADR) { 1050 if (info->valid & ACPI_VALID_ADR) {
1087 device->pnp.bus_address = info->address; 1051 device->pnp.bus_address = info->address;
1088 device->flags.bus_address = 1; 1052 device->flags.bus_address = 1;
1089 } 1053 }
1090 1054
1091 /* If we have a video/bay/dock device, add our selfdefined 1055 /*
1092 HID to the CID list. Like that the video/bay/dock drivers 1056 * Some devices don't reliably have _HIDs & _CIDs, so add
1093 will get autoloaded and the device might still match 1057 * synthetic HIDs to make sure drivers can find them.
1094 against another driver. 1058 */
1095 */
1096 if (acpi_is_video_device(device)) 1059 if (acpi_is_video_device(device))
1097 cid_add = ACPI_VIDEO_HID; 1060 acpi_add_id(device, ACPI_VIDEO_HID);
1098 else if (ACPI_SUCCESS(acpi_bay_match(device))) 1061 else if (ACPI_SUCCESS(acpi_bay_match(device)))
1099 cid_add = ACPI_BAY_HID; 1062 acpi_add_id(device, ACPI_BAY_HID);
1100 else if (ACPI_SUCCESS(acpi_dock_match(device))) 1063 else if (ACPI_SUCCESS(acpi_dock_match(device)))
1101 cid_add = ACPI_DOCK_HID; 1064 acpi_add_id(device, ACPI_DOCK_HID);
1102 1065
1103 break; 1066 break;
1104 case ACPI_BUS_TYPE_POWER: 1067 case ACPI_BUS_TYPE_POWER:
1105 hid = ACPI_POWER_HID; 1068 acpi_add_id(device, ACPI_POWER_HID);
1106 break; 1069 break;
1107 case ACPI_BUS_TYPE_PROCESSOR: 1070 case ACPI_BUS_TYPE_PROCESSOR:
1108 hid = ACPI_PROCESSOR_OBJECT_HID; 1071 acpi_add_id(device, ACPI_PROCESSOR_OBJECT_HID);
1109 break;
1110 case ACPI_BUS_TYPE_SYSTEM:
1111 hid = ACPI_SYSTEM_HID;
1112 break; 1072 break;
1113 case ACPI_BUS_TYPE_THERMAL: 1073 case ACPI_BUS_TYPE_THERMAL:
1114 hid = ACPI_THERMAL_HID; 1074 acpi_add_id(device, ACPI_THERMAL_HID);
1115 break; 1075 break;
1116 case ACPI_BUS_TYPE_POWER_BUTTON: 1076 case ACPI_BUS_TYPE_POWER_BUTTON:
1117 hid = ACPI_BUTTON_HID_POWERF; 1077 acpi_add_id(device, ACPI_BUTTON_HID_POWERF);
1118 break; 1078 break;
1119 case ACPI_BUS_TYPE_SLEEP_BUTTON: 1079 case ACPI_BUS_TYPE_SLEEP_BUTTON:
1120 hid = ACPI_BUTTON_HID_SLEEPF; 1080 acpi_add_id(device, ACPI_BUTTON_HID_SLEEPF);
1121 break; 1081 break;
1122 } 1082 }
1123 1083
1124 /* 1084 /*
1125 * \_SB 1085 * We build acpi_devices for some objects that don't have _HID or _CID,
1126 * ---- 1086 * e.g., PCI bridges and slots. Drivers can't bind to these objects,
1127 * Fix for the system root bus device -- the only root-level device. 1087 * but we do use them indirectly by traversing the acpi_device tree.
1088 * This generic ID isn't useful for driver binding, but it provides
1089 * the useful property that "every acpi_device has an ID."
1128 */ 1090 */
1129 if (((acpi_handle)parent == ACPI_ROOT_OBJECT) && (type == ACPI_BUS_TYPE_DEVICE)) { 1091 if (list_empty(&device->pnp.ids))
1130 hid = ACPI_BUS_HID; 1092 acpi_add_id(device, "device");
1131 strcpy(device->pnp.device_name, ACPI_BUS_DEVICE_NAME);
1132 strcpy(device->pnp.device_class, ACPI_BUS_CLASS);
1133 }
1134
1135 if (hid) {
1136 device->pnp.hardware_id = ACPI_ALLOCATE_ZEROED(strlen (hid) + 1);
1137 if (device->pnp.hardware_id) {
1138 strcpy(device->pnp.hardware_id, hid);
1139 device->flags.hardware_id = 1;
1140 }
1141 }
1142 if (!device->flags.hardware_id)
1143 device->pnp.hardware_id = "";
1144
1145 if (uid) {
1146 device->pnp.unique_id = ACPI_ALLOCATE_ZEROED(strlen (uid) + 1);
1147 if (device->pnp.unique_id) {
1148 strcpy(device->pnp.unique_id, uid);
1149 device->flags.unique_id = 1;
1150 }
1151 }
1152 if (!device->flags.unique_id)
1153 device->pnp.unique_id = "";
1154
1155 if (cid_list || cid_add) {
1156 struct acpica_device_id_list *list;
1157
1158 if (cid_add) {
1159 struct acpica_device_id cid;
1160 cid.length = strlen (cid_add) + 1;
1161 cid.string = cid_add;
1162
1163 list = acpi_add_cid(info, &cid);
1164 } else {
1165 list = acpi_add_cid(info, NULL);
1166 }
1167
1168 if (list) {
1169 device->pnp.cid_list = list;
1170 if (cid_add)
1171 device->flags.compatible_ids = 1;
1172 }
1173 }
1174
1175 kfree(info);
1176} 1093}
1177 1094
1178static int acpi_device_set_context(struct acpi_device *device, int type) 1095static int acpi_device_set_context(struct acpi_device *device)
1179{ 1096{
1180 acpi_status status = AE_OK; 1097 acpi_status status;
1181 int result = 0; 1098
1182 /* 1099 /*
1183 * Context 1100 * Context
1184 * ------- 1101 * -------
1185 * Attach this 'struct acpi_device' to the ACPI object. This makes 1102 * Attach this 'struct acpi_device' to the ACPI object. This makes
1186 * resolutions from handle->device very efficient. Note that we need 1103 * resolutions from handle->device very efficient. Fixed hardware
1187 * to be careful with fixed-feature devices as they all attach to the 1104 * devices have no handles, so we skip them.
1188 * root object.
1189 */ 1105 */
1190 if (type != ACPI_BUS_TYPE_POWER_BUTTON && 1106 if (!device->handle)
1191 type != ACPI_BUS_TYPE_SLEEP_BUTTON) { 1107 return 0;
1192 status = acpi_attach_data(device->handle,
1193 acpi_bus_data_handler, device);
1194 1108
1195 if (ACPI_FAILURE(status)) { 1109 status = acpi_attach_data(device->handle,
1196 printk(KERN_ERR PREFIX "Error attaching device data\n"); 1110 acpi_bus_data_handler, device);
1197 result = -ENODEV; 1111 if (ACPI_SUCCESS(status))
1198 } 1112 return 0;
1199 } 1113
1200 return result; 1114 printk(KERN_ERR PREFIX "Error attaching device data\n");
1115 return -ENODEV;
1201} 1116}
1202 1117
1203static int acpi_bus_remove(struct acpi_device *dev, int rmdevice) 1118static int acpi_bus_remove(struct acpi_device *dev, int rmdevice)
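The rewritten ID handling replaces the single hardware_id string and the separate compatible-ID list with one list of IDs per device: _HID first, then any _CIDs and synthetic IDs, with acpi_device_hid() simply returning the first entry and a generic "device" ID added when nothing else is known. A minimal userspace model of that list follows; toy_device, add_id and device_hid are illustrative names, not the kernel API.

#include <stdio.h>
#include <stdlib.h>

/* One hardware or compatible ID, analogous to struct acpi_hardware_id. */
struct hw_id {
	const char *id;
	struct hw_id *next;
};

struct toy_device {
	struct hw_id *ids;	/* _HID first, then _CIDs and synthetic IDs */
	struct hw_id **tail;
};

static void add_id(struct toy_device *dev, const char *id)
{
	struct hw_id *e = malloc(sizeof(*e));

	if (!e)
		return;			/* best effort, as in acpi_add_id() */
	e->id = id;
	e->next = NULL;
	*dev->tail = e;			/* append, keeping _HID before the _CIDs */
	dev->tail = &e->next;
}

/* The "primary" ID is simply the first list entry, like acpi_device_hid(). */
static const char *device_hid(const struct toy_device *dev)
{
	return dev->ids ? dev->ids->id : "device";
}

int main(void)
{
	struct toy_device dev = { .ids = NULL, .tail = &dev.ids };

	add_id(&dev, "PNP0303");		/* from _HID */
	add_id(&dev, "PNP030B");		/* from _CID */
	printf("%s\n", device_hid(&dev));	/* PNP0303 */
	return 0;
}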
@@ -1223,17 +1138,14 @@ static int acpi_bus_remove(struct acpi_device *dev, int rmdevice)
1223 return 0; 1138 return 0;
1224} 1139}
1225 1140
1226static int 1141static int acpi_add_single_object(struct acpi_device **child,
1227acpi_add_single_object(struct acpi_device **child, 1142 acpi_handle handle, int type,
1228 struct acpi_device *parent, acpi_handle handle, int type, 1143 unsigned long long sta,
1229 struct acpi_bus_ops *ops) 1144 struct acpi_bus_ops *ops)
1230{ 1145{
1231 int result = 0; 1146 int result;
1232 struct acpi_device *device = NULL; 1147 struct acpi_device *device;
1233 1148 struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
1234
1235 if (!child)
1236 return -EINVAL;
1237 1149
1238 device = kzalloc(sizeof(struct acpi_device), GFP_KERNEL); 1150 device = kzalloc(sizeof(struct acpi_device), GFP_KERNEL);
1239 if (!device) { 1151 if (!device) {
@@ -1241,75 +1153,31 @@ acpi_add_single_object(struct acpi_device **child,
1241 return -ENOMEM; 1153 return -ENOMEM;
1242 } 1154 }
1243 1155
1156 INIT_LIST_HEAD(&device->pnp.ids);
1157 device->device_type = type;
1244 device->handle = handle; 1158 device->handle = handle;
1245 device->parent = parent; 1159 device->parent = acpi_bus_get_parent(handle);
1246 device->bus_ops = *ops; /* workaround for not calling .start */ 1160 device->bus_ops = *ops; /* workaround for not calling .start */

1161 STRUCT_TO_INT(device->status) = sta;
1247 1162
1248 1163 acpi_device_get_busid(device);
1249 acpi_device_get_busid(device, handle, type);
1250 1164
1251 /* 1165 /*
1252 * Flags 1166 * Flags
1253 * ----- 1167 * -----
1254 * Get prior to calling acpi_bus_get_status() so we know whether 1168 * Note that we only look for object handles -- cannot evaluate objects
1255 * or not _STA is present. Note that we only look for object 1169 * until we know the device is present and properly initialized.
1256 * handles -- cannot evaluate objects until we know the device is
1257 * present and properly initialized.
1258 */ 1170 */
1259 result = acpi_bus_get_flags(device); 1171 result = acpi_bus_get_flags(device);
1260 if (result) 1172 if (result)
1261 goto end; 1173 goto end;
1262 1174
1263 /* 1175 /*
1264 * Status
1265 * ------
1266 * See if the device is present. We always assume that non-Device
1267 * and non-Processor objects (e.g. thermal zones, power resources,
1268 * etc.) are present, functioning, etc. (at least when parent object
1269 * is present). Note that _STA has a different meaning for some
1270 * objects (e.g. power resources) so we need to be careful how we use
1271 * it.
1272 */
1273 switch (type) {
1274 case ACPI_BUS_TYPE_PROCESSOR:
1275 case ACPI_BUS_TYPE_DEVICE:
1276 result = acpi_bus_get_status(device);
1277 if (ACPI_FAILURE(result)) {
1278 result = -ENODEV;
1279 goto end;
1280 }
1281 /*
1282 * When the device is neither present nor functional, the
1283 * device should not be added to Linux ACPI device tree.
1284 * When the status of the device is not present but functinal,
1285 * it should be added to Linux ACPI tree. For example : bay
1286 * device , dock device.
1287 * In such conditions it is unncessary to check whether it is
1288 * bay device or dock device.
1289 */
1290 if (!device->status.present && !device->status.functional) {
1291 result = -ENODEV;
1292 goto end;
1293 }
1294 break;
1295 default:
1296 STRUCT_TO_INT(device->status) =
1297 ACPI_STA_DEVICE_PRESENT | ACPI_STA_DEVICE_ENABLED |
1298 ACPI_STA_DEVICE_UI | ACPI_STA_DEVICE_FUNCTIONING;
1299 break;
1300 }
1301
1302 /*
1303 * Initialize Device 1176 * Initialize Device
1304 * ----------------- 1177 * -----------------
1305 * TBD: Synch with Core's enumeration/initialization process. 1178 * TBD: Synch with Core's enumeration/initialization process.
1306 */ 1179 */
1307 1180 acpi_device_set_id(device);
1308 /*
1309 * Hardware ID, Unique ID, & Bus Address
1310 * -------------------------------------
1311 */
1312 acpi_device_set_id(device, parent, handle, type);
1313 1181
1314 /* 1182 /*
1315 * Power Management 1183 * Power Management
@@ -1341,10 +1209,10 @@ acpi_add_single_object(struct acpi_device **child,
1341 goto end; 1209 goto end;
1342 } 1210 }
1343 1211
1344 if ((result = acpi_device_set_context(device, type))) 1212 if ((result = acpi_device_set_context(device)))
1345 goto end; 1213 goto end;
1346 1214
1347 result = acpi_device_register(device, parent); 1215 result = acpi_device_register(device);
1348 1216
1349 /* 1217 /*
1350 * Bind _ADR-Based Devices when hot add 1218 * Bind _ADR-Based Devices when hot add
@@ -1355,128 +1223,122 @@ acpi_add_single_object(struct acpi_device **child,
1355 } 1223 }
1356 1224
1357end: 1225end:
1358 if (!result) 1226 if (!result) {
1227 acpi_get_name(handle, ACPI_FULL_PATHNAME, &buffer);
1228 ACPI_DEBUG_PRINT((ACPI_DB_INFO,
1229 "Adding %s [%s] parent %s\n", dev_name(&device->dev),
1230 (char *) buffer.pointer,
1231 device->parent ? dev_name(&device->parent->dev) :
1232 "(null)"));
1233 kfree(buffer.pointer);
1359 *child = device; 1234 *child = device;
1360 else 1235 } else
1361 acpi_device_release(&device->dev); 1236 acpi_device_release(&device->dev);
1362 1237
1363 return result; 1238 return result;
1364} 1239}
1365 1240
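acpi_add_single_object() now receives the _STA value that the caller already evaluated and stores it with STRUCT_TO_INT(), instead of re-running the status checks itself. The sketch below models that status word and the present-or-functional test used during the scan; the _STA bit positions follow the ACPI specification, while the union layout is an assumption that matches the usual little-endian ABI.

#include <stdio.h>

/* ACPI _STA bits (per the ACPI specification): */
#define STA_PRESENT	(1u << 0)
#define STA_ENABLED	(1u << 1)
#define STA_SHOW_IN_UI	(1u << 2)
#define STA_FUNCTIONING	(1u << 3)

/*
 * Mirrors the STRUCT_TO_INT() idiom: the same word viewed either as a
 * plain integer (what _STA returns) or as individual flags.  Bitfield
 * layout is compiler-dependent; this matches common little-endian ABIs.
 */
union device_status {
	unsigned int value;
	struct {
		unsigned int present:1;
		unsigned int enabled:1;
		unsigned int show_in_ui:1;
		unsigned int functional:1;
	} bits;
};

/* Same test as acpi_bus_check_add(): skip devices that are neither
 * present nor functional (a bay or dock can be functional only). */
static int worth_enumerating(unsigned int sta)
{
	return (sta & (STA_PRESENT | STA_FUNCTIONING)) != 0;
}

int main(void)
{
	union device_status s = { .value = STA_FUNCTIONING };

	printf("present=%d functional=%d enumerate=%d\n",
	       (int)s.bits.present, (int)s.bits.functional,
	       worth_enumerating(s.value));
	return 0;
}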
1366static int acpi_bus_scan(struct acpi_device *start, struct acpi_bus_ops *ops) 1241#define ACPI_STA_DEFAULT (ACPI_STA_DEVICE_PRESENT | ACPI_STA_DEVICE_ENABLED | \
1242 ACPI_STA_DEVICE_UI | ACPI_STA_DEVICE_FUNCTIONING)
1243
1244static int acpi_bus_type_and_status(acpi_handle handle, int *type,
1245 unsigned long long *sta)
1367{ 1246{
1368 acpi_status status = AE_OK; 1247 acpi_status status;
1369 struct acpi_device *parent = NULL; 1248 acpi_object_type acpi_type;
1370 struct acpi_device *child = NULL;
1371 acpi_handle phandle = NULL;
1372 acpi_handle chandle = NULL;
1373 acpi_object_type type = 0;
1374 u32 level = 1;
1375 1249
1250 status = acpi_get_type(handle, &acpi_type);
1251 if (ACPI_FAILURE(status))
1252 return -ENODEV;
1376 1253
1377 if (!start) 1254 switch (acpi_type) {
1378 return -EINVAL; 1255 case ACPI_TYPE_ANY: /* for ACPI_ROOT_OBJECT */
1256 case ACPI_TYPE_DEVICE:
1257 *type = ACPI_BUS_TYPE_DEVICE;
1258 status = acpi_bus_get_status_handle(handle, sta);
1259 if (ACPI_FAILURE(status))
1260 return -ENODEV;
1261 break;
1262 case ACPI_TYPE_PROCESSOR:
1263 *type = ACPI_BUS_TYPE_PROCESSOR;
1264 status = acpi_bus_get_status_handle(handle, sta);
1265 if (ACPI_FAILURE(status))
1266 return -ENODEV;
1267 break;
1268 case ACPI_TYPE_THERMAL:
1269 *type = ACPI_BUS_TYPE_THERMAL;
1270 *sta = ACPI_STA_DEFAULT;
1271 break;
1272 case ACPI_TYPE_POWER:
1273 *type = ACPI_BUS_TYPE_POWER;
1274 *sta = ACPI_STA_DEFAULT;
1275 break;
1276 default:
1277 return -ENODEV;
1278 }
1379 1279
1380 parent = start; 1280 return 0;
1381 phandle = start->handle; 1281}
1382 1282
1383 /* 1283static acpi_status acpi_bus_check_add(acpi_handle handle, u32 lvl,
1384 * Parse through the ACPI namespace, identify all 'devices', and 1284 void *context, void **return_value)
1385 * create a new 'struct acpi_device' for each. 1285{
1386 */ 1286 struct acpi_bus_ops *ops = context;
1387 while ((level > 0) && parent) { 1287 int type;
1288 unsigned long long sta;
1289 struct acpi_device *device;
1290 acpi_status status;
1291 int result;
1388 1292
1389 status = acpi_get_next_object(ACPI_TYPE_ANY, phandle, 1293 result = acpi_bus_type_and_status(handle, &type, &sta);
1390 chandle, &chandle); 1294 if (result)
1295 return AE_OK;
1391 1296
1392 /* 1297 if (!(sta & ACPI_STA_DEVICE_PRESENT) &&
1393 * If this scope is exhausted then move our way back up. 1298 !(sta & ACPI_STA_DEVICE_FUNCTIONING))
1394 */ 1299 return AE_CTRL_DEPTH;
1395 if (ACPI_FAILURE(status)) {
1396 level--;
1397 chandle = phandle;
1398 acpi_get_parent(phandle, &phandle);
1399 if (parent->parent)
1400 parent = parent->parent;
1401 continue;
1402 }
1403 1300
1404 status = acpi_get_type(chandle, &type); 1301 /*
1405 if (ACPI_FAILURE(status)) 1302 * We may already have an acpi_device from a previous enumeration. If
1406 continue; 1303 * so, we needn't add it again, but we may still have to start it.
1304 */
1305 device = NULL;
1306 acpi_bus_get_device(handle, &device);
1307 if (ops->acpi_op_add && !device)
1308 acpi_add_single_object(&device, handle, type, sta, ops);
1407 1309
1408 /* 1310 if (!device)
1409 * If this is a scope object then parse it (depth-first). 1311 return AE_CTRL_DEPTH;
1410 */
1411 if (type == ACPI_TYPE_LOCAL_SCOPE) {
1412 level++;
1413 phandle = chandle;
1414 chandle = NULL;
1415 continue;
1416 }
1417 1312
1418 /* 1313 if (ops->acpi_op_start && !(ops->acpi_op_add)) {
1419 * We're only interested in objects that we consider 'devices'. 1314 status = acpi_start_single_object(device);
1420 */ 1315 if (ACPI_FAILURE(status))
1421 switch (type) { 1316 return AE_CTRL_DEPTH;
1422 case ACPI_TYPE_DEVICE: 1317 }
1423 type = ACPI_BUS_TYPE_DEVICE;
1424 break;
1425 case ACPI_TYPE_PROCESSOR:
1426 type = ACPI_BUS_TYPE_PROCESSOR;
1427 break;
1428 case ACPI_TYPE_THERMAL:
1429 type = ACPI_BUS_TYPE_THERMAL;
1430 break;
1431 case ACPI_TYPE_POWER:
1432 type = ACPI_BUS_TYPE_POWER;
1433 break;
1434 default:
1435 continue;
1436 }
1437 1318
1438 if (ops->acpi_op_add) 1319 if (!*return_value)
1439 status = acpi_add_single_object(&child, parent, 1320 *return_value = device;
1440 chandle, type, ops); 1321 return AE_OK;
1441 else 1322}
1442 status = acpi_bus_get_device(chandle, &child);
1443 1323
1444 if (ACPI_FAILURE(status)) 1324static int acpi_bus_scan(acpi_handle handle, struct acpi_bus_ops *ops,
1445 continue; 1325 struct acpi_device **child)
1326{
1327 acpi_status status;
1328 struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
1329 void *device = NULL;
1446 1330
1447 if (ops->acpi_op_start && !(ops->acpi_op_add)) { 1331 acpi_get_name(handle, ACPI_FULL_PATHNAME, &buffer);
1448 status = acpi_start_single_object(child); 1332 printk(KERN_INFO PREFIX "Enumerating devices from [%s]\n",
1449 if (ACPI_FAILURE(status)) 1333 (char *) buffer.pointer);
1450 continue;
1451 }
1452 1334
1453 /* 1335 status = acpi_bus_check_add(handle, 0, ops, &device);
1454 * If the device is present, enabled, and functioning then 1336 if (ACPI_SUCCESS(status))
1455 * parse its scope (depth-first). Note that we need to 1337 acpi_walk_namespace(ACPI_TYPE_ANY, handle, ACPI_UINT32_MAX,
1456 * represent absent devices to facilitate PnP notifications 1338 acpi_bus_check_add, ops, &device);
1457 * -- but only the subtree head (not all of its children,
1458 * which will be enumerated when the parent is inserted).
1459 *
1460 * TBD: Need notifications and other detection mechanisms
1461 * in place before we can fully implement this.
1462 */
1463 /*
1464 * When the device is not present but functional, it is also
1465 * necessary to scan the children of this device.
1466 */
1467 if (child->status.present || (!child->status.present &&
1468 child->status.functional)) {
1469 status = acpi_get_next_object(ACPI_TYPE_ANY, chandle,
1470 NULL, NULL);
1471 if (ACPI_SUCCESS(status)) {
1472 level++;
1473 phandle = chandle;
1474 chandle = NULL;
1475 parent = child;
1476 }
1477 }
1478 }
1479 1339
1340 if (child)
1341 *child = device;
1480 return 0; 1342 return 0;
1481} 1343}
1482 1344
@@ -1484,36 +1346,25 @@ int
1484acpi_bus_add(struct acpi_device **child, 1346acpi_bus_add(struct acpi_device **child,
1485 struct acpi_device *parent, acpi_handle handle, int type) 1347 struct acpi_device *parent, acpi_handle handle, int type)
1486{ 1348{
1487 int result;
1488 struct acpi_bus_ops ops; 1349 struct acpi_bus_ops ops;
1489 1350
1490 memset(&ops, 0, sizeof(ops)); 1351 memset(&ops, 0, sizeof(ops));
1491 ops.acpi_op_add = 1; 1352 ops.acpi_op_add = 1;
1492 1353
1493 result = acpi_add_single_object(child, parent, handle, type, &ops); 1354 acpi_bus_scan(handle, &ops, child);
1494 if (!result) 1355 return 0;
1495 result = acpi_bus_scan(*child, &ops);
1496
1497 return result;
1498} 1356}
1499EXPORT_SYMBOL(acpi_bus_add); 1357EXPORT_SYMBOL(acpi_bus_add);
1500 1358
1501int acpi_bus_start(struct acpi_device *device) 1359int acpi_bus_start(struct acpi_device *device)
1502{ 1360{
1503 int result;
1504 struct acpi_bus_ops ops; 1361 struct acpi_bus_ops ops;
1505 1362
1363 memset(&ops, 0, sizeof(ops));
1364 ops.acpi_op_start = 1;
1506 1365
1507 if (!device) 1366 acpi_bus_scan(device->handle, &ops, NULL);
1508 return -EINVAL; 1367 return 0;
1509
1510 result = acpi_start_single_object(device);
1511 if (!result) {
1512 memset(&ops, 0, sizeof(ops));
1513 ops.acpi_op_start = 1;
1514 result = acpi_bus_scan(device, &ops);
1515 }
1516 return result;
1517} 1368}
1518EXPORT_SYMBOL(acpi_bus_start); 1369EXPORT_SYMBOL(acpi_bus_start);
1519 1370
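With the scan rewritten, acpi_bus_add() and acpi_bus_start() differ only in which acpi_bus_ops flag they set before handing the subtree to acpi_bus_scan(). A compact illustration of that "one walker, several op flags" pattern follows; the struct and function names here are placeholders, not the kernel interfaces.

#include <stdio.h>

/* Models struct acpi_bus_ops: which phases this scan should perform. */
struct scan_ops {
	unsigned int op_add:1;
	unsigned int op_start:1;
};

static void scan(const char *root, struct scan_ops ops)
{
	/* A real scan would walk the subtree; here we just report the mode. */
	printf("scan %s: add=%u start=%u\n", root, ops.op_add, ops.op_start);
}

/* Both entry points reduce to the same walker with different flags. */
static void bus_add(const char *handle)
{
	scan(handle, (struct scan_ops){ .op_add = 1 });
}

static void bus_start(const char *handle)
{
	scan(handle, (struct scan_ops){ .op_start = 1 });
}

int main(void)
{
	bus_add("\\_SB_.PCI0");
	bus_start("\\_SB_.PCI0");
	return 0;
}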
@@ -1572,15 +1423,12 @@ int acpi_bus_trim(struct acpi_device *start, int rmdevice)
1572} 1423}
1573EXPORT_SYMBOL_GPL(acpi_bus_trim); 1424EXPORT_SYMBOL_GPL(acpi_bus_trim);
1574 1425
1575static int acpi_bus_scan_fixed(struct acpi_device *root) 1426static int acpi_bus_scan_fixed(void)
1576{ 1427{
1577 int result = 0; 1428 int result = 0;
1578 struct acpi_device *device = NULL; 1429 struct acpi_device *device = NULL;
1579 struct acpi_bus_ops ops; 1430 struct acpi_bus_ops ops;
1580 1431
1581 if (!root)
1582 return -ENODEV;
1583
1584 memset(&ops, 0, sizeof(ops)); 1432 memset(&ops, 0, sizeof(ops));
1585 ops.acpi_op_add = 1; 1433 ops.acpi_op_add = 1;
1586 ops.acpi_op_start = 1; 1434 ops.acpi_op_start = 1;
@@ -1589,16 +1437,16 @@ static int acpi_bus_scan_fixed(struct acpi_device *root)
1589 * Enumerate all fixed-feature devices. 1437 * Enumerate all fixed-feature devices.
1590 */ 1438 */
1591 if ((acpi_gbl_FADT.flags & ACPI_FADT_POWER_BUTTON) == 0) { 1439 if ((acpi_gbl_FADT.flags & ACPI_FADT_POWER_BUTTON) == 0) {
1592 result = acpi_add_single_object(&device, acpi_root, 1440 result = acpi_add_single_object(&device, NULL,
1593 NULL,
1594 ACPI_BUS_TYPE_POWER_BUTTON, 1441 ACPI_BUS_TYPE_POWER_BUTTON,
1442 ACPI_STA_DEFAULT,
1595 &ops); 1443 &ops);
1596 } 1444 }
1597 1445
1598 if ((acpi_gbl_FADT.flags & ACPI_FADT_SLEEP_BUTTON) == 0) { 1446 if ((acpi_gbl_FADT.flags & ACPI_FADT_SLEEP_BUTTON) == 0) {
1599 result = acpi_add_single_object(&device, acpi_root, 1447 result = acpi_add_single_object(&device, NULL,
1600 NULL,
1601 ACPI_BUS_TYPE_SLEEP_BUTTON, 1448 ACPI_BUS_TYPE_SLEEP_BUTTON,
1449 ACPI_STA_DEFAULT,
1602 &ops); 1450 &ops);
1603 } 1451 }
1604 1452
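acpi_bus_scan_fixed() creates the fixed power and sleep button devices only when the corresponding FADT flags indicate the platform really implements them as fixed hardware. The flag values below follow the ACPI specification (bits 4 and 5 of the fixed-feature flags, set when the button is a control-method device instead); the rest is an illustrative stand-in for the real FADT structure.

#include <stdio.h>
#include <stdint.h>

/* FADT fixed-feature flag bits (values per the ACPI specification). */
#define FADT_POWER_BUTTON	(1u << 4)	/* set: no fixed power button */
#define FADT_SLEEP_BUTTON	(1u << 5)	/* set: no fixed sleep button */

/*
 * Mirror of acpi_bus_scan_fixed(): a fixed-feature button device is
 * created only when the corresponding "absent" flag is clear.
 */
static void scan_fixed(uint32_t fadt_flags)
{
	if (!(fadt_flags & FADT_POWER_BUTTON))
		printf("add fixed power button (PWRF)\n");
	if (!(fadt_flags & FADT_SLEEP_BUTTON))
		printf("add fixed sleep button (SLPF)\n");
}

int main(void)
{
	scan_fixed(FADT_SLEEP_BUTTON);	/* only the power button is created */
	return 0;
}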
@@ -1621,24 +1469,15 @@ int __init acpi_scan_init(void)
1621 } 1469 }
1622 1470
1623 /* 1471 /*
1624 * Create the root device in the bus's device tree
1625 */
1626 result = acpi_add_single_object(&acpi_root, NULL, ACPI_ROOT_OBJECT,
1627 ACPI_BUS_TYPE_SYSTEM, &ops);
1628 if (result)
1629 goto Done;
1630
1631 /*
1632 * Enumerate devices in the ACPI namespace. 1472 * Enumerate devices in the ACPI namespace.
1633 */ 1473 */
1634 result = acpi_bus_scan_fixed(acpi_root); 1474 result = acpi_bus_scan(ACPI_ROOT_OBJECT, &ops, &acpi_root);
1635 1475
1636 if (!result) 1476 if (!result)
1637 result = acpi_bus_scan(acpi_root, &ops); 1477 result = acpi_bus_scan_fixed();
1638 1478
1639 if (result) 1479 if (result)
1640 acpi_device_unregister(acpi_root, ACPI_BUS_REMOVAL_NORMAL); 1480 acpi_device_unregister(acpi_root, ACPI_BUS_REMOVAL_NORMAL);
1641 1481
1642Done:
1643 return result; 1482 return result;
1644} 1483}
diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c
index 94b1a4c5abab..a4fddb24476f 100644
--- a/drivers/acpi/video.c
+++ b/drivers/acpi/video.c
@@ -1986,6 +1986,10 @@ acpi_video_switch_brightness(struct acpi_video_device *device, int event)
1986 1986
1987 result = acpi_video_device_lcd_set_level(device, level_next); 1987 result = acpi_video_device_lcd_set_level(device, level_next);
1988 1988
1989 if (!result)
1990 backlight_force_update(device->backlight,
1991 BACKLIGHT_UPDATE_HOTKEY);
1992
1989out: 1993out:
1990 if (result) 1994 if (result)
1991 printk(KERN_ERR PREFIX "Failed to switch the brightness\n"); 1995 printk(KERN_ERR PREFIX "Failed to switch the brightness\n");
diff --git a/drivers/i2c/busses/i2c-scmi.c b/drivers/i2c/busses/i2c-scmi.c
index 276a046ac93f..b4a55d407bf5 100644
--- a/drivers/i2c/busses/i2c-scmi.c
+++ b/drivers/i2c/busses/i2c-scmi.c
@@ -369,9 +369,8 @@ static int acpi_smbus_cmi_add(struct acpi_device *device)
369 goto err; 369 goto err;
370 370
371 snprintf(smbus_cmi->adapter.name, sizeof(smbus_cmi->adapter.name), 371 snprintf(smbus_cmi->adapter.name, sizeof(smbus_cmi->adapter.name),
372 "SMBus CMI adapter %s (%s)", 372 "SMBus CMI adapter %s",
373 acpi_device_name(device), 373 acpi_device_name(device));
374 acpi_device_uid(device));
375 smbus_cmi->adapter.owner = THIS_MODULE; 374 smbus_cmi->adapter.owner = THIS_MODULE;
376 smbus_cmi->adapter.algo = &acpi_smbus_cmi_algorithm; 375 smbus_cmi->adapter.algo = &acpi_smbus_cmi_algorithm;
377 smbus_cmi->adapter.algo_data = smbus_cmi; 376 smbus_cmi->adapter.algo_data = smbus_cmi;
diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig
index 7c8e7122aaa9..e4f599f20e38 100644
--- a/drivers/leds/Kconfig
+++ b/drivers/leds/Kconfig
@@ -150,9 +150,9 @@ config LEDS_LP3944
150 tristate "LED Support for N.S. LP3944 (Fun Light) I2C chip" 150 tristate "LED Support for N.S. LP3944 (Fun Light) I2C chip"
151 depends on LEDS_CLASS && I2C 151 depends on LEDS_CLASS && I2C
152 help 152 help
153 This option enables support for LEDs connected to the National 153 This option enables support for LEDs connected to the National
154 Semiconductor LP3944 Lighting Management Unit (LMU) also known as 154 Semiconductor LP3944 Lighting Management Unit (LMU) also known as
155 Fun Light Chip. 155 Fun Light Chip.
156 156
157 To compile this driver as a module, choose M here: the 157 To compile this driver as a module, choose M here: the
158 module will be called leds-lp3944. 158 module will be called leds-lp3944.
@@ -195,6 +195,13 @@ config LEDS_PCA955X
195 LED driver chips accessed via the I2C bus. Supported 195 LED driver chips accessed via the I2C bus. Supported
196 devices include PCA9550, PCA9551, PCA9552, and PCA9553. 196 devices include PCA9550, PCA9551, PCA9552, and PCA9553.
197 197
198config LEDS_WM831X_STATUS
199 tristate "LED support for status LEDs on WM831x PMICs"
200 depends on LEDS_CLASS && MFD_WM831X
201 help
202 This option enables support for the status LEDs of the WM831x
203 series of PMICs.
204
198config LEDS_WM8350 205config LEDS_WM8350
199 tristate "LED Support for WM8350 AudioPlus PMIC" 206 tristate "LED Support for WM8350 AudioPlus PMIC"
200 depends on LEDS_CLASS && MFD_WM8350 207 depends on LEDS_CLASS && MFD_WM8350
diff --git a/drivers/leds/Makefile b/drivers/leds/Makefile
index e8cdcf77a4c3..46d72704d606 100644
--- a/drivers/leds/Makefile
+++ b/drivers/leds/Makefile
@@ -26,6 +26,7 @@ obj-$(CONFIG_LEDS_HP6XX) += leds-hp6xx.o
26obj-$(CONFIG_LEDS_FSG) += leds-fsg.o 26obj-$(CONFIG_LEDS_FSG) += leds-fsg.o
27obj-$(CONFIG_LEDS_PCA955X) += leds-pca955x.o 27obj-$(CONFIG_LEDS_PCA955X) += leds-pca955x.o
28obj-$(CONFIG_LEDS_DA903X) += leds-da903x.o 28obj-$(CONFIG_LEDS_DA903X) += leds-da903x.o
29obj-$(CONFIG_LEDS_WM831X_STATUS) += leds-wm831x-status.o
29obj-$(CONFIG_LEDS_WM8350) += leds-wm8350.o 30obj-$(CONFIG_LEDS_WM8350) += leds-wm8350.o
30obj-$(CONFIG_LEDS_PWM) += leds-pwm.o 31obj-$(CONFIG_LEDS_PWM) += leds-pwm.o
31 32
diff --git a/drivers/leds/leds-clevo-mail.c b/drivers/leds/leds-clevo-mail.c
index f2242db54016..a498135a4e80 100644
--- a/drivers/leds/leds-clevo-mail.c
+++ b/drivers/leds/leds-clevo-mail.c
@@ -153,7 +153,7 @@ static struct led_classdev clevo_mail_led = {
153 .flags = LED_CORE_SUSPENDRESUME, 153 .flags = LED_CORE_SUSPENDRESUME,
154}; 154};
155 155
156static int __init clevo_mail_led_probe(struct platform_device *pdev) 156static int __devinit clevo_mail_led_probe(struct platform_device *pdev)
157{ 157{
158 return led_classdev_register(&pdev->dev, &clevo_mail_led); 158 return led_classdev_register(&pdev->dev, &clevo_mail_led);
159} 159}
diff --git a/drivers/leds/leds-cobalt-qube.c b/drivers/leds/leds-cobalt-qube.c
index 059aa2924b1c..8816806accd2 100644
--- a/drivers/leds/leds-cobalt-qube.c
+++ b/drivers/leds/leds-cobalt-qube.c
@@ -28,7 +28,7 @@ static void qube_front_led_set(struct led_classdev *led_cdev,
28} 28}
29 29
30static struct led_classdev qube_front_led = { 30static struct led_classdev qube_front_led = {
31 .name = "qube-front", 31 .name = "qube::front",
32 .brightness = LED_FULL, 32 .brightness = LED_FULL,
33 .brightness_set = qube_front_led_set, 33 .brightness_set = qube_front_led_set,
34 .default_trigger = "ide-disk", 34 .default_trigger = "ide-disk",
diff --git a/drivers/leds/leds-cobalt-raq.c b/drivers/leds/leds-cobalt-raq.c
index 5f1ce810815f..defc212105f3 100644
--- a/drivers/leds/leds-cobalt-raq.c
+++ b/drivers/leds/leds-cobalt-raq.c
@@ -49,7 +49,7 @@ static void raq_web_led_set(struct led_classdev *led_cdev,
49} 49}
50 50
51static struct led_classdev raq_web_led = { 51static struct led_classdev raq_web_led = {
52 .name = "raq-web", 52 .name = "raq::web",
53 .brightness_set = raq_web_led_set, 53 .brightness_set = raq_web_led_set,
54}; 54};
55 55
@@ -70,7 +70,7 @@ static void raq_power_off_led_set(struct led_classdev *led_cdev,
70} 70}
71 71
72static struct led_classdev raq_power_off_led = { 72static struct led_classdev raq_power_off_led = {
73 .name = "raq-power-off", 73 .name = "raq::power-off",
74 .brightness_set = raq_power_off_led_set, 74 .brightness_set = raq_power_off_led_set,
75 .default_trigger = "power-off", 75 .default_trigger = "power-off",
76}; 76};
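The renames in this and the neighbouring LED patches move the drivers to the documented "devicename:colour:function" naming scheme, with empty sections allowed, which is why the names above read "qube::front" and "raq::web". A tiny helper showing how such a name is composed; the function itself is illustrative, not a kernel API.

#include <stdio.h>

/*
 * Compose an LED class device name following the
 * "devicename:colour:function" convention; empty sections are allowed.
 */
static void led_name(char *buf, size_t len, const char *devicename,
		     const char *colour, const char *function)
{
	snprintf(buf, len, "%s:%s:%s", devicename, colour, function);
}

int main(void)
{
	char name[32];

	led_name(name, sizeof(name), "qube", "", "front");
	printf("%s\n", name);		/* qube::front */
	return 0;
}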
diff --git a/drivers/leds/leds-gpio.c b/drivers/leds/leds-gpio.c
index 6b06638eb5b4..7467980b8cf9 100644
--- a/drivers/leds/leds-gpio.c
+++ b/drivers/leds/leds-gpio.c
@@ -80,7 +80,7 @@ static int __devinit create_gpio_led(const struct gpio_led *template,
80 80
81 /* skip leds that aren't available */ 81 /* skip leds that aren't available */
82 if (!gpio_is_valid(template->gpio)) { 82 if (!gpio_is_valid(template->gpio)) {
83 printk(KERN_INFO "Skipping unavilable LED gpio %d (%s)\n", 83 printk(KERN_INFO "Skipping unavailable LED gpio %d (%s)\n",
84 template->gpio, template->name); 84 template->gpio, template->name);
85 return 0; 85 return 0;
86 } 86 }
diff --git a/drivers/leds/leds-pca9532.c b/drivers/leds/leds-pca9532.c
index dba8921240f2..708a8017c21d 100644
--- a/drivers/leds/leds-pca9532.c
+++ b/drivers/leds/leds-pca9532.c
@@ -34,7 +34,7 @@ struct pca9532_data {
34 struct i2c_client *client; 34 struct i2c_client *client;
35 struct pca9532_led leds[16]; 35 struct pca9532_led leds[16];
36 struct mutex update_lock; 36 struct mutex update_lock;
37 struct input_dev *idev; 37 struct input_dev *idev;
38 struct work_struct work; 38 struct work_struct work;
39 u8 pwm[2]; 39 u8 pwm[2];
40 u8 psc[2]; 40 u8 psc[2];
@@ -53,9 +53,9 @@ MODULE_DEVICE_TABLE(i2c, pca9532_id);
53 53
54static struct i2c_driver pca9532_driver = { 54static struct i2c_driver pca9532_driver = {
55 .driver = { 55 .driver = {
56 .name = "pca9532", 56 .name = "pca9532",
57 }, 57 },
58 .probe = pca9532_probe, 58 .probe = pca9532_probe,
59 .remove = pca9532_remove, 59 .remove = pca9532_remove,
60 .id_table = pca9532_id, 60 .id_table = pca9532_id,
61}; 61};
@@ -149,7 +149,7 @@ static int pca9532_set_blink(struct led_classdev *led_cdev,
149 149
150 if (*delay_on == 0 && *delay_off == 0) { 150 if (*delay_on == 0 && *delay_off == 0) {
151 /* led subsystem ask us for a blink rate */ 151 /* led subsystem ask us for a blink rate */
152 *delay_on = 1000; 152 *delay_on = 1000;
153 *delay_off = 1000; 153 *delay_off = 1000;
154 } 154 }
155 if (*delay_on != *delay_off || *delay_on > 1690 || *delay_on < 6) 155 if (*delay_on != *delay_off || *delay_on > 1690 || *delay_on < 6)
@@ -227,7 +227,7 @@ static int pca9532_configure(struct i2c_client *client,
227 break; 227 break;
228 case PCA9532_TYPE_LED: 228 case PCA9532_TYPE_LED:
229 led->state = pled->state; 229 led->state = pled->state;
230 led->name = pled->name; 230 led->name = pled->name;
231 led->ldev.name = led->name; 231 led->ldev.name = led->name;
232 led->ldev.brightness = LED_OFF; 232 led->ldev.brightness = LED_OFF;
233 led->ldev.brightness_set = pca9532_set_brightness; 233 led->ldev.brightness_set = pca9532_set_brightness;
@@ -254,7 +254,7 @@ static int pca9532_configure(struct i2c_client *client,
254 data->idev->name = pled->name; 254 data->idev->name = pled->name;
255 data->idev->phys = "i2c/pca9532"; 255 data->idev->phys = "i2c/pca9532";
256 data->idev->id.bustype = BUS_HOST; 256 data->idev->id.bustype = BUS_HOST;
257 data->idev->id.vendor = 0x001f; 257 data->idev->id.vendor = 0x001f;
258 data->idev->id.product = 0x0001; 258 data->idev->id.product = 0x0001;
259 data->idev->id.version = 0x0100; 259 data->idev->id.version = 0x0100;
260 data->idev->evbit[0] = BIT_MASK(EV_SND); 260 data->idev->evbit[0] = BIT_MASK(EV_SND);
diff --git a/drivers/leds/leds-wm831x-status.c b/drivers/leds/leds-wm831x-status.c
new file mode 100644
index 000000000000..c586d05e336a
--- /dev/null
+++ b/drivers/leds/leds-wm831x-status.c
@@ -0,0 +1,341 @@
1/*
2 * LED driver for WM831x status LEDs
3 *
4 * Copyright(C) 2009 Wolfson Microelectronics PLC.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 *
10 */
11
12#include <linux/kernel.h>
13#include <linux/init.h>
14#include <linux/platform_device.h>
15#include <linux/leds.h>
16#include <linux/err.h>
17#include <linux/mfd/wm831x/core.h>
18#include <linux/mfd/wm831x/pdata.h>
19#include <linux/mfd/wm831x/status.h>
20
21
22struct wm831x_status {
23 struct led_classdev cdev;
24 struct wm831x *wm831x;
25 struct work_struct work;
26 struct mutex mutex;
27
28 spinlock_t value_lock;
29 int reg; /* Control register */
30 int reg_val; /* Control register value */
31
32 int blink;
33 int blink_time;
34 int blink_cyc;
35 int src;
36 enum led_brightness brightness;
37};
38
39#define to_wm831x_status(led_cdev) \
40 container_of(led_cdev, struct wm831x_status, cdev)
41
42static void wm831x_status_work(struct work_struct *work)
43{
44 struct wm831x_status *led = container_of(work, struct wm831x_status,
45 work);
46 unsigned long flags;
47
48 mutex_lock(&led->mutex);
49
50 led->reg_val &= ~(WM831X_LED_SRC_MASK | WM831X_LED_MODE_MASK |
51 WM831X_LED_DUTY_CYC_MASK | WM831X_LED_DUR_MASK);
52
53 spin_lock_irqsave(&led->value_lock, flags);
54
55 led->reg_val |= led->src << WM831X_LED_SRC_SHIFT;
56 if (led->blink) {
57 led->reg_val |= 2 << WM831X_LED_MODE_SHIFT;
58 led->reg_val |= led->blink_time << WM831X_LED_DUR_SHIFT;
59 led->reg_val |= led->blink_cyc;
60 } else {
61 if (led->brightness != LED_OFF)
62 led->reg_val |= 1 << WM831X_LED_MODE_SHIFT;
63 }
64
65 spin_unlock_irqrestore(&led->value_lock, flags);
66
67 wm831x_reg_write(led->wm831x, led->reg, led->reg_val);
68
69 mutex_unlock(&led->mutex);
70}
71
72static void wm831x_status_set(struct led_classdev *led_cdev,
73 enum led_brightness value)
74{
75 struct wm831x_status *led = to_wm831x_status(led_cdev);
76 unsigned long flags;
77
78 spin_lock_irqsave(&led->value_lock, flags);
79 led->brightness = value;
80 if (value == LED_OFF)
81 led->blink = 0;
82 schedule_work(&led->work);
83 spin_unlock_irqrestore(&led->value_lock, flags);
84}
85
86static int wm831x_status_blink_set(struct led_classdev *led_cdev,
87 unsigned long *delay_on,
88 unsigned long *delay_off)
89{
90 struct wm831x_status *led = to_wm831x_status(led_cdev);
91 unsigned long flags;
92 int ret = 0;
93
94 /* Pick some defaults if we've not been given times */
95 if (*delay_on == 0 && *delay_off == 0) {
96 *delay_on = 250;
97 *delay_off = 250;
98 }
99
100 spin_lock_irqsave(&led->value_lock, flags);
101
102 /* We only have a limited selection of settings, see if we can
103 * support the configuration we're being given */
104 switch (*delay_on) {
105 case 1000:
106 led->blink_time = 0;
107 break;
108 case 250:
109 led->blink_time = 1;
110 break;
111 case 125:
112 led->blink_time = 2;
113 break;
114 case 62:
115 case 63:
116 /* Actually 62.5ms */
117 led->blink_time = 3;
118 break;
119 default:
120 ret = -EINVAL;
121 break;
122 }
123
124 if (ret == 0) {
125 switch (*delay_off / *delay_on) {
126 case 1:
127 led->blink_cyc = 0;
128 break;
129 case 3:
130 led->blink_cyc = 1;
131 break;
132 case 4:
133 led->blink_cyc = 2;
134 break;
135 case 8:
136 led->blink_cyc = 3;
137 break;
138 default:
139 ret = -EINVAL;
140 break;
141 }
142 }
143
144 if (ret == 0)
145 led->blink = 1;
146 else
147 led->blink = 0;
148
149 /* Always update; if we fail turn off blinking since we expect
150 * a software fallback. */
151 schedule_work(&led->work);
152
153 spin_unlock_irqrestore(&led->value_lock, flags);
154
155 return ret;
156}
157
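wm831x_status_blink_set() can only honour the handful of blink periods the hardware supports, so it maps the requested on/off times to a duration code and a duty-cycle code and rejects anything else, letting the LED core fall back to software blinking. The standalone version below reproduces that mapping; the code values mirror the switch statements above but should be treated as illustrative rather than a datasheet reference.

#include <stdio.h>

/*
 * Map a requested on/off time (in ms) to the WM831x duration and duty
 * cycle codes, returning -1 for combinations the silicon cannot do.
 */
static int blink_to_codes(unsigned long on, unsigned long off,
			  int *time_code, int *cyc_code)
{
	switch (on) {
	case 1000: *time_code = 0; break;
	case 250:  *time_code = 1; break;
	case 125:  *time_code = 2; break;
	case 62:
	case 63:   *time_code = 3; break;	/* really 62.5ms */
	default:   return -1;
	}

	switch (off / on) {
	case 1: *cyc_code = 0; break;		/* 50% duty cycle */
	case 3: *cyc_code = 1; break;
	case 4: *cyc_code = 2; break;
	case 8: *cyc_code = 3; break;
	default: return -1;
	}
	return 0;
}

int main(void)
{
	int t, c;

	if (blink_to_codes(250, 750, &t, &c) == 0)
		printf("DUR=%d DUTY_CYC=%d\n", t, c);	/* DUR=1 DUTY_CYC=1 */
	return 0;
}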
158static const char *led_src_texts[] = {
159 "otp",
160 "power",
161 "charger",
162 "soft",
163};
164
165static ssize_t wm831x_status_src_show(struct device *dev,
166 struct device_attribute *attr, char *buf)
167{
168 struct led_classdev *led_cdev = dev_get_drvdata(dev);
169 struct wm831x_status *led = to_wm831x_status(led_cdev);
170 int i;
171 ssize_t ret = 0;
172
173 mutex_lock(&led->mutex);
174
175 for (i = 0; i < ARRAY_SIZE(led_src_texts); i++)
176 if (i == led->src)
177 ret += sprintf(&buf[ret], "[%s] ", led_src_texts[i]);
178 else
179 ret += sprintf(&buf[ret], "%s ", led_src_texts[i]);
180
181 mutex_unlock(&led->mutex);
182
183 ret += sprintf(&buf[ret], "\n");
184
185 return ret;
186}
187
188static ssize_t wm831x_status_src_store(struct device *dev,
189 struct device_attribute *attr,
190 const char *buf, size_t size)
191{
192 struct led_classdev *led_cdev = dev_get_drvdata(dev);
193 struct wm831x_status *led = to_wm831x_status(led_cdev);
194 char name[20];
195 int i;
196 size_t len;
197
198 name[sizeof(name) - 1] = '\0';
199 strncpy(name, buf, sizeof(name) - 1);
200 len = strlen(name);
201
202 if (len && name[len - 1] == '\n')
203 name[len - 1] = '\0';
204
205 for (i = 0; i < ARRAY_SIZE(led_src_texts); i++) {
206 if (!strcmp(name, led_src_texts[i])) {
207 mutex_lock(&led->mutex);
208
209 led->src = i;
210 schedule_work(&led->work);
211
212 mutex_unlock(&led->mutex);
213 }
214 }
215
216 return size;
217}
218
219static DEVICE_ATTR(src, 0644, wm831x_status_src_show, wm831x_status_src_store);
220
221static int wm831x_status_probe(struct platform_device *pdev)
222{
223 struct wm831x *wm831x = dev_get_drvdata(pdev->dev.parent);
224 struct wm831x_pdata *chip_pdata;
225 struct wm831x_status_pdata pdata;
226 struct wm831x_status *drvdata;
227 struct resource *res;
228 int id = pdev->id % ARRAY_SIZE(chip_pdata->status);
229 int ret;
230
231 res = platform_get_resource(pdev, IORESOURCE_IO, 0);
232 if (res == NULL) {
233 dev_err(&pdev->dev, "No I/O resource\n");
234 ret = -EINVAL;
235 goto err;
236 }
237
238 drvdata = kzalloc(sizeof(struct wm831x_status), GFP_KERNEL);
239 if (!drvdata)
240 return -ENOMEM;
241 dev_set_drvdata(&pdev->dev, drvdata);
242
243 drvdata->wm831x = wm831x;
244 drvdata->reg = res->start;
245
246 if (wm831x->dev->platform_data)
247 chip_pdata = wm831x->dev->platform_data;
248 else
249 chip_pdata = NULL;
250
251 memset(&pdata, 0, sizeof(pdata));
252 if (chip_pdata && chip_pdata->status[id])
253 memcpy(&pdata, chip_pdata->status[id], sizeof(pdata));
254 else
255 pdata.name = dev_name(&pdev->dev);
256
257 mutex_init(&drvdata->mutex);
258 INIT_WORK(&drvdata->work, wm831x_status_work);
259 spin_lock_init(&drvdata->value_lock);
260
261 /* We cache the configuration register and read startup values
262 * from it. */
263 drvdata->reg_val = wm831x_reg_read(wm831x, drvdata->reg);
264
265 if (drvdata->reg_val & WM831X_LED_MODE_MASK)
266 drvdata->brightness = LED_FULL;
267 else
268 drvdata->brightness = LED_OFF;
269
270 /* Set a default source if configured, otherwise leave the
271 * current hardware setting.
272 */
273 if (pdata.default_src == WM831X_STATUS_PRESERVE) {
274 drvdata->src = drvdata->reg_val;
275 drvdata->src &= WM831X_LED_SRC_MASK;
276 drvdata->src >>= WM831X_LED_SRC_SHIFT;
277 } else {
278 drvdata->src = pdata.default_src - 1;
279 }
280
281 drvdata->cdev.name = pdata.name;
282 drvdata->cdev.default_trigger = pdata.default_trigger;
283 drvdata->cdev.brightness_set = wm831x_status_set;
284 drvdata->cdev.blink_set = wm831x_status_blink_set;
285
286 ret = led_classdev_register(wm831x->dev, &drvdata->cdev);
287 if (ret < 0) {
288 dev_err(&pdev->dev, "Failed to register LED: %d\n", ret);
289 goto err_led;
290 }
291
292 ret = device_create_file(drvdata->cdev.dev, &dev_attr_src);
293 if (ret != 0)
294 dev_err(&pdev->dev,
295 "No source control for LED: %d\n", ret);
296
297 return 0;
298
299err_led:
300 led_classdev_unregister(&drvdata->cdev);
301 kfree(drvdata);
302err:
303 return ret;
304}
305
306static int wm831x_status_remove(struct platform_device *pdev)
307{
308 struct wm831x_status *drvdata = platform_get_drvdata(pdev);
309
310 device_remove_file(drvdata->cdev.dev, &dev_attr_src);
311 led_classdev_unregister(&drvdata->cdev);
312 kfree(drvdata);
313
314 return 0;
315}
316
317static struct platform_driver wm831x_status_driver = {
318 .driver = {
319 .name = "wm831x-status",
320 .owner = THIS_MODULE,
321 },
322 .probe = wm831x_status_probe,
323 .remove = wm831x_status_remove,
324};
325
326static int __devinit wm831x_status_init(void)
327{
328 return platform_driver_register(&wm831x_status_driver);
329}
330module_init(wm831x_status_init);
331
332static void wm831x_status_exit(void)
333{
334 platform_driver_unregister(&wm831x_status_driver);
335}
336module_exit(wm831x_status_exit);
337
338MODULE_AUTHOR("Mark Brown <broonie@opensource.wolfsonmicro.com>");
339MODULE_DESCRIPTION("WM831x status LED driver");
340MODULE_LICENSE("GPL");
341MODULE_ALIAS("platform:wm831x-status");
diff --git a/drivers/leds/ledtrig-gpio.c b/drivers/leds/ledtrig-gpio.c
index 1bc5db4ece0d..f5913372d691 100644
--- a/drivers/leds/ledtrig-gpio.c
+++ b/drivers/leds/ledtrig-gpio.c
@@ -44,22 +44,22 @@ static void gpio_trig_work(struct work_struct *work)
44 struct gpio_trig_data, work); 44 struct gpio_trig_data, work);
45 int tmp; 45 int tmp;
46 46
47 if (!gpio_data->gpio) 47 if (!gpio_data->gpio)
48 return; 48 return;
49 49
50 tmp = gpio_get_value(gpio_data->gpio); 50 tmp = gpio_get_value(gpio_data->gpio);
51 if (gpio_data->inverted) 51 if (gpio_data->inverted)
52 tmp = !tmp; 52 tmp = !tmp;
53 53
54 if (tmp) { 54 if (tmp) {
55 if (gpio_data->desired_brightness) 55 if (gpio_data->desired_brightness)
56 led_set_brightness(gpio_data->led, 56 led_set_brightness(gpio_data->led,
57 gpio_data->desired_brightness); 57 gpio_data->desired_brightness);
58 else 58 else
59 led_set_brightness(gpio_data->led, LED_FULL); 59 led_set_brightness(gpio_data->led, LED_FULL);
60 } else { 60 } else {
61 led_set_brightness(gpio_data->led, LED_OFF); 61 led_set_brightness(gpio_data->led, LED_OFF);
62 } 62 }
63} 63}
64 64
65static ssize_t gpio_trig_brightness_show(struct device *dev, 65static ssize_t gpio_trig_brightness_show(struct device *dev,
diff --git a/drivers/macintosh/via-pmu-led.c b/drivers/macintosh/via-pmu-led.c
index 55ad95671387..d242976bcfe7 100644
--- a/drivers/macintosh/via-pmu-led.c
+++ b/drivers/macintosh/via-pmu-led.c
@@ -72,7 +72,7 @@ static void pmu_led_set(struct led_classdev *led_cdev,
72} 72}
73 73
74static struct led_classdev pmu_led = { 74static struct led_classdev pmu_led = {
75 .name = "pmu-front-led", 75 .name = "pmu-led::front",
76#ifdef CONFIG_ADB_PMU_LED_IDE 76#ifdef CONFIG_ADB_PMU_LED_IDE
77 .default_trigger = "ide-disk", 77 .default_trigger = "ide-disk",
78#endif 78#endif
diff --git a/drivers/net/can/at91_can.c b/drivers/net/can/at91_can.c
new file mode 100644
index 000000000000..f67ae285a35a
--- /dev/null
+++ b/drivers/net/can/at91_can.c
@@ -0,0 +1,1186 @@
1/*
2 * at91_can.c - CAN network driver for AT91 SoC CAN controller
3 *
4 * (C) 2007 by Hans J. Koch <hjk@linutronix.de>
5 * (C) 2008, 2009 by Marc Kleine-Budde <kernel@pengutronix.de>
6 *
7 * This software may be distributed under the terms of the GNU General
8 * Public License ("GPL") version 2 as distributed in the 'COPYING'
9 * file from the main directory of the linux kernel source.
10 *
11 * Send feedback to <socketcan-users@lists.berlios.de>
12 *
13 *
14 * Your platform definition file should specify something like:
15 *
16 * static struct at91_can_data ek_can_data = {
17 * transceiver_switch = sam9263ek_transceiver_switch,
18 * };
19 *
20 * at91_add_device_can(&ek_can_data);
21 *
22 */
23
24#include <linux/clk.h>
25#include <linux/errno.h>
26#include <linux/if_arp.h>
27#include <linux/init.h>
28#include <linux/interrupt.h>
29#include <linux/kernel.h>
30#include <linux/module.h>
31#include <linux/netdevice.h>
32#include <linux/platform_device.h>
33#include <linux/skbuff.h>
34#include <linux/spinlock.h>
35#include <linux/string.h>
36#include <linux/types.h>
37
38#include <linux/can.h>
39#include <linux/can/dev.h>
40#include <linux/can/error.h>
41
42#include <mach/board.h>
43
44#define DRV_NAME "at91_can"
45#define AT91_NAPI_WEIGHT 12
46
47/*
48 * RX/TX Mailbox split
49 * don't dare to touch
50 */
51#define AT91_MB_RX_NUM 12
52#define AT91_MB_TX_SHIFT 2
53
54#define AT91_MB_RX_FIRST 0
55#define AT91_MB_RX_LAST (AT91_MB_RX_FIRST + AT91_MB_RX_NUM - 1)
56
57#define AT91_MB_RX_MASK(i) ((1 << (i)) - 1)
58#define AT91_MB_RX_SPLIT 8
59#define AT91_MB_RX_LOW_LAST (AT91_MB_RX_SPLIT - 1)
60#define AT91_MB_RX_LOW_MASK (AT91_MB_RX_MASK(AT91_MB_RX_SPLIT))
61
62#define AT91_MB_TX_NUM (1 << AT91_MB_TX_SHIFT)
63#define AT91_MB_TX_FIRST (AT91_MB_RX_LAST + 1)
64#define AT91_MB_TX_LAST (AT91_MB_TX_FIRST + AT91_MB_TX_NUM - 1)
65
66#define AT91_NEXT_PRIO_SHIFT (AT91_MB_TX_SHIFT)
67#define AT91_NEXT_PRIO_MASK (0xf << AT91_MB_TX_SHIFT)
68#define AT91_NEXT_MB_MASK (AT91_MB_TX_NUM - 1)
69#define AT91_NEXT_MASK ((AT91_MB_TX_NUM - 1) | AT91_NEXT_PRIO_MASK)
70
71/* Common registers */
72enum at91_reg {
73 AT91_MR = 0x000,
74 AT91_IER = 0x004,
75 AT91_IDR = 0x008,
76 AT91_IMR = 0x00C,
77 AT91_SR = 0x010,
78 AT91_BR = 0x014,
79 AT91_TIM = 0x018,
80 AT91_TIMESTP = 0x01C,
81 AT91_ECR = 0x020,
82 AT91_TCR = 0x024,
83 AT91_ACR = 0x028,
84};
85
86/* Mailbox registers (0 <= i <= 15) */
87#define AT91_MMR(i) (enum at91_reg)(0x200 + ((i) * 0x20))
88#define AT91_MAM(i) (enum at91_reg)(0x204 + ((i) * 0x20))
89#define AT91_MID(i) (enum at91_reg)(0x208 + ((i) * 0x20))
90#define AT91_MFID(i) (enum at91_reg)(0x20C + ((i) * 0x20))
91#define AT91_MSR(i) (enum at91_reg)(0x210 + ((i) * 0x20))
92#define AT91_MDL(i) (enum at91_reg)(0x214 + ((i) * 0x20))
93#define AT91_MDH(i) (enum at91_reg)(0x218 + ((i) * 0x20))
94#define AT91_MCR(i) (enum at91_reg)(0x21C + ((i) * 0x20))
95
96/* Register bits */
97#define AT91_MR_CANEN BIT(0)
98#define AT91_MR_LPM BIT(1)
99#define AT91_MR_ABM BIT(2)
100#define AT91_MR_OVL BIT(3)
101#define AT91_MR_TEOF BIT(4)
102#define AT91_MR_TTM BIT(5)
103#define AT91_MR_TIMFRZ BIT(6)
104#define AT91_MR_DRPT BIT(7)
105
106#define AT91_SR_RBSY BIT(29)
107
108#define AT91_MMR_PRIO_SHIFT (16)
109
110#define AT91_MID_MIDE BIT(29)
111
112#define AT91_MSR_MRTR BIT(20)
113#define AT91_MSR_MABT BIT(22)
114#define AT91_MSR_MRDY BIT(23)
115#define AT91_MSR_MMI BIT(24)
116
117#define AT91_MCR_MRTR BIT(20)
118#define AT91_MCR_MTCR BIT(23)
119
120/* Mailbox Modes */
121enum at91_mb_mode {
122 AT91_MB_MODE_DISABLED = 0,
123 AT91_MB_MODE_RX = 1,
124 AT91_MB_MODE_RX_OVRWR = 2,
125 AT91_MB_MODE_TX = 3,
126 AT91_MB_MODE_CONSUMER = 4,
127 AT91_MB_MODE_PRODUCER = 5,
128};
129
130/* Interrupt mask bits */
131#define AT91_IRQ_MB_RX ((1 << (AT91_MB_RX_LAST + 1)) \
132 - (1 << AT91_MB_RX_FIRST))
133#define AT91_IRQ_MB_TX ((1 << (AT91_MB_TX_LAST + 1)) \
134 - (1 << AT91_MB_TX_FIRST))
135#define AT91_IRQ_MB_ALL (AT91_IRQ_MB_RX | AT91_IRQ_MB_TX)
136
137#define AT91_IRQ_ERRA (1 << 16)
138#define AT91_IRQ_WARN (1 << 17)
139#define AT91_IRQ_ERRP (1 << 18)
140#define AT91_IRQ_BOFF (1 << 19)
141#define AT91_IRQ_SLEEP (1 << 20)
142#define AT91_IRQ_WAKEUP (1 << 21)
143#define AT91_IRQ_TOVF (1 << 22)
144#define AT91_IRQ_TSTP (1 << 23)
145#define AT91_IRQ_CERR (1 << 24)
146#define AT91_IRQ_SERR (1 << 25)
147#define AT91_IRQ_AERR (1 << 26)
148#define AT91_IRQ_FERR (1 << 27)
149#define AT91_IRQ_BERR (1 << 28)
150
151#define AT91_IRQ_ERR_ALL (0x1fff0000)
152#define AT91_IRQ_ERR_FRAME (AT91_IRQ_CERR | AT91_IRQ_SERR | \
153 AT91_IRQ_AERR | AT91_IRQ_FERR | AT91_IRQ_BERR)
154#define AT91_IRQ_ERR_LINE (AT91_IRQ_ERRA | AT91_IRQ_WARN | \
155 AT91_IRQ_ERRP | AT91_IRQ_BOFF)
156
157#define AT91_IRQ_ALL (0x1fffffff)
158
159struct at91_priv {
160 struct can_priv can; /* must be the first member! */
161 struct net_device *dev;
162 struct napi_struct napi;
163
164 void __iomem *reg_base;
165
166 u32 reg_sr;
167 unsigned int tx_next;
168 unsigned int tx_echo;
169 unsigned int rx_next;
170
171 struct clk *clk;
172 struct at91_can_data *pdata;
173};
174
175static struct can_bittiming_const at91_bittiming_const = {
176 .tseg1_min = 4,
177 .tseg1_max = 16,
178 .tseg2_min = 2,
179 .tseg2_max = 8,
180 .sjw_max = 4,
181 .brp_min = 2,
182 .brp_max = 128,
183 .brp_inc = 1,
184};
185
186static inline int get_tx_next_mb(const struct at91_priv *priv)
187{
188 return (priv->tx_next & AT91_NEXT_MB_MASK) + AT91_MB_TX_FIRST;
189}
190
191static inline int get_tx_next_prio(const struct at91_priv *priv)
192{
193 return (priv->tx_next >> AT91_NEXT_PRIO_SHIFT) & 0xf;
194}
195
196static inline int get_tx_echo_mb(const struct at91_priv *priv)
197{
198 return (priv->tx_echo & AT91_NEXT_MB_MASK) + AT91_MB_TX_FIRST;
199}
200
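The helpers above decode priv->tx_next, a single counter whose low AT91_MB_TX_SHIFT bits select the TX mailbox and whose next four bits carry the transmit priority (see the "theory of operation" comment further down). The small program below uses the same masks to show how the counter cycles through mailboxes 12-15 at increasing priority; the macro values are copied from the defines at the top of this file.

#include <stdio.h>

/* Same layout as the AT91_NEXT_* masks above: 4 TX mailboxes, so the
 * low 2 bits select the mailbox and the next 4 bits hold the priority. */
#define MB_TX_SHIFT	2
#define MB_TX_NUM	(1 << MB_TX_SHIFT)
#define MB_TX_FIRST	12
#define NEXT_MB_MASK	(MB_TX_NUM - 1)
#define NEXT_PRIO_SHIFT	MB_TX_SHIFT

static unsigned int tx_mb(unsigned int tx_next)
{
	return (tx_next & NEXT_MB_MASK) + MB_TX_FIRST;
}

static unsigned int tx_prio(unsigned int tx_next)
{
	return (tx_next >> NEXT_PRIO_SHIFT) & 0xf;
}

int main(void)
{
	/* A monotonically increasing counter cycles through mailbox 12..15
	 * at priority 0, then 12..15 at priority 1, and so on. */
	for (unsigned int tx_next = 0; tx_next < 10; tx_next++)
		printf("tx_next=%2u -> mb=%u prio=%u\n",
		       tx_next, tx_mb(tx_next), tx_prio(tx_next));
	return 0;
}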
201static inline u32 at91_read(const struct at91_priv *priv, enum at91_reg reg)
202{
203 return readl(priv->reg_base + reg);
204}
205
206static inline void at91_write(const struct at91_priv *priv, enum at91_reg reg,
207 u32 value)
208{
209 writel(value, priv->reg_base + reg);
210}
211
212static inline void set_mb_mode_prio(const struct at91_priv *priv,
213 unsigned int mb, enum at91_mb_mode mode, int prio)
214{
215 at91_write(priv, AT91_MMR(mb), (mode << 24) | (prio << 16));
216}
217
218static inline void set_mb_mode(const struct at91_priv *priv, unsigned int mb,
219 enum at91_mb_mode mode)
220{
221 set_mb_mode_prio(priv, mb, mode, 0);
222}
223
224static struct sk_buff *alloc_can_skb(struct net_device *dev,
225 struct can_frame **cf)
226{
227 struct sk_buff *skb;
228
229 skb = netdev_alloc_skb(dev, sizeof(struct can_frame));
230 if (unlikely(!skb))
231 return NULL;
232
233 skb->protocol = htons(ETH_P_CAN);
234 skb->ip_summed = CHECKSUM_UNNECESSARY;
235 *cf = (struct can_frame *)skb_put(skb, sizeof(struct can_frame));
236
237 return skb;
238}
239
240static struct sk_buff *alloc_can_err_skb(struct net_device *dev,
241 struct can_frame **cf)
242{
243 struct sk_buff *skb;
244
245 skb = alloc_can_skb(dev, cf);
246 if (unlikely(!skb))
247 return NULL;
248
249 memset(*cf, 0, sizeof(struct can_frame));
250 (*cf)->can_id = CAN_ERR_FLAG;
251 (*cf)->can_dlc = CAN_ERR_DLC;
252
253 return skb;
254}
255
256/*
257 * Switch transceiver on or off
258 */
259static void at91_transceiver_switch(const struct at91_priv *priv, int on)
260{
261 if (priv->pdata && priv->pdata->transceiver_switch)
262 priv->pdata->transceiver_switch(on);
263}
264
265static void at91_setup_mailboxes(struct net_device *dev)
266{
267 struct at91_priv *priv = netdev_priv(dev);
268 unsigned int i;
269
270 /*
271 * The first 12 mailboxes are used as a reception FIFO. The
272 * last mailbox is configured with overwrite option. The
273 * overwrite flag indicates a FIFO overflow.
274 */
275 for (i = AT91_MB_RX_FIRST; i < AT91_MB_RX_LAST; i++)
276 set_mb_mode(priv, i, AT91_MB_MODE_RX);
277 set_mb_mode(priv, AT91_MB_RX_LAST, AT91_MB_MODE_RX_OVRWR);
278
279 /* The last 4 mailboxes are used for transmitting. */
280 for (i = AT91_MB_TX_FIRST; i <= AT91_MB_TX_LAST; i++)
281 set_mb_mode_prio(priv, i, AT91_MB_MODE_TX, 0);
282
283 /* Reset tx and rx helper pointers */
284 priv->tx_next = priv->tx_echo = priv->rx_next = 0;
285}
286
287static int at91_set_bittiming(struct net_device *dev)
288{
289 const struct at91_priv *priv = netdev_priv(dev);
290 const struct can_bittiming *bt = &priv->can.bittiming;
291 u32 reg_br;
292
293 reg_br = ((priv->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES) << 24) |
294 ((bt->brp - 1) << 16) | ((bt->sjw - 1) << 12) |
295 ((bt->prop_seg - 1) << 8) | ((bt->phase_seg1 - 1) << 4) |
296 ((bt->phase_seg2 - 1) << 0);
297
298 dev_info(dev->dev.parent, "writing AT91_BR: 0x%08x\n", reg_br);
299
300 at91_write(priv, AT91_BR, reg_br);
301
302 return 0;
303}
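
As a worked example of the register packing above (illustration only; the bit-timing values below are hypothetical, not taken from this driver), brp=4, sjw=1, prop_seg=2, phase_seg1=7 and phase_seg2=4 with triple sampling disabled produce AT91_BR = 0x00030163. A minimal standalone sketch:

/*
 * Standalone sketch, not driver code: packs a hypothetical bit
 * timing the same way at91_set_bittiming() does above.
 */
#include <stdio.h>

int main(void)
{
	unsigned int brp = 4, sjw = 1, prop_seg = 2;
	unsigned int phase_seg1 = 7, phase_seg2 = 4;
	unsigned int reg_br;

	reg_br = ((brp - 1) << 16) | ((sjw - 1) << 12) |
		 ((prop_seg - 1) << 8) | ((phase_seg1 - 1) << 4) |
		 ((phase_seg2 - 1) << 0);

	printf("AT91_BR = 0x%08x\n", reg_br);	/* 0x00030163 */
	return 0;
}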
304
305static void at91_chip_start(struct net_device *dev)
306{
307 struct at91_priv *priv = netdev_priv(dev);
308 u32 reg_mr, reg_ier;
309
310 /* disable interrupts */
311 at91_write(priv, AT91_IDR, AT91_IRQ_ALL);
312
313 /* disable chip */
314 reg_mr = at91_read(priv, AT91_MR);
315 at91_write(priv, AT91_MR, reg_mr & ~AT91_MR_CANEN);
316
317 at91_setup_mailboxes(dev);
318 at91_transceiver_switch(priv, 1);
319
320 /* enable chip */
321 at91_write(priv, AT91_MR, AT91_MR_CANEN);
322
323 priv->can.state = CAN_STATE_ERROR_ACTIVE;
324
325 /* Enable interrupts */
326 reg_ier = AT91_IRQ_MB_RX | AT91_IRQ_ERRP | AT91_IRQ_ERR_FRAME;
327 at91_write(priv, AT91_IDR, AT91_IRQ_ALL);
328 at91_write(priv, AT91_IER, reg_ier);
329}
330
331static void at91_chip_stop(struct net_device *dev, enum can_state state)
332{
333 struct at91_priv *priv = netdev_priv(dev);
334 u32 reg_mr;
335
336 /* disable interrupts */
337 at91_write(priv, AT91_IDR, AT91_IRQ_ALL);
338
339 reg_mr = at91_read(priv, AT91_MR);
340 at91_write(priv, AT91_MR, reg_mr & ~AT91_MR_CANEN);
341
342 at91_transceiver_switch(priv, 0);
343 priv->can.state = state;
344}
345
346/*
347 * theory of operation:
348 *
349 * According to the datasheet, priority 0 is the highest priority and
350 * 15 is the lowest. If two mailboxes have the same priority level,
351 * the message of the mailbox with the lowest number is sent first.
352 *
353 * We use the first TX mailbox (AT91_MB_TX_FIRST) with prio 0, then
354 * the next mailbox with prio 0, and so on, until all mailboxes are
355 * used. Then we start again from mailbox AT91_MB_TX_FIRST, but with
356 * prio 1: mailbox AT91_MB_TX_FIRST + 1 gets prio 1, and so on. When
357 * we reach the last mailbox with prio 15, we have to stop sending,
358 * wait for all messages to be delivered, then start again with
359 * mailbox AT91_MB_TX_FIRST and prio 0.
360 *
361 * We use priv->tx_next as a counter for the next transmission
362 * mailbox, but without the offset AT91_MB_TX_FIRST. The lower bits
363 * encode the mailbox number, the upper 4 bits the mailbox priority:
364 *
365 * priv->tx_next = (prio << AT91_NEXT_PRIO_SHIFT) |
366 * (mb - AT91_MB_TX_FIRST);
367 *
368 */
369static netdev_tx_t at91_start_xmit(struct sk_buff *skb, struct net_device *dev)
370{
371 struct at91_priv *priv = netdev_priv(dev);
372 struct net_device_stats *stats = &dev->stats;
373 struct can_frame *cf = (struct can_frame *)skb->data;
374 unsigned int mb, prio;
375 u32 reg_mid, reg_mcr;
376
377 mb = get_tx_next_mb(priv);
378 prio = get_tx_next_prio(priv);
379
380 if (unlikely(!(at91_read(priv, AT91_MSR(mb)) & AT91_MSR_MRDY))) {
381 netif_stop_queue(dev);
382
383 dev_err(dev->dev.parent,
384 "BUG! TX buffer full when queue awake!\n");
385 return NETDEV_TX_BUSY;
386 }
387
388 if (cf->can_id & CAN_EFF_FLAG)
389 reg_mid = (cf->can_id & CAN_EFF_MASK) | AT91_MID_MIDE;
390 else
391 reg_mid = (cf->can_id & CAN_SFF_MASK) << 18;
392
393 reg_mcr = ((cf->can_id & CAN_RTR_FLAG) ? AT91_MCR_MRTR : 0) |
394 (cf->can_dlc << 16) | AT91_MCR_MTCR;
395
396 /* disable MB while writing ID (see datasheet) */
397 set_mb_mode(priv, mb, AT91_MB_MODE_DISABLED);
398 at91_write(priv, AT91_MID(mb), reg_mid);
399 set_mb_mode_prio(priv, mb, AT91_MB_MODE_TX, prio);
400
401 at91_write(priv, AT91_MDL(mb), *(u32 *)(cf->data + 0));
402 at91_write(priv, AT91_MDH(mb), *(u32 *)(cf->data + 4));
403
404 /* This triggers transmission */
405 at91_write(priv, AT91_MCR(mb), reg_mcr);
406
407 stats->tx_bytes += cf->can_dlc;
408 dev->trans_start = jiffies;
409
410 /* _NOTE_: subtract the AT91_MB_TX_FIRST offset from mb! */
411 can_put_echo_skb(skb, dev, mb - AT91_MB_TX_FIRST);
412
413 /*
414 * We have to stop the queue and deliver all messages in case
415 * of a prio+mb counter wrap-around. This is the case if the
416 * tx_next buffer's prio and mailbox both equal 0.
417 *
418 * Also stop the queue if the next buffer is still in use
419 * (== not ready).
420 */
421 priv->tx_next++;
422 if (!(at91_read(priv, AT91_MSR(get_tx_next_mb(priv))) &
423 AT91_MSR_MRDY) ||
424 (priv->tx_next & AT91_NEXT_MASK) == 0)
425 netif_stop_queue(dev);
426
427 /* Enable interrupt for this mailbox */
428 at91_write(priv, AT91_IER, 1 << mb);
429
430 return NETDEV_TX_OK;
431}
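
The prio/mailbox encoding described in the theory-of-operation comment above can be made concrete with a small standalone sketch. The constant values used here (4 TX mailboxes starting at mailbox 12, a 2-bit mailbox field) are assumptions for illustration; the real values come from the AT91_* defines earlier in this file.

/*
 * Illustrative sketch, not driver code: walks tx_next the way
 * at91_start_xmit() and the get_tx_next_*() helpers above do.
 */
#include <stdio.h>

#define MB_TX_FIRST	12	/* assumed first TX mailbox */
#define NEXT_MB_MASK	0x3	/* assumed: 4 TX mailboxes */
#define NEXT_PRIO_SHIFT	2

int main(void)
{
	unsigned int tx_next;

	for (tx_next = 0; tx_next < 12; tx_next++) {
		unsigned int mb	  = (tx_next & NEXT_MB_MASK) + MB_TX_FIRST;
		unsigned int prio = (tx_next >> NEXT_PRIO_SHIFT) & 0xf;

		printf("tx_next=%2u -> mailbox %u, prio %u\n",
		       tx_next, mb, prio);
	}
	return 0;
}

Running it shows mailboxes 12..15 being handed out with prio 0, then again with prio 1, and so on, matching the scheme described above.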
432
433/**
434 * at91_activate_rx_low - activate lower rx mailboxes
435 * @priv: at91 context
436 *
437 * Reenables the lower mailboxes for reception of new CAN messages
438 */
439static inline void at91_activate_rx_low(const struct at91_priv *priv)
440{
441 u32 mask = AT91_MB_RX_LOW_MASK;
442 at91_write(priv, AT91_TCR, mask);
443}
444
445/**
446 * at91_activate_rx_mb - reactivate a single rx mailbox
447 * @priv: at91 context
448 * @mb: mailbox to reactivate
449 *
450 * Reenables given mailbox for reception of new CAN messages
451 */
452static inline void at91_activate_rx_mb(const struct at91_priv *priv,
453 unsigned int mb)
454{
455 u32 mask = 1 << mb;
456 at91_write(priv, AT91_TCR, mask);
457}
458
459/**
460 * at91_rx_overflow_err - send error frame due to rx overflow
461 * @dev: net device
462 */
463static void at91_rx_overflow_err(struct net_device *dev)
464{
465 struct net_device_stats *stats = &dev->stats;
466 struct sk_buff *skb;
467 struct can_frame *cf;
468
469 dev_dbg(dev->dev.parent, "RX buffer overflow\n");
470 stats->rx_over_errors++;
471 stats->rx_errors++;
472
473 skb = alloc_can_err_skb(dev, &cf);
474 if (unlikely(!skb))
475 return;
476
477 cf->can_id |= CAN_ERR_CRTL;
478 cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
479 netif_receive_skb(skb);
480
481 stats->rx_packets++;
482 stats->rx_bytes += cf->can_dlc;
483}
484
485/**
486 * at91_read_mb - read CAN msg from mailbox (lowlevel impl)
487 * @dev: net device
488 * @mb: mailbox number to read from
489 * @cf: can frame where to store message
490 *
491 * Reads a CAN message from the given mailbox and stores the data
492 * into the given can frame. "mb" and "cf" must be valid.
493 */
494static void at91_read_mb(struct net_device *dev, unsigned int mb,
495 struct can_frame *cf)
496{
497 const struct at91_priv *priv = netdev_priv(dev);
498 u32 reg_msr, reg_mid;
499
500 reg_mid = at91_read(priv, AT91_MID(mb));
501 if (reg_mid & AT91_MID_MIDE)
502 cf->can_id = ((reg_mid >> 0) & CAN_EFF_MASK) | CAN_EFF_FLAG;
503 else
504 cf->can_id = (reg_mid >> 18) & CAN_SFF_MASK;
505
506 reg_msr = at91_read(priv, AT91_MSR(mb));
507 if (reg_msr & AT91_MSR_MRTR)
508 cf->can_id |= CAN_RTR_FLAG;
509 cf->can_dlc = min_t(__u8, (reg_msr >> 16) & 0xf, 8);
510
511 *(u32 *)(cf->data + 0) = at91_read(priv, AT91_MDL(mb));
512 *(u32 *)(cf->data + 4) = at91_read(priv, AT91_MDH(mb));
513
514 if (unlikely(mb == AT91_MB_RX_LAST && reg_msr & AT91_MSR_MMI))
515 at91_rx_overflow_err(dev);
516}
517
518/**
519 * at91_read_msg - read CAN message from mailbox
520 * @dev: net device
521 * @mb: mailbox to read from
522 *
523 * Reads a CAN message from the given mailbox, puts it into the linux
524 * network RX queue and does all housekeeping chores (stats, ...)
525 */
526static void at91_read_msg(struct net_device *dev, unsigned int mb)
527{
528 struct net_device_stats *stats = &dev->stats;
529 struct can_frame *cf;
530 struct sk_buff *skb;
531
532 skb = alloc_can_skb(dev, &cf);
533 if (unlikely(!skb)) {
534 stats->rx_dropped++;
535 return;
536 }
537
538 at91_read_mb(dev, mb, cf);
539 netif_receive_skb(skb);
540
541 stats->rx_packets++;
542 stats->rx_bytes += cf->can_dlc;
543}
544
545/**
546 * at91_poll_rx - read multiple CAN messages from mailboxes
547 * @dev: net device
548 * @quota: max number of packets we're allowed to receive
549 *
550 * Theory of Operation:
551 *
552 * 12 of the 16 mailboxes on the chip are reserved for RX. We split
553 * them into 2 groups: the lower group holds 8 mailboxes, the upper 4.
554 *
555 * Like it or not, the chip always stores a received CAN message in
556 * the first free mailbox it finds (starting with the lowest). This
557 * makes it very difficult to read the messages in the right order
558 * from the chip. This is how we work around that problem:
559 *
560 * The first message goes into mb nr. 0 and issues an interrupt. All
561 * rx ints are disabled in the interrupt handler and a napi poll is
562 * scheduled. We read the mailbox, but do _not_ reenable it (which
563 * would let it receive another message).
564 *
565 * lower mbxs upper
566 * ______^______ __^__
567 * / \ / \
568 * +-+-+-+-+-+-+-+-++-+-+-+-+
569 * |x|x|x|x|x|x|x|x|| | | | |
570 * +-+-+-+-+-+-+-+-++-+-+-+-+
571 * 0 0 0 0 0 0 0 0 0 0 1 1 \ mail
572 * 0 1 2 3 4 5 6 7 8 9 0 1 / box
573 *
574 * The variable priv->rx_next points to the next mailbox to read a
575 * message from. As long as we're in the lower mailboxes, we just
576 * read each mailbox but do not reenable it.
577 *
578 * With completion of the last of the lower mailboxes, we reenable the
579 * whole first group, but continue to look for filled mailboxes in the
580 * upper mailboxes. Think of the second group as overflow mailboxes,
581 * which take CAN messages if the lower group is full. While in the
582 * upper group we reenable each mailbox right after reading it, giving
583 * the chip more room to store messages.
584 *
585 * After finishing we look again in the lower group if we still have
586 * quota left.
587 *
588 */
589static int at91_poll_rx(struct net_device *dev, int quota)
590{
591 struct at91_priv *priv = netdev_priv(dev);
592 u32 reg_sr = at91_read(priv, AT91_SR);
593 const unsigned long *addr = (unsigned long *)&reg_sr;
594 unsigned int mb;
595 int received = 0;
596
597 if (priv->rx_next > AT91_MB_RX_LOW_LAST &&
598 reg_sr & AT91_MB_RX_LOW_MASK)
599 dev_info(dev->dev.parent,
600 "order of incoming frames cannot be guaranteed\n");
601
602 again:
603 for (mb = find_next_bit(addr, AT91_MB_RX_NUM, priv->rx_next);
604 mb < AT91_MB_RX_NUM && quota > 0;
605 reg_sr = at91_read(priv, AT91_SR),
606 mb = find_next_bit(addr, AT91_MB_RX_NUM, ++priv->rx_next)) {
607 at91_read_msg(dev, mb);
608
609 /* reactivate mailboxes */
610 if (mb == AT91_MB_RX_LOW_LAST)
611 /* all lower mailboxes, if we just finished the last one */
612 at91_activate_rx_low(priv);
613 else if (mb > AT91_MB_RX_LOW_LAST)
614 /* only the mailbox we read */
615 at91_activate_rx_mb(priv, mb);
616
617 received++;
618 quota--;
619 }
620
621 /* upper group completed, look again in lower */
622 if (priv->rx_next > AT91_MB_RX_LOW_LAST &&
623 quota > 0 && mb >= AT91_MB_RX_NUM) {
624 priv->rx_next = 0;
625 goto again;
626 }
627
628 return received;
629}
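
The two-group reactivation policy from the comment above boils down to two cases, sketched here in a standalone program; the 8 low / 4 high mailbox split and the status word are assumptions chosen purely for illustration.

/*
 * Standalone sketch, not driver code: prints which mailboxes get
 * reenabled while scanning a faked status word, following the same
 * policy as at91_poll_rx() above.
 */
#include <stdio.h>

#define MB_RX_NUM	12
#define MB_RX_LOW_LAST	 7

int main(void)
{
	unsigned int reg_sr = 0x4f3;	/* frames in mailboxes 0,1,4,5,6,7,10 */
	unsigned int mb;

	for (mb = 0; mb < MB_RX_NUM; mb++) {
		if (!(reg_sr & (1u << mb)))
			continue;

		printf("read mailbox %u\n", mb);

		if (mb == MB_RX_LOW_LAST)
			printf("  reenable the whole low group\n");
		else if (mb > MB_RX_LOW_LAST)
			printf("  reenable mailbox %u only\n", mb);
	}
	return 0;
}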
630
631static void at91_poll_err_frame(struct net_device *dev,
632 struct can_frame *cf, u32 reg_sr)
633{
634 struct at91_priv *priv = netdev_priv(dev);
635
636 /* CRC error */
637 if (reg_sr & AT91_IRQ_CERR) {
638 dev_dbg(dev->dev.parent, "CERR irq\n");
639 dev->stats.rx_errors++;
640 priv->can.can_stats.bus_error++;
641 cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
642 }
643
644 /* Stuffing Error */
645 if (reg_sr & AT91_IRQ_SERR) {
646 dev_dbg(dev->dev.parent, "SERR irq\n");
647 dev->stats.rx_errors++;
648 priv->can.can_stats.bus_error++;
649 cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
650 cf->data[2] |= CAN_ERR_PROT_STUFF;
651 }
652
653 /* Acknowledgement Error */
654 if (reg_sr & AT91_IRQ_AERR) {
655 dev_dbg(dev->dev.parent, "AERR irq\n");
656 dev->stats.tx_errors++;
657 cf->can_id |= CAN_ERR_ACK;
658 }
659
660 /* Form error */
661 if (reg_sr & AT91_IRQ_FERR) {
662 dev_dbg(dev->dev.parent, "FERR irq\n");
663 dev->stats.rx_errors++;
664 priv->can.can_stats.bus_error++;
665 cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
666 cf->data[2] |= CAN_ERR_PROT_FORM;
667 }
668
669 /* Bit Error */
670 if (reg_sr & AT91_IRQ_BERR) {
671 dev_dbg(dev->dev.parent, "BERR irq\n");
672 dev->stats.tx_errors++;
673 priv->can.can_stats.bus_error++;
674 cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
675 cf->data[2] |= CAN_ERR_PROT_BIT;
676 }
677}
678
679static int at91_poll_err(struct net_device *dev, int quota, u32 reg_sr)
680{
681 struct sk_buff *skb;
682 struct can_frame *cf;
683
684 if (quota == 0)
685 return 0;
686
687 skb = alloc_can_err_skb(dev, &cf);
688 if (unlikely(!skb))
689 return 0;
690
691 at91_poll_err_frame(dev, cf, reg_sr);
692 netif_receive_skb(skb);
693
694 dev->last_rx = jiffies;
695 dev->stats.rx_packets++;
696 dev->stats.rx_bytes += cf->can_dlc;
697
698 return 1;
699}
700
701static int at91_poll(struct napi_struct *napi, int quota)
702{
703 struct net_device *dev = napi->dev;
704 const struct at91_priv *priv = netdev_priv(dev);
705 u32 reg_sr = at91_read(priv, AT91_SR);
706 int work_done = 0;
707
708 if (reg_sr & AT91_IRQ_MB_RX)
709 work_done += at91_poll_rx(dev, quota - work_done);
710
711 /*
712 * The error bits are cleared on read,
713 * so use the value saved by the irq handler.
714 */
715 reg_sr |= priv->reg_sr;
716 if (reg_sr & AT91_IRQ_ERR_FRAME)
717 work_done += at91_poll_err(dev, quota - work_done, reg_sr);
718
719 if (work_done < quota) {
720 /* enable IRQs for frame errors and all mailboxes >= rx_next */
721 u32 reg_ier = AT91_IRQ_ERR_FRAME;
722 reg_ier |= AT91_IRQ_MB_RX & ~AT91_MB_RX_MASK(priv->rx_next);
723
724 napi_complete(napi);
725 at91_write(priv, AT91_IER, reg_ier);
726 }
727
728 return work_done;
729}
730
731/*
732 * theory of operation:
733 *
734 * priv->tx_echo holds the number of the oldest can_frame put for
735 * transmission into the hardware, but not yet ACKed by the CAN tx
736 * complete IRQ.
737 *
738 * We iterate from priv->tx_echo to priv->tx_next; if a packet has
739 * been transmitted, we echo it back to the CAN framework. If we
740 * discover a not yet transmitted packet, we stop looking for more.
741 *
742 */
743static void at91_irq_tx(struct net_device *dev, u32 reg_sr)
744{
745 struct at91_priv *priv = netdev_priv(dev);
746 u32 reg_msr;
747 unsigned int mb;
748
749 /* masking of reg_sr not needed, already done by at91_irq */
750
751 for (/* nix */; (priv->tx_next - priv->tx_echo) > 0; priv->tx_echo++) {
752 mb = get_tx_echo_mb(priv);
753
754 /* no event in mailbox? */
755 if (!(reg_sr & (1 << mb)))
756 break;
757
758 /* Disable irq for this TX mailbox */
759 at91_write(priv, AT91_IDR, 1 << mb);
760
761 /*
762 * only echo if the mailbox signals us a transfer
763 * complete (MSR_MRDY). Otherwise it's a transfer
764 * abort. "can_bus_off()" takes care of the skbs
765 * parked in the echo queue.
766 */
767 reg_msr = at91_read(priv, AT91_MSR(mb));
768 if (likely(reg_msr & AT91_MSR_MRDY &&
769 ~reg_msr & AT91_MSR_MABT)) {
770 /* _NOTE_: subtract the AT91_MB_TX_FIRST offset from mb! */
771 can_get_echo_skb(dev, mb - AT91_MB_TX_FIRST);
772 dev->stats.tx_packets++;
773 }
774 }
775
776 /*
777 * Restart the queue if we don't have a wrap-around, but also
778 * restart if we get a TX int for the last CAN frame directly
779 * before a wrap-around.
780 */
781 if ((priv->tx_next & AT91_NEXT_MASK) != 0 ||
782 (priv->tx_echo & AT91_NEXT_MASK) == 0)
783 netif_wake_queue(dev);
784}
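
The loop condition above works because tx_next and tx_echo are free-running unsigned counters: their difference is always the number of frames queued but not yet acknowledged, even across an overflow. A minimal sketch of that property:

/*
 * Minimal sketch, not driver code: the unsigned difference stays
 * correct across counter overflow, so the "frames in flight" count
 * used by at91_irq_tx() needs no special-casing.
 */
#include <stdio.h>

int main(void)
{
	unsigned int tx_echo = 0xfffffffeu;	/* about to wrap */
	unsigned int tx_next = tx_echo + 5;	/* 5 frames queued since */

	printf("frames in flight: %u\n", tx_next - tx_echo);	/* prints 5 */
	return 0;
}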
785
786static void at91_irq_err_state(struct net_device *dev,
787 struct can_frame *cf, enum can_state new_state)
788{
789 struct at91_priv *priv = netdev_priv(dev);
790 u32 reg_idr, reg_ier, reg_ecr;
791 u8 tec, rec;
792
793 reg_ecr = at91_read(priv, AT91_ECR);
794 rec = reg_ecr & 0xff;
795 tec = reg_ecr >> 16;
796
797 switch (priv->can.state) {
798 case CAN_STATE_ERROR_ACTIVE:
799 /*
800 * from: ERROR_ACTIVE
801 * to : ERROR_WARNING, ERROR_PASSIVE, BUS_OFF
802 * => : there was a warning int
803 */
804 if (new_state >= CAN_STATE_ERROR_WARNING &&
805 new_state <= CAN_STATE_BUS_OFF) {
806 dev_dbg(dev->dev.parent, "Error Warning IRQ\n");
807 priv->can.can_stats.error_warning++;
808
809 cf->can_id |= CAN_ERR_CRTL;
810 cf->data[1] = (tec > rec) ?
811 CAN_ERR_CRTL_TX_WARNING :
812 CAN_ERR_CRTL_RX_WARNING;
813 }
814 case CAN_STATE_ERROR_WARNING: /* fallthrough */
815 /*
816 * from: ERROR_ACTIVE, ERROR_WARNING
817 * to : ERROR_PASSIVE, BUS_OFF
818 * => : error passive int
819 */
820 if (new_state >= CAN_STATE_ERROR_PASSIVE &&
821 new_state <= CAN_STATE_BUS_OFF) {
822 dev_dbg(dev->dev.parent, "Error Passive IRQ\n");
823 priv->can.can_stats.error_passive++;
824
825 cf->can_id |= CAN_ERR_CRTL;
826 cf->data[1] = (tec > rec) ?
827 CAN_ERR_CRTL_TX_PASSIVE :
828 CAN_ERR_CRTL_RX_PASSIVE;
829 }
830 break;
831 case CAN_STATE_BUS_OFF:
832 /*
833 * from: BUS_OFF
834 * to : ERROR_ACTIVE, ERROR_WARNING, ERROR_PASSIVE
835 */
836 if (new_state <= CAN_STATE_ERROR_PASSIVE) {
837 cf->can_id |= CAN_ERR_RESTARTED;
838
839 dev_dbg(dev->dev.parent, "restarted\n");
840 priv->can.can_stats.restarts++;
841
842 netif_carrier_on(dev);
843 netif_wake_queue(dev);
844 }
845 break;
846 default:
847 break;
848 }
849
850
851 /* process state changes depending on the new state */
852 switch (new_state) {
853 case CAN_STATE_ERROR_ACTIVE:
854 /*
855 * Actually we want to enable AT91_IRQ_WARN here, but
856 * it screws up the system under certain
857 * circumstances, so just enable AT91_IRQ_ERRP instead,
858 * hence the "fallthrough"
859 */
860 dev_dbg(dev->dev.parent, "Error Active\n");
861 cf->can_id |= CAN_ERR_PROT;
862 cf->data[2] = CAN_ERR_PROT_ACTIVE;
863 case CAN_STATE_ERROR_WARNING: /* fallthrough */
864 reg_idr = AT91_IRQ_ERRA | AT91_IRQ_WARN | AT91_IRQ_BOFF;
865 reg_ier = AT91_IRQ_ERRP;
866 break;
867 case CAN_STATE_ERROR_PASSIVE:
868 reg_idr = AT91_IRQ_ERRA | AT91_IRQ_WARN | AT91_IRQ_ERRP;
869 reg_ier = AT91_IRQ_BOFF;
870 break;
871 case CAN_STATE_BUS_OFF:
872 reg_idr = AT91_IRQ_ERRA | AT91_IRQ_ERRP |
873 AT91_IRQ_WARN | AT91_IRQ_BOFF;
874 reg_ier = 0;
875
876 cf->can_id |= CAN_ERR_BUSOFF;
877
878 dev_dbg(dev->dev.parent, "bus-off\n");
879 netif_carrier_off(dev);
880 priv->can.can_stats.bus_off++;
881
882 /* turn off chip, if restart is disabled */
883 if (!priv->can.restart_ms) {
884 at91_chip_stop(dev, CAN_STATE_BUS_OFF);
885 return;
886 }
887 break;
888 default:
889 break;
890 }
891
892 at91_write(priv, AT91_IDR, reg_idr);
893 at91_write(priv, AT91_IER, reg_ier);
894}
895
896static void at91_irq_err(struct net_device *dev)
897{
898 struct at91_priv *priv = netdev_priv(dev);
899 struct sk_buff *skb;
900 struct can_frame *cf;
901 enum can_state new_state;
902 u32 reg_sr;
903
904 reg_sr = at91_read(priv, AT91_SR);
905
906 /* we need to look at the unmasked reg_sr */
907 if (unlikely(reg_sr & AT91_IRQ_BOFF))
908 new_state = CAN_STATE_BUS_OFF;
909 else if (unlikely(reg_sr & AT91_IRQ_ERRP))
910 new_state = CAN_STATE_ERROR_PASSIVE;
911 else if (unlikely(reg_sr & AT91_IRQ_WARN))
912 new_state = CAN_STATE_ERROR_WARNING;
913 else if (likely(reg_sr & AT91_IRQ_ERRA))
914 new_state = CAN_STATE_ERROR_ACTIVE;
915 else {
916 dev_err(dev->dev.parent, "BUG! hardware in undefined state\n");
917 return;
918 }
919
920 /* state hasn't changed */
921 if (likely(new_state == priv->can.state))
922 return;
923
924 skb = alloc_can_err_skb(dev, &cf);
925 if (unlikely(!skb))
926 return;
927
928 at91_irq_err_state(dev, cf, new_state);
929 netif_rx(skb);
930
931 dev->last_rx = jiffies;
932 dev->stats.rx_packets++;
933 dev->stats.rx_bytes += cf->can_dlc;
934
935 priv->can.state = new_state;
936}
937
938/*
939 * interrupt handler
940 */
941static irqreturn_t at91_irq(int irq, void *dev_id)
942{
943 struct net_device *dev = dev_id;
944 struct at91_priv *priv = netdev_priv(dev);
945 irqreturn_t handled = IRQ_NONE;
946 u32 reg_sr, reg_imr;
947
948 reg_sr = at91_read(priv, AT91_SR);
949 reg_imr = at91_read(priv, AT91_IMR);
950
951 /* Ignore masked interrupts */
952 reg_sr &= reg_imr;
953 if (!reg_sr)
954 goto exit;
955
956 handled = IRQ_HANDLED;
957
958 /* Receive or error interrupt? -> napi */
959 if (reg_sr & (AT91_IRQ_MB_RX | AT91_IRQ_ERR_FRAME)) {
960 /*
961 * The error bits are cleared on read,
962 * so save them for later use.
963 */
964 priv->reg_sr = reg_sr;
965 at91_write(priv, AT91_IDR,
966 AT91_IRQ_MB_RX | AT91_IRQ_ERR_FRAME);
967 napi_schedule(&priv->napi);
968 }
969
970 /* Transmission complete interrupt */
971 if (reg_sr & AT91_IRQ_MB_TX)
972 at91_irq_tx(dev, reg_sr);
973
974 at91_irq_err(dev);
975
976 exit:
977 return handled;
978}
979
980static int at91_open(struct net_device *dev)
981{
982 struct at91_priv *priv = netdev_priv(dev);
983 int err;
984
985 clk_enable(priv->clk);
986
987 /* check or determine and set bittiming */
988 err = open_candev(dev);
989 if (err)
990 goto out;
991
992 /* register interrupt handler */
993 if (request_irq(dev->irq, at91_irq, IRQF_SHARED,
994 dev->name, dev)) {
995 err = -EAGAIN;
996 goto out_close;
997 }
998
999 /* start chip and queuing */
1000 at91_chip_start(dev);
1001 napi_enable(&priv->napi);
1002 netif_start_queue(dev);
1003
1004 return 0;
1005
1006 out_close:
1007 close_candev(dev);
1008 out:
1009 clk_disable(priv->clk);
1010
1011 return err;
1012}
1013
1014/*
1015 * stop CAN bus activity
1016 */
1017static int at91_close(struct net_device *dev)
1018{
1019 struct at91_priv *priv = netdev_priv(dev);
1020
1021 netif_stop_queue(dev);
1022 napi_disable(&priv->napi);
1023 at91_chip_stop(dev, CAN_STATE_STOPPED);
1024
1025 free_irq(dev->irq, dev);
1026 clk_disable(priv->clk);
1027
1028 close_candev(dev);
1029
1030 return 0;
1031}
1032
1033static int at91_set_mode(struct net_device *dev, enum can_mode mode)
1034{
1035 switch (mode) {
1036 case CAN_MODE_START:
1037 at91_chip_start(dev);
1038 netif_wake_queue(dev);
1039 break;
1040
1041 default:
1042 return -EOPNOTSUPP;
1043 }
1044
1045 return 0;
1046}
1047
1048static const struct net_device_ops at91_netdev_ops = {
1049 .ndo_open = at91_open,
1050 .ndo_stop = at91_close,
1051 .ndo_start_xmit = at91_start_xmit,
1052};
1053
1054static int __init at91_can_probe(struct platform_device *pdev)
1055{
1056 struct net_device *dev;
1057 struct at91_priv *priv;
1058 struct resource *res;
1059 struct clk *clk;
1060 void __iomem *addr;
1061 int err, irq;
1062
1063 clk = clk_get(&pdev->dev, "can_clk");
1064 if (IS_ERR(clk)) {
1065 dev_err(&pdev->dev, "no clock defined\n");
1066 err = -ENODEV;
1067 goto exit;
1068 }
1069
1070 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1071 irq = platform_get_irq(pdev, 0);
1072 if (!res || !irq) {
1073 err = -ENODEV;
1074 goto exit_put;
1075 }
1076
1077 if (!request_mem_region(res->start,
1078 resource_size(res),
1079 pdev->name)) {
1080 err = -EBUSY;
1081 goto exit_put;
1082 }
1083
1084 addr = ioremap_nocache(res->start, resource_size(res));
1085 if (!addr) {
1086 err = -ENOMEM;
1087 goto exit_release;
1088 }
1089
1090 dev = alloc_candev(sizeof(struct at91_priv));
1091 if (!dev) {
1092 err = -ENOMEM;
1093 goto exit_iounmap;
1094 }
1095
1096 dev->netdev_ops = &at91_netdev_ops;
1097 dev->irq = irq;
1098 dev->flags |= IFF_ECHO;
1099
1100 priv = netdev_priv(dev);
1101 priv->can.clock.freq = clk_get_rate(clk);
1102 priv->can.bittiming_const = &at91_bittiming_const;
1103 priv->can.do_set_bittiming = at91_set_bittiming;
1104 priv->can.do_set_mode = at91_set_mode;
1105 priv->reg_base = addr;
1106 priv->dev = dev;
1107 priv->clk = clk;
1108 priv->pdata = pdev->dev.platform_data;
1109
1110 netif_napi_add(dev, &priv->napi, at91_poll, AT91_NAPI_WEIGHT);
1111
1112 dev_set_drvdata(&pdev->dev, dev);
1113 SET_NETDEV_DEV(dev, &pdev->dev);
1114
1115 err = register_candev(dev);
1116 if (err) {
1117 dev_err(&pdev->dev, "registering netdev failed\n");
1118 goto exit_free;
1119 }
1120
1121 dev_info(&pdev->dev, "device registered (reg_base=%p, irq=%d)\n",
1122 priv->reg_base, dev->irq);
1123
1124 return 0;
1125
1126 exit_free:
1127 free_netdev(dev);
1128 exit_iounmap:
1129 iounmap(addr);
1130 exit_release:
1131 release_mem_region(res->start, resource_size(res));
1132 exit_put:
1133 clk_put(clk);
1134 exit:
1135 return err;
1136}
1137
1138static int __devexit at91_can_remove(struct platform_device *pdev)
1139{
1140 struct net_device *dev = platform_get_drvdata(pdev);
1141 struct at91_priv *priv = netdev_priv(dev);
1142 struct resource *res;
1143
1144 unregister_netdev(dev);
1145
1146 platform_set_drvdata(pdev, NULL);
1147
1148 free_netdev(dev);
1149
1150 iounmap(priv->reg_base);
1151
1152 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1153 release_mem_region(res->start, resource_size(res));
1154
1155 clk_put(priv->clk);
1156
1157 return 0;
1158}
1159
1160static struct platform_driver at91_can_driver = {
1161 .probe = at91_can_probe,
1162 .remove = __devexit_p(at91_can_remove),
1163 .driver = {
1164 .name = DRV_NAME,
1165 .owner = THIS_MODULE,
1166 },
1167};
1168
1169static int __init at91_can_module_init(void)
1170{
1171 printk(KERN_INFO "%s netdevice driver\n", DRV_NAME);
1172 return platform_driver_register(&at91_can_driver);
1173}
1174
1175static void __exit at91_can_module_exit(void)
1176{
1177 platform_driver_unregister(&at91_can_driver);
1178 printk(KERN_INFO "%s: driver removed\n", DRV_NAME);
1179}
1180
1181module_init(at91_can_module_init);
1182module_exit(at91_can_module_exit);
1183
1184MODULE_AUTHOR("Marc Kleine-Budde <mkl@pengutronix.de>");
1185MODULE_LICENSE("GPL v2");
1186MODULE_DESCRIPTION(DRV_NAME " CAN netdevice driver");
diff --git a/drivers/net/davinci_emac.c b/drivers/net/davinci_emac.c
index d465eaa796c4..65a2d0ba64e2 100644
--- a/drivers/net/davinci_emac.c
+++ b/drivers/net/davinci_emac.c
@@ -200,6 +200,9 @@ static const char emac_version_string[] = "TI DaVinci EMAC Linux v6.1";
200/** NOTE:: For DM646x the IN_VECTOR has changed */ 200/** NOTE:: For DM646x the IN_VECTOR has changed */
201#define EMAC_DM646X_MAC_IN_VECTOR_RX_INT_VEC BIT(EMAC_DEF_RX_CH) 201#define EMAC_DM646X_MAC_IN_VECTOR_RX_INT_VEC BIT(EMAC_DEF_RX_CH)
202#define EMAC_DM646X_MAC_IN_VECTOR_TX_INT_VEC BIT(16 + EMAC_DEF_TX_CH) 202#define EMAC_DM646X_MAC_IN_VECTOR_TX_INT_VEC BIT(16 + EMAC_DEF_TX_CH)
203#define EMAC_DM646X_MAC_IN_VECTOR_HOST_INT BIT(26)
204#define EMAC_DM646X_MAC_IN_VECTOR_STATPEND_INT BIT(27)
205
203 206
204/* CPPI bit positions */ 207/* CPPI bit positions */
205#define EMAC_CPPI_SOP_BIT BIT(31) 208#define EMAC_CPPI_SOP_BIT BIT(31)
@@ -2167,7 +2170,11 @@ static int emac_poll(struct napi_struct *napi, int budget)
2167 emac_int_enable(priv); 2170 emac_int_enable(priv);
2168 } 2171 }
2169 2172
2170 if (unlikely(status & EMAC_DM644X_MAC_IN_VECTOR_HOST_INT)) { 2173 mask = EMAC_DM644X_MAC_IN_VECTOR_HOST_INT;
2174 if (priv->version == EMAC_VERSION_2)
2175 mask = EMAC_DM646X_MAC_IN_VECTOR_HOST_INT;
2176
2177 if (unlikely(status & mask)) {
2171 u32 ch, cause; 2178 u32 ch, cause;
2172 dev_err(emac_dev, "DaVinci EMAC: Fatal Hardware Error\n"); 2179 dev_err(emac_dev, "DaVinci EMAC: Fatal Hardware Error\n");
2173 netif_stop_queue(ndev); 2180 netif_stop_queue(ndev);
diff --git a/drivers/net/irda/kingsun-sir.c b/drivers/net/irda/kingsun-sir.c
index 2fc30b449eea..cb90d640007a 100644
--- a/drivers/net/irda/kingsun-sir.c
+++ b/drivers/net/irda/kingsun-sir.c
@@ -66,7 +66,6 @@
66#include <linux/errno.h> 66#include <linux/errno.h>
67#include <linux/init.h> 67#include <linux/init.h>
68#include <linux/slab.h> 68#include <linux/slab.h>
69#include <linux/kref.h>
70#include <linux/usb.h> 69#include <linux/usb.h>
71#include <linux/device.h> 70#include <linux/device.h>
72#include <linux/crc32.h> 71#include <linux/crc32.h>
diff --git a/drivers/net/irda/ks959-sir.c b/drivers/net/irda/ks959-sir.c
index f4d13fc51cbc..b54d3b48045e 100644
--- a/drivers/net/irda/ks959-sir.c
+++ b/drivers/net/irda/ks959-sir.c
@@ -118,7 +118,6 @@
118#include <linux/errno.h> 118#include <linux/errno.h>
119#include <linux/init.h> 119#include <linux/init.h>
120#include <linux/slab.h> 120#include <linux/slab.h>
121#include <linux/kref.h>
122#include <linux/usb.h> 121#include <linux/usb.h>
123#include <linux/device.h> 122#include <linux/device.h>
124#include <linux/crc32.h> 123#include <linux/crc32.h>
diff --git a/drivers/net/irda/ksdazzle-sir.c b/drivers/net/irda/ksdazzle-sir.c
index 5f9d73353972..8d713ebac15b 100644
--- a/drivers/net/irda/ksdazzle-sir.c
+++ b/drivers/net/irda/ksdazzle-sir.c
@@ -82,7 +82,6 @@
82#include <linux/errno.h> 82#include <linux/errno.h>
83#include <linux/init.h> 83#include <linux/init.h>
84#include <linux/slab.h> 84#include <linux/slab.h>
85#include <linux/kref.h>
86#include <linux/usb.h> 85#include <linux/usb.h>
87#include <linux/device.h> 86#include <linux/device.h>
88#include <linux/crc32.h> 87#include <linux/crc32.h>
diff --git a/drivers/net/irda/mcs7780.c b/drivers/net/irda/mcs7780.c
index b3d30bcb88e7..c0e0bb9401d3 100644
--- a/drivers/net/irda/mcs7780.c
+++ b/drivers/net/irda/mcs7780.c
@@ -50,7 +50,6 @@
50#include <linux/errno.h> 50#include <linux/errno.h>
51#include <linux/init.h> 51#include <linux/init.h>
52#include <linux/slab.h> 52#include <linux/slab.h>
53#include <linux/kref.h>
54#include <linux/usb.h> 53#include <linux/usb.h>
55#include <linux/device.h> 54#include <linux/device.h>
56#include <linux/crc32.h> 55#include <linux/crc32.h>
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 5c498d2b043f..d445845f2779 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -1,4 +1,4 @@
1/* A simple network driver using virtio. 1/* A network driver using virtio.
2 * 2 *
3 * Copyright 2007 Rusty Russell <rusty@rustcorp.com.au> IBM Corporation 3 * Copyright 2007 Rusty Russell <rusty@rustcorp.com.au> IBM Corporation
4 * 4 *
@@ -48,19 +48,9 @@ struct virtnet_info
48 struct napi_struct napi; 48 struct napi_struct napi;
49 unsigned int status; 49 unsigned int status;
50 50
51 /* The skb we couldn't send because buffers were full. */
52 struct sk_buff *last_xmit_skb;
53
54 /* If we need to free in a timer, this is it. */
55 struct timer_list xmit_free_timer;
56
57 /* Number of input buffers, and max we've ever had. */ 51 /* Number of input buffers, and max we've ever had. */
58 unsigned int num, max; 52 unsigned int num, max;
59 53
60 /* For cleaning up after transmission. */
61 struct tasklet_struct tasklet;
62 bool free_in_tasklet;
63
64 /* I like... big packets and I cannot lie! */ 54 /* I like... big packets and I cannot lie! */
65 bool big_packets; 55 bool big_packets;
66 56
@@ -78,9 +68,17 @@ struct virtnet_info
78 struct page *pages; 68 struct page *pages;
79}; 69};
80 70
81static inline void *skb_vnet_hdr(struct sk_buff *skb) 71struct skb_vnet_hdr {
72 union {
73 struct virtio_net_hdr hdr;
74 struct virtio_net_hdr_mrg_rxbuf mhdr;
75 };
76 unsigned int num_sg;
77};
78
79static inline struct skb_vnet_hdr *skb_vnet_hdr(struct sk_buff *skb)
82{ 80{
83 return (struct virtio_net_hdr *)skb->cb; 81 return (struct skb_vnet_hdr *)skb->cb;
84} 82}
85 83
86static void give_a_page(struct virtnet_info *vi, struct page *page) 84static void give_a_page(struct virtnet_info *vi, struct page *page)
@@ -119,17 +117,13 @@ static void skb_xmit_done(struct virtqueue *svq)
119 117
120 /* We were probably waiting for more output buffers. */ 118 /* We were probably waiting for more output buffers. */
121 netif_wake_queue(vi->dev); 119 netif_wake_queue(vi->dev);
122
123 /* Make sure we re-xmit last_xmit_skb: if there are no more packets
124 * queued, start_xmit won't be called. */
125 tasklet_schedule(&vi->tasklet);
126} 120}
127 121
128static void receive_skb(struct net_device *dev, struct sk_buff *skb, 122static void receive_skb(struct net_device *dev, struct sk_buff *skb,
129 unsigned len) 123 unsigned len)
130{ 124{
131 struct virtnet_info *vi = netdev_priv(dev); 125 struct virtnet_info *vi = netdev_priv(dev);
132 struct virtio_net_hdr *hdr = skb_vnet_hdr(skb); 126 struct skb_vnet_hdr *hdr = skb_vnet_hdr(skb);
133 int err; 127 int err;
134 int i; 128 int i;
135 129
@@ -140,7 +134,6 @@ static void receive_skb(struct net_device *dev, struct sk_buff *skb,
140 } 134 }
141 135
142 if (vi->mergeable_rx_bufs) { 136 if (vi->mergeable_rx_bufs) {
143 struct virtio_net_hdr_mrg_rxbuf *mhdr = skb_vnet_hdr(skb);
144 unsigned int copy; 137 unsigned int copy;
145 char *p = page_address(skb_shinfo(skb)->frags[0].page); 138 char *p = page_address(skb_shinfo(skb)->frags[0].page);
146 139
@@ -148,8 +141,8 @@ static void receive_skb(struct net_device *dev, struct sk_buff *skb,
148 len = PAGE_SIZE; 141 len = PAGE_SIZE;
149 len -= sizeof(struct virtio_net_hdr_mrg_rxbuf); 142 len -= sizeof(struct virtio_net_hdr_mrg_rxbuf);
150 143
151 memcpy(hdr, p, sizeof(*mhdr)); 144 memcpy(&hdr->mhdr, p, sizeof(hdr->mhdr));
152 p += sizeof(*mhdr); 145 p += sizeof(hdr->mhdr);
153 146
154 copy = len; 147 copy = len;
155 if (copy > skb_tailroom(skb)) 148 if (copy > skb_tailroom(skb))
@@ -164,13 +157,13 @@ static void receive_skb(struct net_device *dev, struct sk_buff *skb,
164 skb_shinfo(skb)->nr_frags--; 157 skb_shinfo(skb)->nr_frags--;
165 } else { 158 } else {
166 skb_shinfo(skb)->frags[0].page_offset += 159 skb_shinfo(skb)->frags[0].page_offset +=
167 sizeof(*mhdr) + copy; 160 sizeof(hdr->mhdr) + copy;
168 skb_shinfo(skb)->frags[0].size = len; 161 skb_shinfo(skb)->frags[0].size = len;
169 skb->data_len += len; 162 skb->data_len += len;
170 skb->len += len; 163 skb->len += len;
171 } 164 }
172 165
173 while (--mhdr->num_buffers) { 166 while (--hdr->mhdr.num_buffers) {
174 struct sk_buff *nskb; 167 struct sk_buff *nskb;
175 168
176 i = skb_shinfo(skb)->nr_frags; 169 i = skb_shinfo(skb)->nr_frags;
@@ -184,7 +177,7 @@ static void receive_skb(struct net_device *dev, struct sk_buff *skb,
184 nskb = vi->rvq->vq_ops->get_buf(vi->rvq, &len); 177 nskb = vi->rvq->vq_ops->get_buf(vi->rvq, &len);
185 if (!nskb) { 178 if (!nskb) {
186 pr_debug("%s: rx error: %d buffers missing\n", 179 pr_debug("%s: rx error: %d buffers missing\n",
187 dev->name, mhdr->num_buffers); 180 dev->name, hdr->mhdr.num_buffers);
188 dev->stats.rx_length_errors++; 181 dev->stats.rx_length_errors++;
189 goto drop; 182 goto drop;
190 } 183 }
@@ -205,7 +198,7 @@ static void receive_skb(struct net_device *dev, struct sk_buff *skb,
205 skb->len += len; 198 skb->len += len;
206 } 199 }
207 } else { 200 } else {
208 len -= sizeof(struct virtio_net_hdr); 201 len -= sizeof(hdr->hdr);
209 202
210 if (len <= MAX_PACKET_LEN) 203 if (len <= MAX_PACKET_LEN)
211 trim_pages(vi, skb); 204 trim_pages(vi, skb);
@@ -223,9 +216,11 @@ static void receive_skb(struct net_device *dev, struct sk_buff *skb,
223 dev->stats.rx_bytes += skb->len; 216 dev->stats.rx_bytes += skb->len;
224 dev->stats.rx_packets++; 217 dev->stats.rx_packets++;
225 218
226 if (hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) { 219 if (hdr->hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
227 pr_debug("Needs csum!\n"); 220 pr_debug("Needs csum!\n");
228 if (!skb_partial_csum_set(skb,hdr->csum_start,hdr->csum_offset)) 221 if (!skb_partial_csum_set(skb,
222 hdr->hdr.csum_start,
223 hdr->hdr.csum_offset))
229 goto frame_err; 224 goto frame_err;
230 } 225 }
231 226
@@ -233,9 +228,9 @@ static void receive_skb(struct net_device *dev, struct sk_buff *skb,
233 pr_debug("Receiving skb proto 0x%04x len %i type %i\n", 228 pr_debug("Receiving skb proto 0x%04x len %i type %i\n",
234 ntohs(skb->protocol), skb->len, skb->pkt_type); 229 ntohs(skb->protocol), skb->len, skb->pkt_type);
235 230
236 if (hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) { 231 if (hdr->hdr.gso_type != VIRTIO_NET_HDR_GSO_NONE) {
237 pr_debug("GSO!\n"); 232 pr_debug("GSO!\n");
238 switch (hdr->gso_type & ~VIRTIO_NET_HDR_GSO_ECN) { 233 switch (hdr->hdr.gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
239 case VIRTIO_NET_HDR_GSO_TCPV4: 234 case VIRTIO_NET_HDR_GSO_TCPV4:
240 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4; 235 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
241 break; 236 break;
@@ -248,14 +243,14 @@ static void receive_skb(struct net_device *dev, struct sk_buff *skb,
248 default: 243 default:
249 if (net_ratelimit()) 244 if (net_ratelimit())
250 printk(KERN_WARNING "%s: bad gso type %u.\n", 245 printk(KERN_WARNING "%s: bad gso type %u.\n",
251 dev->name, hdr->gso_type); 246 dev->name, hdr->hdr.gso_type);
252 goto frame_err; 247 goto frame_err;
253 } 248 }
254 249
255 if (hdr->gso_type & VIRTIO_NET_HDR_GSO_ECN) 250 if (hdr->hdr.gso_type & VIRTIO_NET_HDR_GSO_ECN)
256 skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN; 251 skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;
257 252
258 skb_shinfo(skb)->gso_size = hdr->gso_size; 253 skb_shinfo(skb)->gso_size = hdr->hdr.gso_size;
259 if (skb_shinfo(skb)->gso_size == 0) { 254 if (skb_shinfo(skb)->gso_size == 0) {
260 if (net_ratelimit()) 255 if (net_ratelimit())
261 printk(KERN_WARNING "%s: zero gso size.\n", 256 printk(KERN_WARNING "%s: zero gso size.\n",
@@ -285,8 +280,8 @@ static bool try_fill_recv_maxbufs(struct virtnet_info *vi, gfp_t gfp)
285 bool oom = false; 280 bool oom = false;
286 281
287 sg_init_table(sg, 2+MAX_SKB_FRAGS); 282 sg_init_table(sg, 2+MAX_SKB_FRAGS);
288 for (;;) { 283 do {
289 struct virtio_net_hdr *hdr; 284 struct skb_vnet_hdr *hdr;
290 285
291 skb = netdev_alloc_skb(vi->dev, MAX_PACKET_LEN + NET_IP_ALIGN); 286 skb = netdev_alloc_skb(vi->dev, MAX_PACKET_LEN + NET_IP_ALIGN);
292 if (unlikely(!skb)) { 287 if (unlikely(!skb)) {
@@ -298,7 +293,7 @@ static bool try_fill_recv_maxbufs(struct virtnet_info *vi, gfp_t gfp)
298 skb_put(skb, MAX_PACKET_LEN); 293 skb_put(skb, MAX_PACKET_LEN);
299 294
300 hdr = skb_vnet_hdr(skb); 295 hdr = skb_vnet_hdr(skb);
301 sg_set_buf(sg, hdr, sizeof(*hdr)); 296 sg_set_buf(sg, &hdr->hdr, sizeof(hdr->hdr));
302 297
303 if (vi->big_packets) { 298 if (vi->big_packets) {
304 for (i = 0; i < MAX_SKB_FRAGS; i++) { 299 for (i = 0; i < MAX_SKB_FRAGS; i++) {
@@ -328,7 +323,7 @@ static bool try_fill_recv_maxbufs(struct virtnet_info *vi, gfp_t gfp)
328 break; 323 break;
329 } 324 }
330 vi->num++; 325 vi->num++;
331 } 326 } while (err >= num);
332 if (unlikely(vi->num > vi->max)) 327 if (unlikely(vi->num > vi->max))
333 vi->max = vi->num; 328 vi->max = vi->num;
334 vi->rvq->vq_ops->kick(vi->rvq); 329 vi->rvq->vq_ops->kick(vi->rvq);
@@ -346,7 +341,7 @@ static bool try_fill_recv(struct virtnet_info *vi, gfp_t gfp)
346 if (!vi->mergeable_rx_bufs) 341 if (!vi->mergeable_rx_bufs)
347 return try_fill_recv_maxbufs(vi, gfp); 342 return try_fill_recv_maxbufs(vi, gfp);
348 343
349 for (;;) { 344 do {
350 skb_frag_t *f; 345 skb_frag_t *f;
351 346
352 skb = netdev_alloc_skb(vi->dev, GOOD_COPY_LEN + NET_IP_ALIGN); 347 skb = netdev_alloc_skb(vi->dev, GOOD_COPY_LEN + NET_IP_ALIGN);
@@ -380,7 +375,7 @@ static bool try_fill_recv(struct virtnet_info *vi, gfp_t gfp)
380 break; 375 break;
381 } 376 }
382 vi->num++; 377 vi->num++;
383 } 378 } while (err > 0);
384 if (unlikely(vi->num > vi->max)) 379 if (unlikely(vi->num > vi->max))
385 vi->max = vi->num; 380 vi->max = vi->num;
386 vi->rvq->vq_ops->kick(vi->rvq); 381 vi->rvq->vq_ops->kick(vi->rvq);
@@ -448,42 +443,26 @@ again:
448 return received; 443 return received;
449} 444}
450 445
451static void free_old_xmit_skbs(struct virtnet_info *vi) 446static unsigned int free_old_xmit_skbs(struct virtnet_info *vi)
452{ 447{
453 struct sk_buff *skb; 448 struct sk_buff *skb;
454 unsigned int len; 449 unsigned int len, tot_sgs = 0;
455 450
456 while ((skb = vi->svq->vq_ops->get_buf(vi->svq, &len)) != NULL) { 451 while ((skb = vi->svq->vq_ops->get_buf(vi->svq, &len)) != NULL) {
457 pr_debug("Sent skb %p\n", skb); 452 pr_debug("Sent skb %p\n", skb);
458 __skb_unlink(skb, &vi->send); 453 __skb_unlink(skb, &vi->send);
459 vi->dev->stats.tx_bytes += skb->len; 454 vi->dev->stats.tx_bytes += skb->len;
460 vi->dev->stats.tx_packets++; 455 vi->dev->stats.tx_packets++;
456 tot_sgs += skb_vnet_hdr(skb)->num_sg;
461 kfree_skb(skb); 457 kfree_skb(skb);
462 } 458 }
463} 459 return tot_sgs;
464
465/* If the virtio transport doesn't always notify us when all in-flight packets
466 * are consumed, we fall back to using this function on a timer to free them. */
467static void xmit_free(unsigned long data)
468{
469 struct virtnet_info *vi = (void *)data;
470
471 netif_tx_lock(vi->dev);
472
473 free_old_xmit_skbs(vi);
474
475 if (!skb_queue_empty(&vi->send))
476 mod_timer(&vi->xmit_free_timer, jiffies + (HZ/10));
477
478 netif_tx_unlock(vi->dev);
479} 460}
480 461
481static int xmit_skb(struct virtnet_info *vi, struct sk_buff *skb) 462static int xmit_skb(struct virtnet_info *vi, struct sk_buff *skb)
482{ 463{
483 int num, err;
484 struct scatterlist sg[2+MAX_SKB_FRAGS]; 464 struct scatterlist sg[2+MAX_SKB_FRAGS];
485 struct virtio_net_hdr_mrg_rxbuf *mhdr = skb_vnet_hdr(skb); 465 struct skb_vnet_hdr *hdr = skb_vnet_hdr(skb);
486 struct virtio_net_hdr *hdr = skb_vnet_hdr(skb);
487 const unsigned char *dest = ((struct ethhdr *)skb->data)->h_dest; 466 const unsigned char *dest = ((struct ethhdr *)skb->data)->h_dest;
488 467
489 sg_init_table(sg, 2+MAX_SKB_FRAGS); 468 sg_init_table(sg, 2+MAX_SKB_FRAGS);
@@ -491,108 +470,89 @@ static int xmit_skb(struct virtnet_info *vi, struct sk_buff *skb)
491 pr_debug("%s: xmit %p %pM\n", vi->dev->name, skb, dest); 470 pr_debug("%s: xmit %p %pM\n", vi->dev->name, skb, dest);
492 471
493 if (skb->ip_summed == CHECKSUM_PARTIAL) { 472 if (skb->ip_summed == CHECKSUM_PARTIAL) {
494 hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM; 473 hdr->hdr.flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
495 hdr->csum_start = skb->csum_start - skb_headroom(skb); 474 hdr->hdr.csum_start = skb->csum_start - skb_headroom(skb);
496 hdr->csum_offset = skb->csum_offset; 475 hdr->hdr.csum_offset = skb->csum_offset;
497 } else { 476 } else {
498 hdr->flags = 0; 477 hdr->hdr.flags = 0;
499 hdr->csum_offset = hdr->csum_start = 0; 478 hdr->hdr.csum_offset = hdr->hdr.csum_start = 0;
500 } 479 }
501 480
502 if (skb_is_gso(skb)) { 481 if (skb_is_gso(skb)) {
503 hdr->hdr_len = skb_headlen(skb); 482 hdr->hdr.hdr_len = skb_headlen(skb);
504 hdr->gso_size = skb_shinfo(skb)->gso_size; 483 hdr->hdr.gso_size = skb_shinfo(skb)->gso_size;
505 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4) 484 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
506 hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV4; 485 hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
507 else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) 486 else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
508 hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV6; 487 hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
509 else if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP) 488 else if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP)
510 hdr->gso_type = VIRTIO_NET_HDR_GSO_UDP; 489 hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_UDP;
511 else 490 else
512 BUG(); 491 BUG();
513 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCP_ECN) 492 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCP_ECN)
514 hdr->gso_type |= VIRTIO_NET_HDR_GSO_ECN; 493 hdr->hdr.gso_type |= VIRTIO_NET_HDR_GSO_ECN;
515 } else { 494 } else {
516 hdr->gso_type = VIRTIO_NET_HDR_GSO_NONE; 495 hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_NONE;
517 hdr->gso_size = hdr->hdr_len = 0; 496 hdr->hdr.gso_size = hdr->hdr.hdr_len = 0;
518 } 497 }
519 498
520 mhdr->num_buffers = 0; 499 hdr->mhdr.num_buffers = 0;
521 500
522 /* Encode metadata header at front. */ 501 /* Encode metadata header at front. */
523 if (vi->mergeable_rx_bufs) 502 if (vi->mergeable_rx_bufs)
524 sg_set_buf(sg, mhdr, sizeof(*mhdr)); 503 sg_set_buf(sg, &hdr->mhdr, sizeof(hdr->mhdr));
525 else 504 else
526 sg_set_buf(sg, hdr, sizeof(*hdr)); 505 sg_set_buf(sg, &hdr->hdr, sizeof(hdr->hdr));
527 506
528 num = skb_to_sgvec(skb, sg+1, 0, skb->len) + 1; 507 hdr->num_sg = skb_to_sgvec(skb, sg+1, 0, skb->len) + 1;
529 508 return vi->svq->vq_ops->add_buf(vi->svq, sg, hdr->num_sg, 0, skb);
530 err = vi->svq->vq_ops->add_buf(vi->svq, sg, num, 0, skb);
531 if (err >= 0 && !vi->free_in_tasklet)
532 mod_timer(&vi->xmit_free_timer, jiffies + (HZ/10));
533
534 return err;
535}
536
537static void xmit_tasklet(unsigned long data)
538{
539 struct virtnet_info *vi = (void *)data;
540
541 netif_tx_lock_bh(vi->dev);
542 if (vi->last_xmit_skb && xmit_skb(vi, vi->last_xmit_skb) >= 0) {
543 vi->svq->vq_ops->kick(vi->svq);
544 vi->last_xmit_skb = NULL;
545 }
546 if (vi->free_in_tasklet)
547 free_old_xmit_skbs(vi);
548 netif_tx_unlock_bh(vi->dev);
549} 509}
550 510
551static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev) 511static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
552{ 512{
553 struct virtnet_info *vi = netdev_priv(dev); 513 struct virtnet_info *vi = netdev_priv(dev);
514 int capacity;
554 515
555again: 516again:
556 /* Free up any pending old buffers before queueing new ones. */ 517 /* Free up any pending old buffers before queueing new ones. */
557 free_old_xmit_skbs(vi); 518 free_old_xmit_skbs(vi);
558 519
559 /* If we has a buffer left over from last time, send it now. */
560 if (unlikely(vi->last_xmit_skb) &&
561 xmit_skb(vi, vi->last_xmit_skb) < 0)
562 goto stop_queue;
563
564 vi->last_xmit_skb = NULL;
565
566 /* Put new one in send queue and do transmit */ 520 /* Put new one in send queue and do transmit */
567 if (likely(skb)) { 521 __skb_queue_head(&vi->send, skb);
568 __skb_queue_head(&vi->send, skb); 522 capacity = xmit_skb(vi, skb);
569 if (xmit_skb(vi, skb) < 0) { 523
570 vi->last_xmit_skb = skb; 524 /* This can happen with OOM and indirect buffers. */
571 skb = NULL; 525 if (unlikely(capacity < 0)) {
572 goto stop_queue; 526 netif_stop_queue(dev);
527 dev_warn(&dev->dev, "Unexpected full queue\n");
528 if (unlikely(!vi->svq->vq_ops->enable_cb(vi->svq))) {
529 vi->svq->vq_ops->disable_cb(vi->svq);
530 netif_start_queue(dev);
531 goto again;
573 } 532 }
533 return NETDEV_TX_BUSY;
574 } 534 }
575done:
576 vi->svq->vq_ops->kick(vi->svq);
577 return NETDEV_TX_OK;
578 535
579stop_queue: 536 vi->svq->vq_ops->kick(vi->svq);
580 pr_debug("%s: virtio not prepared to send\n", dev->name); 537 /* Don't wait up for transmitted skbs to be freed. */
581 netif_stop_queue(dev); 538 skb_orphan(skb);
582 539 nf_reset(skb);
583 /* Activate callback for using skbs: if this returns false it 540
584 * means some were used in the meantime. */ 541 /* Apparently nice girls don't return TX_BUSY; stop the queue
585 if (unlikely(!vi->svq->vq_ops->enable_cb(vi->svq))) { 542 * before it gets out of hand. Naturally, this wastes entries. */
586 vi->svq->vq_ops->disable_cb(vi->svq); 543 if (capacity < 2+MAX_SKB_FRAGS) {
587 netif_start_queue(dev); 544 netif_stop_queue(dev);
588 goto again; 545 if (unlikely(!vi->svq->vq_ops->enable_cb(vi->svq))) {
589 } 546 /* More just got used, free them then recheck. */
590 if (skb) { 547 capacity += free_old_xmit_skbs(vi);
591 /* Drop this skb: we only queue one. */ 548 if (capacity >= 2+MAX_SKB_FRAGS) {
592 vi->dev->stats.tx_dropped++; 549 netif_start_queue(dev);
593 kfree_skb(skb); 550 vi->svq->vq_ops->disable_cb(vi->svq);
551 }
552 }
594 } 553 }
595 goto done; 554
555 return NETDEV_TX_OK;
596} 556}
597 557
598static int virtnet_set_mac_address(struct net_device *dev, void *p) 558static int virtnet_set_mac_address(struct net_device *dev, void *p)
@@ -925,10 +885,6 @@ static int virtnet_probe(struct virtio_device *vdev)
925 vi->pages = NULL; 885 vi->pages = NULL;
926 INIT_DELAYED_WORK(&vi->refill, refill_work); 886 INIT_DELAYED_WORK(&vi->refill, refill_work);
927 887
928 /* If they give us a callback when all buffers are done, we don't need
929 * the timer. */
930 vi->free_in_tasklet = virtio_has_feature(vdev,VIRTIO_F_NOTIFY_ON_EMPTY);
931
932 /* If we can receive ANY GSO packets, we must allocate large ones. */ 888 /* If we can receive ANY GSO packets, we must allocate large ones. */
933 if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO4) 889 if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO4)
934 || virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO6) 890 || virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO6)
@@ -960,11 +916,6 @@ static int virtnet_probe(struct virtio_device *vdev)
960 skb_queue_head_init(&vi->recv); 916 skb_queue_head_init(&vi->recv);
961 skb_queue_head_init(&vi->send); 917 skb_queue_head_init(&vi->send);
962 918
963 tasklet_init(&vi->tasklet, xmit_tasklet, (unsigned long)vi);
964
965 if (!vi->free_in_tasklet)
966 setup_timer(&vi->xmit_free_timer, xmit_free, (unsigned long)vi);
967
968 err = register_netdev(dev); 919 err = register_netdev(dev);
969 if (err) { 920 if (err) {
970 pr_debug("virtio_net: registering device failed\n"); 921 pr_debug("virtio_net: registering device failed\n");
@@ -1005,9 +956,6 @@ static void virtnet_remove(struct virtio_device *vdev)
1005 /* Stop all the virtqueues. */ 956 /* Stop all the virtqueues. */
1006 vdev->config->reset(vdev); 957 vdev->config->reset(vdev);
1007 958
1008 if (!vi->free_in_tasklet)
1009 del_timer_sync(&vi->xmit_free_timer);
1010
1011 /* Free our skbs in send and recv queues, if any. */ 959 /* Free our skbs in send and recv queues, if any. */
1012 while ((skb = __skb_dequeue(&vi->recv)) != NULL) { 960 while ((skb = __skb_dequeue(&vi->recv)) != NULL) {
1013 kfree_skb(skb); 961 kfree_skb(skb);
@@ -1041,7 +989,6 @@ static unsigned int features[] = {
1041 VIRTIO_NET_F_GUEST_ECN, VIRTIO_NET_F_GUEST_UFO, 989 VIRTIO_NET_F_GUEST_ECN, VIRTIO_NET_F_GUEST_UFO,
1042 VIRTIO_NET_F_MRG_RXBUF, VIRTIO_NET_F_STATUS, VIRTIO_NET_F_CTRL_VQ, 990 VIRTIO_NET_F_MRG_RXBUF, VIRTIO_NET_F_STATUS, VIRTIO_NET_F_CTRL_VQ,
1043 VIRTIO_NET_F_CTRL_RX, VIRTIO_NET_F_CTRL_VLAN, 991 VIRTIO_NET_F_CTRL_RX, VIRTIO_NET_F_CTRL_VLAN,
1044 VIRTIO_F_NOTIFY_ON_EMPTY,
1045}; 992};
1046 993
1047static struct virtio_driver virtio_net = { 994static struct virtio_driver virtio_net = {
diff --git a/drivers/pci/hotplug/acpiphp_ibm.c b/drivers/pci/hotplug/acpiphp_ibm.c
index a9d926b7d805..e7be66dbac21 100644
--- a/drivers/pci/hotplug/acpiphp_ibm.c
+++ b/drivers/pci/hotplug/acpiphp_ibm.c
@@ -406,7 +406,6 @@ static acpi_status __init ibm_find_acpi_device(acpi_handle handle,
406 __func__, status); 406 __func__, status);
407 return retval; 407 return retval;
408 } 408 }
409 info->hardware_id.string[sizeof(info->hardware_id.length) - 1] = '\0';
410 409
411 if (info->current_status && (info->valid & ACPI_VALID_HID) && 410 if (info->current_status && (info->valid & ACPI_VALID_HID) &&
412 (!strcmp(info->hardware_id.string, IBM_HARDWARE_ID1) || 411 (!strcmp(info->hardware_id.string, IBM_HARDWARE_ID1) ||
diff --git a/drivers/platform/x86/eeepc-laptop.c b/drivers/platform/x86/eeepc-laptop.c
index da3c08b3dcc1..749e2102b2be 100644
--- a/drivers/platform/x86/eeepc-laptop.c
+++ b/drivers/platform/x86/eeepc-laptop.c
@@ -624,7 +624,7 @@ static int notify_brn(void)
624 struct backlight_device *bd = eeepc_backlight_device; 624 struct backlight_device *bd = eeepc_backlight_device;
625 if (bd) { 625 if (bd) {
626 int old = bd->props.brightness; 626 int old = bd->props.brightness;
627 bd->props.brightness = read_brightness(bd); 627 backlight_force_update(bd, BACKLIGHT_UPDATE_HOTKEY);
628 return old; 628 return old;
629 } 629 }
630 return -1; 630 return -1;
diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
index f78d27503925..3910f2f3eada 100644
--- a/drivers/platform/x86/thinkpad_acpi.c
+++ b/drivers/platform/x86/thinkpad_acpi.c
@@ -22,7 +22,7 @@
22 */ 22 */
23 23
24#define TPACPI_VERSION "0.23" 24#define TPACPI_VERSION "0.23"
25#define TPACPI_SYSFS_VERSION 0x020400 25#define TPACPI_SYSFS_VERSION 0x020500
26 26
27/* 27/*
28 * Changelog: 28 * Changelog:
@@ -145,6 +145,51 @@ enum {
145 TP_ACPI_WGSV_STATE_UWBPWR = 0x0020, /* UWB radio enabled */ 145 TP_ACPI_WGSV_STATE_UWBPWR = 0x0020, /* UWB radio enabled */
146}; 146};
147 147
148/* HKEY events */
149enum tpacpi_hkey_event_t {
150 /* Hotkey-related */
151 TP_HKEY_EV_HOTKEY_BASE = 0x1001, /* first hotkey (FN+F1) */
152 TP_HKEY_EV_BRGHT_UP = 0x1010, /* Brightness up */
153 TP_HKEY_EV_BRGHT_DOWN = 0x1011, /* Brightness down */
154 TP_HKEY_EV_VOL_UP = 0x1015, /* Volume up or unmute */
155 TP_HKEY_EV_VOL_DOWN = 0x1016, /* Volume down or unmute */
156 TP_HKEY_EV_VOL_MUTE = 0x1017, /* Mixer output mute */
157
158 /* Reasons for waking up from S3/S4 */
159 TP_HKEY_EV_WKUP_S3_UNDOCK = 0x2304, /* undock requested, S3 */
160 TP_HKEY_EV_WKUP_S4_UNDOCK = 0x2404, /* undock requested, S4 */
161 TP_HKEY_EV_WKUP_S3_BAYEJ = 0x2305, /* bay ejection req, S3 */
162 TP_HKEY_EV_WKUP_S4_BAYEJ = 0x2405, /* bay ejection req, S4 */
163 TP_HKEY_EV_WKUP_S3_BATLOW = 0x2313, /* battery empty, S3 */
164 TP_HKEY_EV_WKUP_S4_BATLOW = 0x2413, /* battery empty, S4 */
165
166 /* Auto-sleep after eject request */
167 TP_HKEY_EV_BAYEJ_ACK = 0x3003, /* bay ejection complete */
168 TP_HKEY_EV_UNDOCK_ACK = 0x4003, /* undock complete */
169
170 /* Misc bay events */
171 TP_HKEY_EV_OPTDRV_EJ = 0x3006, /* opt. drive tray ejected */
172
173 /* User-interface events */
174 TP_HKEY_EV_LID_CLOSE = 0x5001, /* laptop lid closed */
175 TP_HKEY_EV_LID_OPEN = 0x5002, /* laptop lid opened */
176 TP_HKEY_EV_TABLET_TABLET = 0x5009, /* tablet swivel up */
177 TP_HKEY_EV_TABLET_NOTEBOOK = 0x500a, /* tablet swivel down */
178 TP_HKEY_EV_PEN_INSERTED = 0x500b, /* tablet pen inserted */
179 TP_HKEY_EV_PEN_REMOVED = 0x500c, /* tablet pen removed */
180 TP_HKEY_EV_BRGHT_CHANGED = 0x5010, /* backlight control event */
181
182 /* Thermal events */
183 TP_HKEY_EV_ALARM_BAT_HOT = 0x6011, /* battery too hot */
184 TP_HKEY_EV_ALARM_BAT_XHOT = 0x6012, /* battery critically hot */
185 TP_HKEY_EV_ALARM_SENSOR_HOT = 0x6021, /* sensor too hot */
186 TP_HKEY_EV_ALARM_SENSOR_XHOT = 0x6022, /* sensor critically hot */
187 TP_HKEY_EV_THM_TABLE_CHANGED = 0x6030, /* thermal table changed */
188
189 /* Misc */
190 TP_HKEY_EV_RFKILL_CHANGED = 0x7000, /* rfkill switch changed */
191};
192
148/**************************************************************************** 193/****************************************************************************
149 * Main driver 194 * Main driver
150 */ 195 */
@@ -1848,6 +1893,27 @@ static struct ibm_struct thinkpad_acpi_driver_data = {
1848 * Hotkey subdriver 1893 * Hotkey subdriver
1849 */ 1894 */
1850 1895
1896/*
1897 * ThinkPad firmware event model
1898 *
1899 * The ThinkPad firmware has two main event interfaces: normal ACPI
1900 * notifications (which follow the ACPI standard), and a private event
1901 * interface.
1902 *
1903 * The private event interface also issues events for the hotkeys. As
1904 * the driver gained features, the event handling code ended up being
1905 * built around the hotkey subdriver. This will need to be refactored
1906 * to a more formal event API eventually.
1907 *
1908 * Some "hotkeys" are actually supposed to be used as event reports,
1909 * such as "brightness has changed", "volume has changed", depending on
1910 * the ThinkPad model and how the firmware is operating.
1911 *
1912 * Unlike other classes, hotkey-class events have mask/unmask control on
1913 * non-ancient firmware. However, how it behaves changes a lot with the
1914 * firmware model and version.
1915 */
1916
1851enum { /* hot key scan codes (derived from ACPI DSDT) */ 1917enum { /* hot key scan codes (derived from ACPI DSDT) */
1852 TP_ACPI_HOTKEYSCAN_FNF1 = 0, 1918 TP_ACPI_HOTKEYSCAN_FNF1 = 0,
1853 TP_ACPI_HOTKEYSCAN_FNF2, 1919 TP_ACPI_HOTKEYSCAN_FNF2,
@@ -1875,7 +1941,7 @@ enum { /* hot key scan codes (derived from ACPI DSDT) */
1875 TP_ACPI_HOTKEYSCAN_THINKPAD, 1941 TP_ACPI_HOTKEYSCAN_THINKPAD,
1876}; 1942};
1877 1943
1878enum { /* Keys available through NVRAM polling */ 1944enum { /* Keys/events available through NVRAM polling */
1879 TPACPI_HKEY_NVRAM_KNOWN_MASK = 0x00fb88c0U, 1945 TPACPI_HKEY_NVRAM_KNOWN_MASK = 0x00fb88c0U,
1880 TPACPI_HKEY_NVRAM_GOOD_MASK = 0x00fb8000U, 1946 TPACPI_HKEY_NVRAM_GOOD_MASK = 0x00fb8000U,
1881}; 1947};
@@ -1930,8 +1996,11 @@ static struct task_struct *tpacpi_hotkey_task;
1930static struct mutex hotkey_thread_mutex; 1996static struct mutex hotkey_thread_mutex;
1931 1997
1932/* 1998/*
1933 * Acquire mutex to write poller control variables. 1999 * Acquire mutex to write poller control variables as an
1934 * Increment hotkey_config_change when changing them. 2000 * atomic block.
2001 *
2002 * Increment hotkey_config_change when changing them if you
2003 * want the kthread to forget old state.
1935 * 2004 *
1936 * See HOTKEY_CONFIG_CRITICAL_START/HOTKEY_CONFIG_CRITICAL_END 2005 * See HOTKEY_CONFIG_CRITICAL_START/HOTKEY_CONFIG_CRITICAL_END
1937 */ 2006 */
@@ -1942,6 +2011,11 @@ static unsigned int hotkey_config_change;
1942 * hotkey poller control variables 2011 * hotkey poller control variables
1943 * 2012 *
1944 * Must be atomic or readers will also need to acquire mutex 2013 * Must be atomic or readers will also need to acquire mutex
2014 *
2015 * HOTKEY_CONFIG_CRITICAL_START/HOTKEY_CONFIG_CRITICAL_END
2016 * should be used only when the changes need to be taken as
2017 * a block, OR when one needs to force the kthread to forget
2018 * old state.
1945 */ 2019 */
1946static u32 hotkey_source_mask; /* bit mask 0=ACPI,1=NVRAM */ 2020static u32 hotkey_source_mask; /* bit mask 0=ACPI,1=NVRAM */
1947static unsigned int hotkey_poll_freq = 10; /* Hz */ 2021static unsigned int hotkey_poll_freq = 10; /* Hz */
@@ -1972,10 +2046,12 @@ static enum { /* Reasons for waking up */
1972 2046
1973static int hotkey_autosleep_ack; 2047static int hotkey_autosleep_ack;
1974 2048
1975static u32 hotkey_orig_mask; 2049static u32 hotkey_orig_mask; /* events the BIOS had enabled */
1976static u32 hotkey_all_mask; 2050static u32 hotkey_all_mask; /* all events supported in fw */
1977static u32 hotkey_reserved_mask; 2051static u32 hotkey_reserved_mask; /* events better left disabled */
1978static u32 hotkey_mask; 2052static u32 hotkey_driver_mask; /* events needed by the driver */
2053static u32 hotkey_user_mask; /* events visible to userspace */
2054static u32 hotkey_acpi_mask; /* events enabled in firmware */
1979 2055
1980static unsigned int hotkey_report_mode; 2056static unsigned int hotkey_report_mode;
1981 2057
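The helpers added below keep these masks consistent with a handful of bit operations: hotkey_mask_get() clamps hotkey_user_mask to what is actually deliverable (firmware-enabled or NVRAM-polled), and hotkey_mask_warn_incomplete_mask() reports driver-required events that cannot be delivered. The standalone sketch below applies the same operations to made-up sample values; it is plain arithmetic, not driver code.

/* Illustration only: the mask relationships used by the helpers below,
 * applied to arbitrary sample values. */
#include <stdio.h>

int main(void)
{
	const unsigned int all    = 0x00ffffffu; /* hotkey_all_mask            */
	const unsigned int nvram  = 0x00fb88c0u; /* TPACPI_HKEY_NVRAM_KNOWN_MASK */
	const unsigned int acpi   = 0x008c7fffu; /* enabled in firmware        */
	const unsigned int source = 0x00300000u; /* polled from NVRAM          */
	const unsigned int driver = 0x01018000u; /* needed by the driver       */
	unsigned int user         = 0xffffffffu; /* requested through sysfs    */

	/* hotkey_user_mask_set()/hotkey_mask_get(): only deliverable bits stick */
	user &= acpi | source;

	/* hotkey_mask_warn_incomplete_mask(): driver events we cannot deliver */
	const unsigned int missing = driver & ~(acpi | source) & (all | nvram);

	printf("effective user mask 0x%08x, undeliverable driver events 0x%08x\n",
	       user, missing);
	return 0;
}
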
@@ -1983,6 +2059,9 @@ static u16 *hotkey_keycode_map;
1983 2059
1984static struct attribute_set *hotkey_dev_attributes; 2060static struct attribute_set *hotkey_dev_attributes;
1985 2061
2062static void tpacpi_driver_event(const unsigned int hkey_event);
2063static void hotkey_driver_event(const unsigned int scancode);
2064
1986/* HKEY.MHKG() return bits */ 2065/* HKEY.MHKG() return bits */
1987#define TP_HOTKEY_TABLET_MASK (1 << 3) 2066#define TP_HOTKEY_TABLET_MASK (1 << 3)
1988 2067
@@ -2017,24 +2096,53 @@ static int hotkey_get_tablet_mode(int *status)
2017} 2096}
2018 2097
2019/* 2098/*
2099 * Reads current event mask from firmware, and updates
2100 * hotkey_acpi_mask accordingly. Also resets any bits
2101 * from hotkey_user_mask that are unavailable to be
2102 * delivered (shadow requirement of the userspace ABI).
2103 *
2020 * Call with hotkey_mutex held 2104 * Call with hotkey_mutex held
2021 */ 2105 */
2022static int hotkey_mask_get(void) 2106static int hotkey_mask_get(void)
2023{ 2107{
2024 u32 m = 0;
2025
2026 if (tp_features.hotkey_mask) { 2108 if (tp_features.hotkey_mask) {
2109 u32 m = 0;
2110
2027 if (!acpi_evalf(hkey_handle, &m, "DHKN", "d")) 2111 if (!acpi_evalf(hkey_handle, &m, "DHKN", "d"))
2028 return -EIO; 2112 return -EIO;
2113
2114 hotkey_acpi_mask = m;
2115 } else {
2116 /* no mask support doesn't mean no event support... */
2117 hotkey_acpi_mask = hotkey_all_mask;
2029 } 2118 }
2030 HOTKEY_CONFIG_CRITICAL_START 2119
2031 hotkey_mask = m | (hotkey_source_mask & hotkey_mask); 2120 /* sync userspace-visible mask */
2032 HOTKEY_CONFIG_CRITICAL_END 2121 hotkey_user_mask &= (hotkey_acpi_mask | hotkey_source_mask);
2033 2122
2034 return 0; 2123 return 0;
2035} 2124}
2036 2125
 2126static void hotkey_mask_warn_incomplete_mask(void)
2127{
2128 /* log only what the user can fix... */
2129 const u32 wantedmask = hotkey_driver_mask &
2130 ~(hotkey_acpi_mask | hotkey_source_mask) &
2131 (hotkey_all_mask | TPACPI_HKEY_NVRAM_KNOWN_MASK);
2132
2133 if (wantedmask)
2134 printk(TPACPI_NOTICE
2135 "required events 0x%08x not enabled!\n",
2136 wantedmask);
2137}
2138
2037/* 2139/*
2140 * Set the firmware mask when supported
2141 *
2142 * Also calls hotkey_mask_get to update hotkey_acpi_mask.
2143 *
2144 * NOTE: does not set bits in hotkey_user_mask, but may reset them.
2145 *
2038 * Call with hotkey_mutex held 2146 * Call with hotkey_mutex held
2039 */ 2147 */
2040static int hotkey_mask_set(u32 mask) 2148static int hotkey_mask_set(u32 mask)
@@ -2042,66 +2150,98 @@ static int hotkey_mask_set(u32 mask)
2042 int i; 2150 int i;
2043 int rc = 0; 2151 int rc = 0;
2044 2152
2045 if (tp_features.hotkey_mask) { 2153 const u32 fwmask = mask & ~hotkey_source_mask;
2046 if (!tp_warned.hotkey_mask_ff &&
2047 (mask == 0xffff || mask == 0xffffff ||
2048 mask == 0xffffffff)) {
2049 tp_warned.hotkey_mask_ff = 1;
2050 printk(TPACPI_NOTICE
2051 "setting the hotkey mask to 0x%08x is likely "
2052 "not the best way to go about it\n", mask);
2053 printk(TPACPI_NOTICE
2054 "please consider using the driver defaults, "
2055 "and refer to up-to-date thinkpad-acpi "
2056 "documentation\n");
2057 }
2058 2154
2059 HOTKEY_CONFIG_CRITICAL_START 2155 if (tp_features.hotkey_mask) {
2060 for (i = 0; i < 32; i++) { 2156 for (i = 0; i < 32; i++) {
2061 u32 m = 1 << i;
2062 /* enable in firmware mask only keys not in NVRAM
2063 * mode, but enable the key in the cached hotkey_mask
2064 * regardless of mode, or the key will end up
2065 * disabled by hotkey_mask_get() */
2066 if (!acpi_evalf(hkey_handle, 2157 if (!acpi_evalf(hkey_handle,
2067 NULL, "MHKM", "vdd", i + 1, 2158 NULL, "MHKM", "vdd", i + 1,
2068 !!((mask & ~hotkey_source_mask) & m))) { 2159 !!(mask & (1 << i)))) {
2069 rc = -EIO; 2160 rc = -EIO;
2070 break; 2161 break;
2071 } else {
2072 hotkey_mask = (hotkey_mask & ~m) | (mask & m);
2073 } 2162 }
2074 } 2163 }
2075 HOTKEY_CONFIG_CRITICAL_END 2164 }
2076 2165
2077 /* hotkey_mask_get must be called unconditionally below */ 2166 /*
2078 if (!hotkey_mask_get() && !rc && 2167 * We *must* make an inconditional call to hotkey_mask_get to
2079 (hotkey_mask & ~hotkey_source_mask) != 2168 * refresh hotkey_acpi_mask and update hotkey_user_mask
2080 (mask & ~hotkey_source_mask)) { 2169 *
2081 printk(TPACPI_NOTICE 2170 * Take the opportunity to also log when we cannot _enable_
2082 "requested hot key mask 0x%08x, but " 2171 * a given event.
2083 "firmware forced it to 0x%08x\n", 2172 */
2084 mask, hotkey_mask); 2173 if (!hotkey_mask_get() && !rc && (fwmask & ~hotkey_acpi_mask)) {
2085 } 2174 printk(TPACPI_NOTICE
2086 } else { 2175 "asked for hotkey mask 0x%08x, but "
2087#ifdef CONFIG_THINKPAD_ACPI_HOTKEY_POLL 2176 "firmware forced it to 0x%08x\n",
2088 HOTKEY_CONFIG_CRITICAL_START 2177 fwmask, hotkey_acpi_mask);
2089 hotkey_mask = mask & hotkey_source_mask;
2090 HOTKEY_CONFIG_CRITICAL_END
2091 hotkey_mask_get();
2092 if (hotkey_mask != mask) {
2093 printk(TPACPI_NOTICE
2094 "requested hot key mask 0x%08x, "
2095 "forced to 0x%08x (NVRAM poll mask is "
2096 "0x%08x): no firmware mask support\n",
2097 mask, hotkey_mask, hotkey_source_mask);
2098 }
2099#else
2100 hotkey_mask_get();
2101 rc = -ENXIO;
2102#endif /* CONFIG_THINKPAD_ACPI_HOTKEY_POLL */
2103 } 2178 }
2104 2179
2180 hotkey_mask_warn_incomplete_mask();
2181
2182 return rc;
2183}
2184
2185/*
2186 * Sets hotkey_user_mask and tries to set the firmware mask
2187 *
2188 * Call with hotkey_mutex held
2189 */
2190static int hotkey_user_mask_set(const u32 mask)
2191{
2192 int rc;
2193
2194 /* Give people a chance to notice they are doing something that
2195 * is bound to go boom on their users sooner or later */
2196 if (!tp_warned.hotkey_mask_ff &&
2197 (mask == 0xffff || mask == 0xffffff ||
2198 mask == 0xffffffff)) {
2199 tp_warned.hotkey_mask_ff = 1;
2200 printk(TPACPI_NOTICE
2201 "setting the hotkey mask to 0x%08x is likely "
2202 "not the best way to go about it\n", mask);
2203 printk(TPACPI_NOTICE
2204 "please consider using the driver defaults, "
2205 "and refer to up-to-date thinkpad-acpi "
2206 "documentation\n");
2207 }
2208
2209 /* Try to enable what the user asked for, plus whatever we need.
 2210 * This syncs everything but won't enable bits in hotkey_user_mask */
2211 rc = hotkey_mask_set((mask | hotkey_driver_mask) & ~hotkey_source_mask);
2212
2213 /* Enable the available bits in hotkey_user_mask */
2214 hotkey_user_mask = mask & (hotkey_acpi_mask | hotkey_source_mask);
2215
2216 return rc;
2217}
2218
2219/*
2220 * Sets the driver hotkey mask.
2221 *
2222 * Can be called even if the hotkey subdriver is inactive
2223 */
2224static int tpacpi_hotkey_driver_mask_set(const u32 mask)
2225{
2226 int rc;
2227
2228 /* Do the right thing if hotkey_init has not been called yet */
2229 if (!tp_features.hotkey) {
2230 hotkey_driver_mask = mask;
2231 return 0;
2232 }
2233
2234 mutex_lock(&hotkey_mutex);
2235
2236 HOTKEY_CONFIG_CRITICAL_START
2237 hotkey_driver_mask = mask;
2238 hotkey_source_mask |= (mask & ~hotkey_all_mask);
2239 HOTKEY_CONFIG_CRITICAL_END
2240
2241 rc = hotkey_mask_set((hotkey_acpi_mask | hotkey_driver_mask) &
2242 ~hotkey_source_mask);
2243 mutex_unlock(&hotkey_mutex);
2244
2105 return rc; 2245 return rc;
2106} 2246}
2107 2247
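Other subdrivers are meant to request the events they depend on through tpacpi_hotkey_driver_mask_set() rather than by touching the firmware mask directly. A hypothetical caller (not part of this patch) might look like the fragment below; TP_ACPI_HOTKEYSCAN_FNHOME and TP_ACPI_HOTKEYSCAN_FNEND are assumed to be the brightness-key entries in the elided middle of the scan-code enum above.

/* Hypothetical illustration only: request delivery of the brightness
 * hotkey reports.  The scan-code names are assumed from the elided part
 * of the enum above (the 0x0F/0x10 brightness slots in the keymap). */
static int example_request_brightness_events(void)
{
	const u32 wanted = (1 << TP_ACPI_HOTKEYSCAN_FNHOME) |
			   (1 << TP_ACPI_HOTKEYSCAN_FNEND);

	/* replaces the whole driver mask; a real caller would OR in
	 * whatever else it already needs */
	return tpacpi_hotkey_driver_mask_set(wanted);
}
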
@@ -2137,11 +2277,10 @@ static void tpacpi_input_send_tabletsw(void)
2137 } 2277 }
2138} 2278}
2139 2279
2140static void tpacpi_input_send_key(unsigned int scancode) 2280/* Do NOT call without validating scancode first */
2281static void tpacpi_input_send_key(const unsigned int scancode)
2141{ 2282{
2142 unsigned int keycode; 2283 const unsigned int keycode = hotkey_keycode_map[scancode];
2143
2144 keycode = hotkey_keycode_map[scancode];
2145 2284
2146 if (keycode != KEY_RESERVED) { 2285 if (keycode != KEY_RESERVED) {
2147 mutex_lock(&tpacpi_inputdev_send_mutex); 2286 mutex_lock(&tpacpi_inputdev_send_mutex);
@@ -2162,19 +2301,28 @@ static void tpacpi_input_send_key(unsigned int scancode)
2162 } 2301 }
2163} 2302}
2164 2303
2304/* Do NOT call without validating scancode first */
2305static void tpacpi_input_send_key_masked(const unsigned int scancode)
2306{
2307 hotkey_driver_event(scancode);
2308 if (hotkey_user_mask & (1 << scancode))
2309 tpacpi_input_send_key(scancode);
2310}
2311
2165#ifdef CONFIG_THINKPAD_ACPI_HOTKEY_POLL 2312#ifdef CONFIG_THINKPAD_ACPI_HOTKEY_POLL
2166static struct tp_acpi_drv_struct ibm_hotkey_acpidriver; 2313static struct tp_acpi_drv_struct ibm_hotkey_acpidriver;
2167 2314
2315/* Do NOT call without validating scancode first */
2168static void tpacpi_hotkey_send_key(unsigned int scancode) 2316static void tpacpi_hotkey_send_key(unsigned int scancode)
2169{ 2317{
2170 tpacpi_input_send_key(scancode); 2318 tpacpi_input_send_key_masked(scancode);
2171 if (hotkey_report_mode < 2) { 2319 if (hotkey_report_mode < 2) {
2172 acpi_bus_generate_proc_event(ibm_hotkey_acpidriver.device, 2320 acpi_bus_generate_proc_event(ibm_hotkey_acpidriver.device,
2173 0x80, 0x1001 + scancode); 2321 0x80, TP_HKEY_EV_HOTKEY_BASE + scancode);
2174 } 2322 }
2175} 2323}
2176 2324
2177static void hotkey_read_nvram(struct tp_nvram_state *n, u32 m) 2325static void hotkey_read_nvram(struct tp_nvram_state *n, const u32 m)
2178{ 2326{
2179 u8 d; 2327 u8 d;
2180 2328
@@ -2210,21 +2358,24 @@ static void hotkey_read_nvram(struct tp_nvram_state *n, u32 m)
2210 } 2358 }
2211} 2359}
2212 2360
2361static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
2362 struct tp_nvram_state *newn,
2363 const u32 event_mask)
2364{
2365
2213#define TPACPI_COMPARE_KEY(__scancode, __member) \ 2366#define TPACPI_COMPARE_KEY(__scancode, __member) \
2214 do { \ 2367 do { \
2215 if ((mask & (1 << __scancode)) && \ 2368 if ((event_mask & (1 << __scancode)) && \
2216 oldn->__member != newn->__member) \ 2369 oldn->__member != newn->__member) \
2217 tpacpi_hotkey_send_key(__scancode); \ 2370 tpacpi_hotkey_send_key(__scancode); \
2218 } while (0) 2371 } while (0)
2219 2372
2220#define TPACPI_MAY_SEND_KEY(__scancode) \ 2373#define TPACPI_MAY_SEND_KEY(__scancode) \
2221 do { if (mask & (1 << __scancode)) \ 2374 do { \
2222 tpacpi_hotkey_send_key(__scancode); } while (0) 2375 if (event_mask & (1 << __scancode)) \
2376 tpacpi_hotkey_send_key(__scancode); \
2377 } while (0)
2223 2378
2224static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
2225 struct tp_nvram_state *newn,
2226 u32 mask)
2227{
2228 TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_THINKPAD, thinkpad_toggle); 2379 TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_THINKPAD, thinkpad_toggle);
2229 TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_FNSPACE, zoom_toggle); 2380 TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_FNSPACE, zoom_toggle);
2230 TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_FNF7, display_toggle); 2381 TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_FNF7, display_toggle);
@@ -2270,15 +2421,22 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
2270 } 2421 }
2271 } 2422 }
2272 } 2423 }
2273}
2274 2424
2275#undef TPACPI_COMPARE_KEY 2425#undef TPACPI_COMPARE_KEY
2276#undef TPACPI_MAY_SEND_KEY 2426#undef TPACPI_MAY_SEND_KEY
2427}
2277 2428
2429/*
2430 * Polling driver
2431 *
2432 * We track all events in hotkey_source_mask all the time, since
2433 * most of them are edge-based. We only issue those requested by
2434 * hotkey_user_mask or hotkey_driver_mask, though.
2435 */
2278static int hotkey_kthread(void *data) 2436static int hotkey_kthread(void *data)
2279{ 2437{
2280 struct tp_nvram_state s[2]; 2438 struct tp_nvram_state s[2];
2281 u32 mask; 2439 u32 poll_mask, event_mask;
2282 unsigned int si, so; 2440 unsigned int si, so;
2283 unsigned long t; 2441 unsigned long t;
2284 unsigned int change_detector, must_reset; 2442 unsigned int change_detector, must_reset;
@@ -2298,10 +2456,12 @@ static int hotkey_kthread(void *data)
2298 /* Initial state for compares */ 2456 /* Initial state for compares */
2299 mutex_lock(&hotkey_thread_data_mutex); 2457 mutex_lock(&hotkey_thread_data_mutex);
2300 change_detector = hotkey_config_change; 2458 change_detector = hotkey_config_change;
2301 mask = hotkey_source_mask & hotkey_mask; 2459 poll_mask = hotkey_source_mask;
2460 event_mask = hotkey_source_mask &
2461 (hotkey_driver_mask | hotkey_user_mask);
2302 poll_freq = hotkey_poll_freq; 2462 poll_freq = hotkey_poll_freq;
2303 mutex_unlock(&hotkey_thread_data_mutex); 2463 mutex_unlock(&hotkey_thread_data_mutex);
2304 hotkey_read_nvram(&s[so], mask); 2464 hotkey_read_nvram(&s[so], poll_mask);
2305 2465
2306 while (!kthread_should_stop()) { 2466 while (!kthread_should_stop()) {
2307 if (t == 0) { 2467 if (t == 0) {
@@ -2324,15 +2484,17 @@ static int hotkey_kthread(void *data)
2324 t = 0; 2484 t = 0;
2325 change_detector = hotkey_config_change; 2485 change_detector = hotkey_config_change;
2326 } 2486 }
2327 mask = hotkey_source_mask & hotkey_mask; 2487 poll_mask = hotkey_source_mask;
2488 event_mask = hotkey_source_mask &
2489 (hotkey_driver_mask | hotkey_user_mask);
2328 poll_freq = hotkey_poll_freq; 2490 poll_freq = hotkey_poll_freq;
2329 mutex_unlock(&hotkey_thread_data_mutex); 2491 mutex_unlock(&hotkey_thread_data_mutex);
2330 2492
2331 if (likely(mask)) { 2493 if (likely(poll_mask)) {
2332 hotkey_read_nvram(&s[si], mask); 2494 hotkey_read_nvram(&s[si], poll_mask);
2333 if (likely(si != so)) { 2495 if (likely(si != so)) {
2334 hotkey_compare_and_issue_event(&s[so], &s[si], 2496 hotkey_compare_and_issue_event(&s[so], &s[si],
2335 mask); 2497 event_mask);
2336 } 2498 }
2337 } 2499 }
2338 2500
@@ -2364,10 +2526,12 @@ static void hotkey_poll_stop_sync(void)
2364/* call with hotkey_mutex held */ 2526/* call with hotkey_mutex held */
2365static void hotkey_poll_setup(bool may_warn) 2527static void hotkey_poll_setup(bool may_warn)
2366{ 2528{
2367 u32 hotkeys_to_poll = hotkey_source_mask & hotkey_mask; 2529 const u32 poll_driver_mask = hotkey_driver_mask & hotkey_source_mask;
2530 const u32 poll_user_mask = hotkey_user_mask & hotkey_source_mask;
2368 2531
2369 if (hotkeys_to_poll != 0 && hotkey_poll_freq > 0 && 2532 if (hotkey_poll_freq > 0 &&
2370 (tpacpi_inputdev->users > 0 || hotkey_report_mode < 2)) { 2533 (poll_driver_mask ||
2534 (poll_user_mask && tpacpi_inputdev->users > 0))) {
2371 if (!tpacpi_hotkey_task) { 2535 if (!tpacpi_hotkey_task) {
2372 tpacpi_hotkey_task = kthread_run(hotkey_kthread, 2536 tpacpi_hotkey_task = kthread_run(hotkey_kthread,
2373 NULL, TPACPI_NVRAM_KTHREAD_NAME); 2537 NULL, TPACPI_NVRAM_KTHREAD_NAME);
@@ -2380,12 +2544,13 @@ static void hotkey_poll_setup(bool may_warn)
2380 } 2544 }
2381 } else { 2545 } else {
2382 hotkey_poll_stop_sync(); 2546 hotkey_poll_stop_sync();
2383 if (may_warn && hotkeys_to_poll != 0 && 2547 if (may_warn && (poll_driver_mask || poll_user_mask) &&
2384 hotkey_poll_freq == 0) { 2548 hotkey_poll_freq == 0) {
2385 printk(TPACPI_NOTICE 2549 printk(TPACPI_NOTICE
2386 "hot keys 0x%08x require polling, " 2550 "hot keys 0x%08x and/or events 0x%08x "
2387 "which is currently disabled\n", 2551 "require polling, which is currently "
2388 hotkeys_to_poll); 2552 "disabled\n",
2553 poll_user_mask, poll_driver_mask);
2389 } 2554 }
2390 } 2555 }
2391} 2556}
@@ -2403,9 +2568,7 @@ static void hotkey_poll_set_freq(unsigned int freq)
2403 if (!freq) 2568 if (!freq)
2404 hotkey_poll_stop_sync(); 2569 hotkey_poll_stop_sync();
2405 2570
2406 HOTKEY_CONFIG_CRITICAL_START
2407 hotkey_poll_freq = freq; 2571 hotkey_poll_freq = freq;
2408 HOTKEY_CONFIG_CRITICAL_END
2409} 2572}
2410 2573
2411#else /* CONFIG_THINKPAD_ACPI_HOTKEY_POLL */ 2574#else /* CONFIG_THINKPAD_ACPI_HOTKEY_POLL */
@@ -2440,7 +2603,8 @@ static int hotkey_inputdev_open(struct input_dev *dev)
2440static void hotkey_inputdev_close(struct input_dev *dev) 2603static void hotkey_inputdev_close(struct input_dev *dev)
2441{ 2604{
2442 /* disable hotkey polling when possible */ 2605 /* disable hotkey polling when possible */
2443 if (tpacpi_lifecycle == TPACPI_LIFE_RUNNING) 2606 if (tpacpi_lifecycle == TPACPI_LIFE_RUNNING &&
2607 !(hotkey_source_mask & hotkey_driver_mask))
2444 hotkey_poll_setup_safe(false); 2608 hotkey_poll_setup_safe(false);
2445} 2609}
2446 2610
@@ -2488,15 +2652,7 @@ static ssize_t hotkey_mask_show(struct device *dev,
2488 struct device_attribute *attr, 2652 struct device_attribute *attr,
2489 char *buf) 2653 char *buf)
2490{ 2654{
2491 int res; 2655 return snprintf(buf, PAGE_SIZE, "0x%08x\n", hotkey_user_mask);
2492
2493 if (mutex_lock_killable(&hotkey_mutex))
2494 return -ERESTARTSYS;
2495 res = hotkey_mask_get();
2496 mutex_unlock(&hotkey_mutex);
2497
2498 return (res)?
2499 res : snprintf(buf, PAGE_SIZE, "0x%08x\n", hotkey_mask);
2500} 2656}
2501 2657
2502static ssize_t hotkey_mask_store(struct device *dev, 2658static ssize_t hotkey_mask_store(struct device *dev,
@@ -2512,7 +2668,7 @@ static ssize_t hotkey_mask_store(struct device *dev,
2512 if (mutex_lock_killable(&hotkey_mutex)) 2668 if (mutex_lock_killable(&hotkey_mutex))
2513 return -ERESTARTSYS; 2669 return -ERESTARTSYS;
2514 2670
2515 res = hotkey_mask_set(t); 2671 res = hotkey_user_mask_set(t);
2516 2672
2517#ifdef CONFIG_THINKPAD_ACPI_HOTKEY_POLL 2673#ifdef CONFIG_THINKPAD_ACPI_HOTKEY_POLL
2518 hotkey_poll_setup(true); 2674 hotkey_poll_setup(true);
@@ -2594,6 +2750,8 @@ static ssize_t hotkey_source_mask_store(struct device *dev,
2594 const char *buf, size_t count) 2750 const char *buf, size_t count)
2595{ 2751{
2596 unsigned long t; 2752 unsigned long t;
2753 u32 r_ev;
2754 int rc;
2597 2755
2598 if (parse_strtoul(buf, 0xffffffffUL, &t) || 2756 if (parse_strtoul(buf, 0xffffffffUL, &t) ||
2599 ((t & ~TPACPI_HKEY_NVRAM_KNOWN_MASK) != 0)) 2757 ((t & ~TPACPI_HKEY_NVRAM_KNOWN_MASK) != 0))
@@ -2606,14 +2764,28 @@ static ssize_t hotkey_source_mask_store(struct device *dev,
2606 hotkey_source_mask = t; 2764 hotkey_source_mask = t;
2607 HOTKEY_CONFIG_CRITICAL_END 2765 HOTKEY_CONFIG_CRITICAL_END
2608 2766
2767 rc = hotkey_mask_set((hotkey_user_mask | hotkey_driver_mask) &
2768 ~hotkey_source_mask);
2609 hotkey_poll_setup(true); 2769 hotkey_poll_setup(true);
2610 hotkey_mask_set(hotkey_mask); 2770
2771 /* check if events needed by the driver got disabled */
2772 r_ev = hotkey_driver_mask & ~(hotkey_acpi_mask & hotkey_all_mask)
2773 & ~hotkey_source_mask & TPACPI_HKEY_NVRAM_KNOWN_MASK;
2611 2774
2612 mutex_unlock(&hotkey_mutex); 2775 mutex_unlock(&hotkey_mutex);
2613 2776
2777 if (rc < 0)
 2778 printk(TPACPI_ERR "hotkey_source_mask: failed to update the "
2779 "firmware event mask!\n");
2780
2781 if (r_ev)
2782 printk(TPACPI_NOTICE "hotkey_source_mask: "
2783 "some important events were disabled: "
2784 "0x%04x\n", r_ev);
2785
2614 tpacpi_disclose_usertask("hotkey_source_mask", "set to 0x%08lx\n", t); 2786 tpacpi_disclose_usertask("hotkey_source_mask", "set to 0x%08lx\n", t);
2615 2787
2616 return count; 2788 return (rc < 0) ? rc : count;
2617} 2789}
2618 2790
2619static struct device_attribute dev_attr_hotkey_source_mask = 2791static struct device_attribute dev_attr_hotkey_source_mask =
@@ -2731,9 +2903,8 @@ static struct device_attribute dev_attr_hotkey_wakeup_reason =
2731 2903
2732static void hotkey_wakeup_reason_notify_change(void) 2904static void hotkey_wakeup_reason_notify_change(void)
2733{ 2905{
2734 if (tp_features.hotkey_mask) 2906 sysfs_notify(&tpacpi_pdev->dev.kobj, NULL,
2735 sysfs_notify(&tpacpi_pdev->dev.kobj, NULL, 2907 "wakeup_reason");
2736 "wakeup_reason");
2737} 2908}
2738 2909
2739/* sysfs wakeup hotunplug_complete (pollable) -------------------------- */ 2910/* sysfs wakeup hotunplug_complete (pollable) -------------------------- */
@@ -2750,9 +2921,8 @@ static struct device_attribute dev_attr_hotkey_wakeup_hotunplug_complete =
2750 2921
2751static void hotkey_wakeup_hotunplug_complete_notify_change(void) 2922static void hotkey_wakeup_hotunplug_complete_notify_change(void)
2752{ 2923{
2753 if (tp_features.hotkey_mask) 2924 sysfs_notify(&tpacpi_pdev->dev.kobj, NULL,
2754 sysfs_notify(&tpacpi_pdev->dev.kobj, NULL, 2925 "wakeup_hotunplug_complete");
2755 "wakeup_hotunplug_complete");
2756} 2926}
2757 2927
2758/* --------------------------------------------------------------------- */ 2928/* --------------------------------------------------------------------- */
@@ -2760,27 +2930,19 @@ static void hotkey_wakeup_hotunplug_complete_notify_change(void)
2760static struct attribute *hotkey_attributes[] __initdata = { 2930static struct attribute *hotkey_attributes[] __initdata = {
2761 &dev_attr_hotkey_enable.attr, 2931 &dev_attr_hotkey_enable.attr,
2762 &dev_attr_hotkey_bios_enabled.attr, 2932 &dev_attr_hotkey_bios_enabled.attr,
2933 &dev_attr_hotkey_bios_mask.attr,
2763 &dev_attr_hotkey_report_mode.attr, 2934 &dev_attr_hotkey_report_mode.attr,
2764#ifdef CONFIG_THINKPAD_ACPI_HOTKEY_POLL 2935 &dev_attr_hotkey_wakeup_reason.attr,
2936 &dev_attr_hotkey_wakeup_hotunplug_complete.attr,
2765 &dev_attr_hotkey_mask.attr, 2937 &dev_attr_hotkey_mask.attr,
2766 &dev_attr_hotkey_all_mask.attr, 2938 &dev_attr_hotkey_all_mask.attr,
2767 &dev_attr_hotkey_recommended_mask.attr, 2939 &dev_attr_hotkey_recommended_mask.attr,
2940#ifdef CONFIG_THINKPAD_ACPI_HOTKEY_POLL
2768 &dev_attr_hotkey_source_mask.attr, 2941 &dev_attr_hotkey_source_mask.attr,
2769 &dev_attr_hotkey_poll_freq.attr, 2942 &dev_attr_hotkey_poll_freq.attr,
2770#endif 2943#endif
2771}; 2944};
2772 2945
2773static struct attribute *hotkey_mask_attributes[] __initdata = {
2774 &dev_attr_hotkey_bios_mask.attr,
2775#ifndef CONFIG_THINKPAD_ACPI_HOTKEY_POLL
2776 &dev_attr_hotkey_mask.attr,
2777 &dev_attr_hotkey_all_mask.attr,
2778 &dev_attr_hotkey_recommended_mask.attr,
2779#endif
2780 &dev_attr_hotkey_wakeup_reason.attr,
2781 &dev_attr_hotkey_wakeup_hotunplug_complete.attr,
2782};
2783
2784/* 2946/*
2785 * Sync both the hw and sw blocking state of all switches 2947 * Sync both the hw and sw blocking state of all switches
2786 */ 2948 */
@@ -2843,16 +3005,16 @@ static void hotkey_exit(void)
2843 3005
2844 kfree(hotkey_keycode_map); 3006 kfree(hotkey_keycode_map);
2845 3007
2846 if (tp_features.hotkey) { 3008 dbg_printk(TPACPI_DBG_EXIT | TPACPI_DBG_HKEY,
2847 dbg_printk(TPACPI_DBG_EXIT | TPACPI_DBG_HKEY, 3009 "restoring original HKEY status and mask\n");
2848 "restoring original hot key mask\n"); 3010 /* yes, there is a bitwise or below, we want the
 2849 /* no short-circuit boolean operator below! */ 3011 * functions to be called even if one of them fails */
2850 if ((hotkey_mask_set(hotkey_orig_mask) | 3012 if (((tp_features.hotkey_mask &&
2851 hotkey_status_set(false)) != 0) 3013 hotkey_mask_set(hotkey_orig_mask)) |
2852 printk(TPACPI_ERR 3014 hotkey_status_set(false)) != 0)
2853 "failed to restore hot key mask " 3015 printk(TPACPI_ERR
2854 "to BIOS defaults\n"); 3016 "failed to restore hot key mask "
2855 } 3017 "to BIOS defaults\n");
2856} 3018}
2857 3019
2858static void __init hotkey_unmap(const unsigned int scancode) 3020static void __init hotkey_unmap(const unsigned int scancode)
@@ -2864,6 +3026,35 @@ static void __init hotkey_unmap(const unsigned int scancode)
2864 } 3026 }
2865} 3027}
2866 3028
3029/*
3030 * HKEY quirks:
3031 * TPACPI_HK_Q_INIMASK: Supports FN+F3,FN+F4,FN+F12
3032 */
3033
3034#define TPACPI_HK_Q_INIMASK 0x0001
3035
3036static const struct tpacpi_quirk tpacpi_hotkey_qtable[] __initconst = {
3037 TPACPI_Q_IBM('I', 'H', TPACPI_HK_Q_INIMASK), /* 600E */
3038 TPACPI_Q_IBM('I', 'N', TPACPI_HK_Q_INIMASK), /* 600E */
3039 TPACPI_Q_IBM('I', 'D', TPACPI_HK_Q_INIMASK), /* 770, 770E, 770ED */
3040 TPACPI_Q_IBM('I', 'W', TPACPI_HK_Q_INIMASK), /* A20m */
3041 TPACPI_Q_IBM('I', 'V', TPACPI_HK_Q_INIMASK), /* A20p */
3042 TPACPI_Q_IBM('1', '0', TPACPI_HK_Q_INIMASK), /* A21e, A22e */
3043 TPACPI_Q_IBM('K', 'U', TPACPI_HK_Q_INIMASK), /* A21e */
3044 TPACPI_Q_IBM('K', 'X', TPACPI_HK_Q_INIMASK), /* A21m, A22m */
3045 TPACPI_Q_IBM('K', 'Y', TPACPI_HK_Q_INIMASK), /* A21p, A22p */
3046 TPACPI_Q_IBM('1', 'B', TPACPI_HK_Q_INIMASK), /* A22e */
3047 TPACPI_Q_IBM('1', '3', TPACPI_HK_Q_INIMASK), /* A22m */
3048 TPACPI_Q_IBM('1', 'E', TPACPI_HK_Q_INIMASK), /* A30/p (0) */
3049 TPACPI_Q_IBM('1', 'C', TPACPI_HK_Q_INIMASK), /* R30 */
3050 TPACPI_Q_IBM('1', 'F', TPACPI_HK_Q_INIMASK), /* R31 */
3051 TPACPI_Q_IBM('I', 'Y', TPACPI_HK_Q_INIMASK), /* T20 */
3052 TPACPI_Q_IBM('K', 'Z', TPACPI_HK_Q_INIMASK), /* T21 */
3053 TPACPI_Q_IBM('1', '6', TPACPI_HK_Q_INIMASK), /* T22 */
3054 TPACPI_Q_IBM('I', 'Z', TPACPI_HK_Q_INIMASK), /* X20, X21 */
3055 TPACPI_Q_IBM('1', 'D', TPACPI_HK_Q_INIMASK), /* X22, X23, X24 */
3056};
3057
2867static int __init hotkey_init(struct ibm_init_struct *iibm) 3058static int __init hotkey_init(struct ibm_init_struct *iibm)
2868{ 3059{
2869 /* Requirements for changing the default keymaps: 3060 /* Requirements for changing the default keymaps:
@@ -2906,9 +3097,7 @@ static int __init hotkey_init(struct ibm_init_struct *iibm)
2906 KEY_UNKNOWN, /* 0x0D: FN+INSERT */ 3097 KEY_UNKNOWN, /* 0x0D: FN+INSERT */
2907 KEY_UNKNOWN, /* 0x0E: FN+DELETE */ 3098 KEY_UNKNOWN, /* 0x0E: FN+DELETE */
2908 3099
2909 /* brightness: firmware always reacts to them, unless 3100 /* brightness: firmware always reacts to them */
2910 * X.org did some tricks in the radeon BIOS scratch
2911 * registers of *some* models */
2912 KEY_RESERVED, /* 0x0F: FN+HOME (brightness up) */ 3101 KEY_RESERVED, /* 0x0F: FN+HOME (brightness up) */
2913 KEY_RESERVED, /* 0x10: FN+END (brightness down) */ 3102 KEY_RESERVED, /* 0x10: FN+END (brightness down) */
2914 3103
@@ -2983,6 +3172,8 @@ static int __init hotkey_init(struct ibm_init_struct *iibm)
2983 int status; 3172 int status;
2984 int hkeyv; 3173 int hkeyv;
2985 3174
3175 unsigned long quirks;
3176
2986 vdbg_printk(TPACPI_DBG_INIT | TPACPI_DBG_HKEY, 3177 vdbg_printk(TPACPI_DBG_INIT | TPACPI_DBG_HKEY,
2987 "initializing hotkey subdriver\n"); 3178 "initializing hotkey subdriver\n");
2988 3179
@@ -3008,9 +3199,16 @@ static int __init hotkey_init(struct ibm_init_struct *iibm)
3008 if (!tp_features.hotkey) 3199 if (!tp_features.hotkey)
3009 return 1; 3200 return 1;
3010 3201
3202 quirks = tpacpi_check_quirks(tpacpi_hotkey_qtable,
3203 ARRAY_SIZE(tpacpi_hotkey_qtable));
3204
3011 tpacpi_disable_brightness_delay(); 3205 tpacpi_disable_brightness_delay();
3012 3206
3013 hotkey_dev_attributes = create_attr_set(13, NULL); 3207 /* MUST have enough space for all attributes to be added to
3208 * hotkey_dev_attributes */
3209 hotkey_dev_attributes = create_attr_set(
3210 ARRAY_SIZE(hotkey_attributes) + 2,
3211 NULL);
3014 if (!hotkey_dev_attributes) 3212 if (!hotkey_dev_attributes)
3015 return -ENOMEM; 3213 return -ENOMEM;
3016 res = add_many_to_attr_set(hotkey_dev_attributes, 3214 res = add_many_to_attr_set(hotkey_dev_attributes,
@@ -3019,7 +3217,7 @@ static int __init hotkey_init(struct ibm_init_struct *iibm)
3019 if (res) 3217 if (res)
3020 goto err_exit; 3218 goto err_exit;
3021 3219
3022 /* mask not supported on 570, 600e/x, 770e, 770x, A21e, A2xm/p, 3220 /* mask not supported on 600e/x, 770e, 770x, A21e, A2xm/p,
3023 A30, R30, R31, T20-22, X20-21, X22-24. Detected by checking 3221 A30, R30, R31, T20-22, X20-21, X22-24. Detected by checking
3024 for HKEY interface version 0x100 */ 3222 for HKEY interface version 0x100 */
3025 if (acpi_evalf(hkey_handle, &hkeyv, "MHKV", "qd")) { 3223 if (acpi_evalf(hkey_handle, &hkeyv, "MHKV", "qd")) {
@@ -3033,10 +3231,22 @@ static int __init hotkey_init(struct ibm_init_struct *iibm)
3033 * MHKV 0x100 in A31, R40, R40e, 3231 * MHKV 0x100 in A31, R40, R40e,
3034 * T4x, X31, and later 3232 * T4x, X31, and later
3035 */ 3233 */
3036 tp_features.hotkey_mask = 1;
3037 vdbg_printk(TPACPI_DBG_INIT | TPACPI_DBG_HKEY, 3234 vdbg_printk(TPACPI_DBG_INIT | TPACPI_DBG_HKEY,
3038 "firmware HKEY interface version: 0x%x\n", 3235 "firmware HKEY interface version: 0x%x\n",
3039 hkeyv); 3236 hkeyv);
3237
3238 /* Paranoia check AND init hotkey_all_mask */
3239 if (!acpi_evalf(hkey_handle, &hotkey_all_mask,
3240 "MHKA", "qd")) {
3241 printk(TPACPI_ERR
3242 "missing MHKA handler, "
3243 "please report this to %s\n",
3244 TPACPI_MAIL);
3245 /* Fallback: pre-init for FN+F3,F4,F12 */
3246 hotkey_all_mask = 0x080cU;
3247 } else {
3248 tp_features.hotkey_mask = 1;
3249 }
3040 } 3250 }
3041 } 3251 }
3042 3252
@@ -3044,32 +3254,23 @@ static int __init hotkey_init(struct ibm_init_struct *iibm)
3044 "hotkey masks are %s\n", 3254 "hotkey masks are %s\n",
3045 str_supported(tp_features.hotkey_mask)); 3255 str_supported(tp_features.hotkey_mask));
3046 3256
3047 if (tp_features.hotkey_mask) { 3257 /* Init hotkey_all_mask if not initialized yet */
3048 if (!acpi_evalf(hkey_handle, &hotkey_all_mask, 3258 if (!tp_features.hotkey_mask && !hotkey_all_mask &&
3049 "MHKA", "qd")) { 3259 (quirks & TPACPI_HK_Q_INIMASK))
3050 printk(TPACPI_ERR 3260 hotkey_all_mask = 0x080cU; /* FN+F12, FN+F4, FN+F3 */
3051 "missing MHKA handler, "
3052 "please report this to %s\n",
3053 TPACPI_MAIL);
3054 /* FN+F12, FN+F4, FN+F3 */
3055 hotkey_all_mask = 0x080cU;
3056 }
3057 }
3058 3261
3059 /* hotkey_source_mask *must* be zero for 3262 /* Init hotkey_acpi_mask and hotkey_orig_mask */
3060 * the first hotkey_mask_get */
3061 if (tp_features.hotkey_mask) { 3263 if (tp_features.hotkey_mask) {
3264 /* hotkey_source_mask *must* be zero for
3265 * the first hotkey_mask_get to return hotkey_orig_mask */
3062 res = hotkey_mask_get(); 3266 res = hotkey_mask_get();
3063 if (res) 3267 if (res)
3064 goto err_exit; 3268 goto err_exit;
3065 3269
3066 hotkey_orig_mask = hotkey_mask; 3270 hotkey_orig_mask = hotkey_acpi_mask;
3067 res = add_many_to_attr_set( 3271 } else {
3068 hotkey_dev_attributes, 3272 hotkey_orig_mask = hotkey_all_mask;
3069 hotkey_mask_attributes, 3273 hotkey_acpi_mask = hotkey_all_mask;
3070 ARRAY_SIZE(hotkey_mask_attributes));
3071 if (res)
3072 goto err_exit;
3073 } 3274 }
3074 3275
3075#ifdef CONFIG_THINKPAD_ACPI_DEBUGFACILITIES 3276#ifdef CONFIG_THINKPAD_ACPI_DEBUGFACILITIES
@@ -3183,14 +3384,9 @@ static int __init hotkey_init(struct ibm_init_struct *iibm)
3183 } 3384 }
3184 3385
3185#ifdef CONFIG_THINKPAD_ACPI_HOTKEY_POLL 3386#ifdef CONFIG_THINKPAD_ACPI_HOTKEY_POLL
3186 if (tp_features.hotkey_mask) { 3387 hotkey_source_mask = TPACPI_HKEY_NVRAM_GOOD_MASK
3187 hotkey_source_mask = TPACPI_HKEY_NVRAM_GOOD_MASK 3388 & ~hotkey_all_mask
3188 & ~hotkey_all_mask 3389 & ~hotkey_reserved_mask;
3189 & ~hotkey_reserved_mask;
3190 } else {
3191 hotkey_source_mask = TPACPI_HKEY_NVRAM_GOOD_MASK
3192 & ~hotkey_reserved_mask;
3193 }
3194 3390
3195 vdbg_printk(TPACPI_DBG_INIT | TPACPI_DBG_HKEY, 3391 vdbg_printk(TPACPI_DBG_INIT | TPACPI_DBG_HKEY,
3196 "hotkey source mask 0x%08x, polling freq %u\n", 3392 "hotkey source mask 0x%08x, polling freq %u\n",
@@ -3204,13 +3400,18 @@ static int __init hotkey_init(struct ibm_init_struct *iibm)
3204 hotkey_exit(); 3400 hotkey_exit();
3205 return res; 3401 return res;
3206 } 3402 }
3207 res = hotkey_mask_set(((hotkey_all_mask | hotkey_source_mask) 3403 res = hotkey_mask_set(((hotkey_all_mask & ~hotkey_reserved_mask)
3208 & ~hotkey_reserved_mask) 3404 | hotkey_driver_mask)
3209 | hotkey_orig_mask); 3405 & ~hotkey_source_mask);
3210 if (res < 0 && res != -ENXIO) { 3406 if (res < 0 && res != -ENXIO) {
3211 hotkey_exit(); 3407 hotkey_exit();
3212 return res; 3408 return res;
3213 } 3409 }
3410 hotkey_user_mask = (hotkey_acpi_mask | hotkey_source_mask)
3411 & ~hotkey_reserved_mask;
3412 vdbg_printk(TPACPI_DBG_INIT | TPACPI_DBG_HKEY,
3413 "initial masks: user=0x%08x, fw=0x%08x, poll=0x%08x\n",
3414 hotkey_user_mask, hotkey_acpi_mask, hotkey_source_mask);
3214 3415
3215 dbg_printk(TPACPI_DBG_INIT | TPACPI_DBG_HKEY, 3416 dbg_printk(TPACPI_DBG_INIT | TPACPI_DBG_HKEY,
3216 "legacy ibm/hotkey event reporting over procfs %s\n", 3417 "legacy ibm/hotkey event reporting over procfs %s\n",
@@ -3245,7 +3446,7 @@ static bool hotkey_notify_hotkey(const u32 hkey,
3245 if (scancode > 0 && scancode < 0x21) { 3446 if (scancode > 0 && scancode < 0x21) {
3246 scancode--; 3447 scancode--;
3247 if (!(hotkey_source_mask & (1 << scancode))) { 3448 if (!(hotkey_source_mask & (1 << scancode))) {
3248 tpacpi_input_send_key(scancode); 3449 tpacpi_input_send_key_masked(scancode);
3249 *send_acpi_ev = false; 3450 *send_acpi_ev = false;
3250 } else { 3451 } else {
3251 *ignore_acpi_ev = true; 3452 *ignore_acpi_ev = true;
@@ -3264,20 +3465,20 @@ static bool hotkey_notify_wakeup(const u32 hkey,
3264 *ignore_acpi_ev = false; 3465 *ignore_acpi_ev = false;
3265 3466
3266 switch (hkey) { 3467 switch (hkey) {
3267 case 0x2304: /* suspend, undock */ 3468 case TP_HKEY_EV_WKUP_S3_UNDOCK: /* suspend, undock */
3268 case 0x2404: /* hibernation, undock */ 3469 case TP_HKEY_EV_WKUP_S4_UNDOCK: /* hibernation, undock */
3269 hotkey_wakeup_reason = TP_ACPI_WAKEUP_UNDOCK; 3470 hotkey_wakeup_reason = TP_ACPI_WAKEUP_UNDOCK;
3270 *ignore_acpi_ev = true; 3471 *ignore_acpi_ev = true;
3271 break; 3472 break;
3272 3473
3273 case 0x2305: /* suspend, bay eject */ 3474 case TP_HKEY_EV_WKUP_S3_BAYEJ: /* suspend, bay eject */
3274 case 0x2405: /* hibernation, bay eject */ 3475 case TP_HKEY_EV_WKUP_S4_BAYEJ: /* hibernation, bay eject */
3275 hotkey_wakeup_reason = TP_ACPI_WAKEUP_BAYEJ; 3476 hotkey_wakeup_reason = TP_ACPI_WAKEUP_BAYEJ;
3276 *ignore_acpi_ev = true; 3477 *ignore_acpi_ev = true;
3277 break; 3478 break;
3278 3479
3279 case 0x2313: /* Battery on critical low level (S3) */ 3480 case TP_HKEY_EV_WKUP_S3_BATLOW: /* Battery on critical low level/S3 */
3280 case 0x2413: /* Battery on critical low level (S4) */ 3481 case TP_HKEY_EV_WKUP_S4_BATLOW: /* Battery on critical low level/S4 */
3281 printk(TPACPI_ALERT 3482 printk(TPACPI_ALERT
3282 "EMERGENCY WAKEUP: battery almost empty\n"); 3483 "EMERGENCY WAKEUP: battery almost empty\n");
3283 /* how to auto-heal: */ 3484 /* how to auto-heal: */
@@ -3307,21 +3508,21 @@ static bool hotkey_notify_usrevent(const u32 hkey,
3307 *ignore_acpi_ev = false; 3508 *ignore_acpi_ev = false;
3308 3509
3309 switch (hkey) { 3510 switch (hkey) {
3310 case 0x5010: /* Lenovo new BIOS: brightness changed */ 3511 case TP_HKEY_EV_PEN_INSERTED: /* X61t: tablet pen inserted into bay */
3311 case 0x500b: /* X61t: tablet pen inserted into bay */ 3512 case TP_HKEY_EV_PEN_REMOVED: /* X61t: tablet pen removed from bay */
3312 case 0x500c: /* X61t: tablet pen removed from bay */
3313 return true; 3513 return true;
3314 3514
3315 case 0x5009: /* X41t-X61t: swivel up (tablet mode) */ 3515 case TP_HKEY_EV_TABLET_TABLET: /* X41t-X61t: tablet mode */
3316 case 0x500a: /* X41t-X61t: swivel down (normal mode) */ 3516 case TP_HKEY_EV_TABLET_NOTEBOOK: /* X41t-X61t: normal mode */
3317 tpacpi_input_send_tabletsw(); 3517 tpacpi_input_send_tabletsw();
3318 hotkey_tablet_mode_notify_change(); 3518 hotkey_tablet_mode_notify_change();
3319 *send_acpi_ev = false; 3519 *send_acpi_ev = false;
3320 return true; 3520 return true;
3321 3521
3322 case 0x5001: 3522 case TP_HKEY_EV_LID_CLOSE: /* Lid closed */
3323 case 0x5002: 3523 case TP_HKEY_EV_LID_OPEN: /* Lid opened */
3324 /* LID switch events. Do not propagate */ 3524 case TP_HKEY_EV_BRGHT_CHANGED: /* brightness changed */
3525 /* do not propagate these events */
3325 *ignore_acpi_ev = true; 3526 *ignore_acpi_ev = true;
3326 return true; 3527 return true;
3327 3528
@@ -3339,30 +3540,30 @@ static bool hotkey_notify_thermal(const u32 hkey,
3339 *ignore_acpi_ev = false; 3540 *ignore_acpi_ev = false;
3340 3541
3341 switch (hkey) { 3542 switch (hkey) {
3342 case 0x6011: 3543 case TP_HKEY_EV_ALARM_BAT_HOT:
3343 printk(TPACPI_CRIT 3544 printk(TPACPI_CRIT
3344 "THERMAL ALARM: battery is too hot!\n"); 3545 "THERMAL ALARM: battery is too hot!\n");
3345 /* recommended action: warn user through gui */ 3546 /* recommended action: warn user through gui */
3346 return true; 3547 return true;
3347 case 0x6012: 3548 case TP_HKEY_EV_ALARM_BAT_XHOT:
3348 printk(TPACPI_ALERT 3549 printk(TPACPI_ALERT
3349 "THERMAL EMERGENCY: battery is extremely hot!\n"); 3550 "THERMAL EMERGENCY: battery is extremely hot!\n");
3350 /* recommended action: immediate sleep/hibernate */ 3551 /* recommended action: immediate sleep/hibernate */
3351 return true; 3552 return true;
3352 case 0x6021: 3553 case TP_HKEY_EV_ALARM_SENSOR_HOT:
3353 printk(TPACPI_CRIT 3554 printk(TPACPI_CRIT
3354 "THERMAL ALARM: " 3555 "THERMAL ALARM: "
3355 "a sensor reports something is too hot!\n"); 3556 "a sensor reports something is too hot!\n");
3356 /* recommended action: warn user through gui, that */ 3557 /* recommended action: warn user through gui, that */
3357 /* some internal component is too hot */ 3558 /* some internal component is too hot */
3358 return true; 3559 return true;
3359 case 0x6022: 3560 case TP_HKEY_EV_ALARM_SENSOR_XHOT:
3360 printk(TPACPI_ALERT 3561 printk(TPACPI_ALERT
3361 "THERMAL EMERGENCY: " 3562 "THERMAL EMERGENCY: "
3362 "a sensor reports something is extremely hot!\n"); 3563 "a sensor reports something is extremely hot!\n");
3363 /* recommended action: immediate sleep/hibernate */ 3564 /* recommended action: immediate sleep/hibernate */
3364 return true; 3565 return true;
3365 case 0x6030: 3566 case TP_HKEY_EV_THM_TABLE_CHANGED:
3366 printk(TPACPI_INFO 3567 printk(TPACPI_INFO
3367 "EC reports that Thermal Table has changed\n"); 3568 "EC reports that Thermal Table has changed\n");
3368 /* recommended action: do nothing, we don't have 3569 /* recommended action: do nothing, we don't have
@@ -3420,7 +3621,7 @@ static void hotkey_notify(struct ibm_struct *ibm, u32 event)
3420 break; 3621 break;
3421 case 3: 3622 case 3:
3422 /* 0x3000-0x3FFF: bay-related wakeups */ 3623 /* 0x3000-0x3FFF: bay-related wakeups */
3423 if (hkey == 0x3003) { 3624 if (hkey == TP_HKEY_EV_BAYEJ_ACK) {
3424 hotkey_autosleep_ack = 1; 3625 hotkey_autosleep_ack = 1;
3425 printk(TPACPI_INFO 3626 printk(TPACPI_INFO
3426 "bay ejected\n"); 3627 "bay ejected\n");
@@ -3432,7 +3633,7 @@ static void hotkey_notify(struct ibm_struct *ibm, u32 event)
3432 break; 3633 break;
3433 case 4: 3634 case 4:
3434 /* 0x4000-0x4FFF: dock-related wakeups */ 3635 /* 0x4000-0x4FFF: dock-related wakeups */
3435 if (hkey == 0x4003) { 3636 if (hkey == TP_HKEY_EV_UNDOCK_ACK) {
3436 hotkey_autosleep_ack = 1; 3637 hotkey_autosleep_ack = 1;
3437 printk(TPACPI_INFO 3638 printk(TPACPI_INFO
3438 "undocked\n"); 3639 "undocked\n");
@@ -3454,7 +3655,8 @@ static void hotkey_notify(struct ibm_struct *ibm, u32 event)
3454 break; 3655 break;
3455 case 7: 3656 case 7:
3456 /* 0x7000-0x7FFF: misc */ 3657 /* 0x7000-0x7FFF: misc */
3457 if (tp_features.hotkey_wlsw && hkey == 0x7000) { 3658 if (tp_features.hotkey_wlsw &&
3659 hkey == TP_HKEY_EV_RFKILL_CHANGED) {
3458 tpacpi_send_radiosw_update(); 3660 tpacpi_send_radiosw_update();
3459 send_acpi_ev = 0; 3661 send_acpi_ev = 0;
3460 known_ev = true; 3662 known_ev = true;
@@ -3500,10 +3702,12 @@ static void hotkey_resume(void)
3500{ 3702{
3501 tpacpi_disable_brightness_delay(); 3703 tpacpi_disable_brightness_delay();
3502 3704
3503 if (hotkey_mask_get()) 3705 if (hotkey_status_set(true) < 0 ||
3706 hotkey_mask_set(hotkey_acpi_mask) < 0)
3504 printk(TPACPI_ERR 3707 printk(TPACPI_ERR
3505 "error while trying to read hot key mask " 3708 "error while attempting to reset the event "
3506 "from firmware\n"); 3709 "firmware interface\n");
3710
3507 tpacpi_send_radiosw_update(); 3711 tpacpi_send_radiosw_update();
3508 hotkey_tablet_mode_notify_change(); 3712 hotkey_tablet_mode_notify_change();
3509 hotkey_wakeup_reason_notify_change(); 3713 hotkey_wakeup_reason_notify_change();
@@ -3532,8 +3736,8 @@ static int hotkey_read(char *p)
3532 return res; 3736 return res;
3533 3737
3534 len += sprintf(p + len, "status:\t\t%s\n", enabled(status, 0)); 3738 len += sprintf(p + len, "status:\t\t%s\n", enabled(status, 0));
3535 if (tp_features.hotkey_mask) { 3739 if (hotkey_all_mask) {
3536 len += sprintf(p + len, "mask:\t\t0x%08x\n", hotkey_mask); 3740 len += sprintf(p + len, "mask:\t\t0x%08x\n", hotkey_user_mask);
3537 len += sprintf(p + len, 3741 len += sprintf(p + len,
3538 "commands:\tenable, disable, reset, <mask>\n"); 3742 "commands:\tenable, disable, reset, <mask>\n");
3539 } else { 3743 } else {
@@ -3570,7 +3774,7 @@ static int hotkey_write(char *buf)
3570 if (mutex_lock_killable(&hotkey_mutex)) 3774 if (mutex_lock_killable(&hotkey_mutex))
3571 return -ERESTARTSYS; 3775 return -ERESTARTSYS;
3572 3776
3573 mask = hotkey_mask; 3777 mask = hotkey_user_mask;
3574 3778
3575 res = 0; 3779 res = 0;
3576 while ((cmd = next_cmd(&buf))) { 3780 while ((cmd = next_cmd(&buf))) {
@@ -3592,12 +3796,11 @@ static int hotkey_write(char *buf)
3592 } 3796 }
3593 } 3797 }
3594 3798
3595 if (!res) 3799 if (!res) {
3596 tpacpi_disclose_usertask("procfs hotkey", 3800 tpacpi_disclose_usertask("procfs hotkey",
3597 "set mask to 0x%08x\n", mask); 3801 "set mask to 0x%08x\n", mask);
3598 3802 res = hotkey_user_mask_set(mask);
3599 if (!res && mask != hotkey_mask) 3803 }
3600 res = hotkey_mask_set(mask);
3601 3804
3602errexit: 3805errexit:
3603 mutex_unlock(&hotkey_mutex); 3806 mutex_unlock(&hotkey_mutex);
@@ -6010,8 +6213,10 @@ static int __init brightness_init(struct ibm_init_struct *iibm)
6010 TPACPI_BACKLIGHT_DEV_NAME, NULL, NULL, 6213 TPACPI_BACKLIGHT_DEV_NAME, NULL, NULL,
6011 &ibm_backlight_data); 6214 &ibm_backlight_data);
6012 if (IS_ERR(ibm_backlight_device)) { 6215 if (IS_ERR(ibm_backlight_device)) {
6216 int rc = PTR_ERR(ibm_backlight_device);
6217 ibm_backlight_device = NULL;
6013 printk(TPACPI_ERR "Could not register backlight device\n"); 6218 printk(TPACPI_ERR "Could not register backlight device\n");
6014 return PTR_ERR(ibm_backlight_device); 6219 return rc;
6015 } 6220 }
6016 vdbg_printk(TPACPI_DBG_INIT | TPACPI_DBG_BRGHT, 6221 vdbg_printk(TPACPI_DBG_INIT | TPACPI_DBG_BRGHT,
6017 "brightness is supported\n"); 6222 "brightness is supported\n");
@@ -7499,6 +7704,21 @@ static struct ibm_struct fan_driver_data = {
7499 **************************************************************************** 7704 ****************************************************************************
7500 ****************************************************************************/ 7705 ****************************************************************************/
7501 7706
7707/*
 7708 * HKEY event callouts for other subdrivers go here
 7709 * (yes, it is ugly, but it is quick, safe, and gets the job done)
7710 */
7711static void tpacpi_driver_event(const unsigned int hkey_event)
7712{
7713}
7714
7715
7716
7717static void hotkey_driver_event(const unsigned int scancode)
7718{
7719 tpacpi_driver_event(TP_HKEY_EV_HOTKEY_BASE + scancode);
7720}
7721
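In this patch tpacpi_driver_event() is deliberately an empty stub: nothing is dispatched from it yet. Purely as an illustration of the intended shape (this is not part of the patch), a consumer could eventually be wired in along these lines, using an event constant added by this patch:

/* Hypothetical sketch, not part of this patch: one way a subdriver hook
 * could eventually hang off tpacpi_driver_event(). */
static void example_driver_event_consumer(const unsigned int hkey_event)
{
	switch (hkey_event) {
	case TP_HKEY_EV_BRGHT_CHANGED:
		/* e.g. let a backlight consumer resync its cached state */
		break;
	default:
		break;
	}
}
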
7502/* sysfs name ---------------------------------------------------------- */ 7722/* sysfs name ---------------------------------------------------------- */
7503static ssize_t thinkpad_acpi_pdev_name_show(struct device *dev, 7723static ssize_t thinkpad_acpi_pdev_name_show(struct device *dev,
7504 struct device_attribute *attr, 7724 struct device_attribute *attr,
diff --git a/drivers/pnp/pnpacpi/core.c b/drivers/pnp/pnpacpi/core.c
index c07fdb94d665..83b8b5ac49c9 100644
--- a/drivers/pnp/pnpacpi/core.c
+++ b/drivers/pnp/pnpacpi/core.c
@@ -153,6 +153,7 @@ static int __init pnpacpi_add_device(struct acpi_device *device)
153 acpi_handle temp = NULL; 153 acpi_handle temp = NULL;
154 acpi_status status; 154 acpi_status status;
155 struct pnp_dev *dev; 155 struct pnp_dev *dev;
156 struct acpi_hardware_id *id;
156 157
157 /* 158 /*
158 * If a PnPacpi device is not present , the device 159 * If a PnPacpi device is not present , the device
@@ -193,15 +194,12 @@ static int __init pnpacpi_add_device(struct acpi_device *device)
193 if (dev->capabilities & PNP_CONFIGURABLE) 194 if (dev->capabilities & PNP_CONFIGURABLE)
194 pnpacpi_parse_resource_option_data(dev); 195 pnpacpi_parse_resource_option_data(dev);
195 196
196 if (device->flags.compatible_ids) { 197 list_for_each_entry(id, &device->pnp.ids, list) {
197 struct acpica_device_id_list *cid_list = device->pnp.cid_list; 198 if (!strcmp(id->id, acpi_device_hid(device)))
198 int i; 199 continue;
199 200 if (!ispnpidacpi(id->id))
200 for (i = 0; i < cid_list->count; i++) { 201 continue;
201 if (!ispnpidacpi(cid_list->ids[i].string)) 202 pnp_add_id(dev, id->id);
202 continue;
203 pnp_add_id(dev, cid_list->ids[i].string);
204 }
205 } 203 }
206 204
207 /* clear out the damaged flags */ 205 /* clear out the damaged flags */
@@ -232,9 +230,8 @@ static int __init acpi_pnp_match(struct device *dev, void *_pnp)
232 struct pnp_dev *pnp = _pnp; 230 struct pnp_dev *pnp = _pnp;
233 231
234 /* true means it matched */ 232 /* true means it matched */
235 return acpi->flags.hardware_id 233 return !acpi_get_physical_device(acpi->handle)
236 && !acpi_get_physical_device(acpi->handle) 234 && compare_pnp_id(pnp->id, acpi_device_hid(acpi));
237 && compare_pnp_id(pnp->id, acpi->pnp.hardware_id);
238} 235}
239 236
240static int __init acpi_pnp_find_device(struct device *dev, acpi_handle * handle) 237static int __init acpi_pnp_find_device(struct device *dev, acpi_handle * handle)
diff --git a/drivers/scsi/pmcraid.h b/drivers/scsi/pmcraid.h
index 614b3a764fed..3441b3f90827 100644
--- a/drivers/scsi/pmcraid.h
+++ b/drivers/scsi/pmcraid.h
@@ -26,7 +26,6 @@
26#include <linux/completion.h> 26#include <linux/completion.h>
27#include <linux/list.h> 27#include <linux/list.h>
28#include <scsi/scsi.h> 28#include <scsi/scsi.h>
29#include <linux/kref.h>
30#include <scsi/scsi_cmnd.h> 29#include <scsi/scsi_cmnd.h>
31#include <linux/cdev.h> 30#include <linux/cdev.h>
32#include <net/netlink.h> 31#include <net/netlink.h>
diff --git a/drivers/usb/misc/sisusbvga/sisusb_init.c b/drivers/usb/misc/sisusbvga/sisusb_init.c
index 273de5d0934e..0ab990744830 100644
--- a/drivers/usb/misc/sisusbvga/sisusb_init.c
+++ b/drivers/usb/misc/sisusbvga/sisusb_init.c
@@ -43,7 +43,6 @@
43#include <linux/init.h> 43#include <linux/init.h>
44#include <linux/slab.h> 44#include <linux/slab.h>
45#include <linux/spinlock.h> 45#include <linux/spinlock.h>
46#include <linux/kref.h>
47 46
48#include "sisusb.h" 47#include "sisusb.h"
49 48
diff --git a/drivers/video/backlight/Kconfig b/drivers/video/backlight/Kconfig
index 90861cd93165..09bfa9662e4d 100644
--- a/drivers/video/backlight/Kconfig
+++ b/drivers/video/backlight/Kconfig
@@ -31,6 +31,13 @@ config LCD_CORGI
31 Say y here to support the LCD panels usually found on SHARP 31 Say y here to support the LCD panels usually found on SHARP
32 corgi (C7x0) and spitz (Cxx00) models. 32 corgi (C7x0) and spitz (Cxx00) models.
33 33
34config LCD_LMS283GF05
35 tristate "Samsung LMS283GF05 LCD"
36 depends on LCD_CLASS_DEVICE && SPI_MASTER && GENERIC_GPIO
37 help
38 SPI driver for Samsung LMS283GF05. This provides basic support
39 for powering the LCD up/down through a sysfs interface.
40
34config LCD_LTV350QV 41config LCD_LTV350QV
35 tristate "Samsung LTV350QV LCD Panel" 42 tristate "Samsung LTV350QV LCD Panel"
36 depends on LCD_CLASS_DEVICE && SPI_MASTER 43 depends on LCD_CLASS_DEVICE && SPI_MASTER
@@ -229,3 +236,29 @@ config BACKLIGHT_SAHARA
229 help 236 help
230 If you have a Tabletkiosk Sahara Touch-iT, say y to enable the 237 If you have a Tabletkiosk Sahara Touch-iT, say y to enable the
231 backlight driver. 238 backlight driver.
239
240config BACKLIGHT_WM831X
241 tristate "WM831x PMIC Backlight Driver"
242 depends on BACKLIGHT_CLASS_DEVICE && MFD_WM831X
243 help
244 If you have a backlight driven by the ISINK and DCDC of a
 245 WM831x PMIC, say y to enable the backlight driver for it.
246
247config BACKLIGHT_ADX
248 tristate "Avionic Design Xanthos Backlight Driver"
249 depends on BACKLIGHT_CLASS_DEVICE && ARCH_PXA_ADX
250 default y
251 help
252 Say Y to enable the backlight driver on Avionic Design Xanthos-based
253 boards.
254
255config BACKLIGHT_ADP5520
256 tristate "Backlight Driver for ADP5520/ADP5501 using WLED"
257 depends on BACKLIGHT_CLASS_DEVICE && PMIC_ADP5520
258 help
 259 If you have an LCD backlight connected to the BST/BL_SNK output of
260 ADP5520 or ADP5501, say Y here to enable this driver.
261
262 To compile this driver as a module, choose M here: the module will
263 be called adp5520_bl.
264
diff --git a/drivers/video/backlight/Makefile b/drivers/video/backlight/Makefile
index 4eb178c1d684..9a405548874c 100644
--- a/drivers/video/backlight/Makefile
+++ b/drivers/video/backlight/Makefile
@@ -3,6 +3,7 @@
3obj-$(CONFIG_LCD_CLASS_DEVICE) += lcd.o 3obj-$(CONFIG_LCD_CLASS_DEVICE) += lcd.o
4obj-$(CONFIG_LCD_CORGI) += corgi_lcd.o 4obj-$(CONFIG_LCD_CORGI) += corgi_lcd.o
5obj-$(CONFIG_LCD_HP700) += jornada720_lcd.o 5obj-$(CONFIG_LCD_HP700) += jornada720_lcd.o
6obj-$(CONFIG_LCD_LMS283GF05) += lms283gf05.o
6obj-$(CONFIG_LCD_LTV350QV) += ltv350qv.o 7obj-$(CONFIG_LCD_LTV350QV) += ltv350qv.o
7obj-$(CONFIG_LCD_ILI9320) += ili9320.o 8obj-$(CONFIG_LCD_ILI9320) += ili9320.o
8obj-$(CONFIG_LCD_PLATFORM) += platform_lcd.o 9obj-$(CONFIG_LCD_PLATFORM) += platform_lcd.o
@@ -24,4 +25,7 @@ obj-$(CONFIG_BACKLIGHT_DA903X) += da903x_bl.o
24obj-$(CONFIG_BACKLIGHT_MBP_NVIDIA) += mbp_nvidia_bl.o 25obj-$(CONFIG_BACKLIGHT_MBP_NVIDIA) += mbp_nvidia_bl.o
25obj-$(CONFIG_BACKLIGHT_TOSA) += tosa_bl.o 26obj-$(CONFIG_BACKLIGHT_TOSA) += tosa_bl.o
26obj-$(CONFIG_BACKLIGHT_SAHARA) += kb3886_bl.o 27obj-$(CONFIG_BACKLIGHT_SAHARA) += kb3886_bl.o
28obj-$(CONFIG_BACKLIGHT_WM831X) += wm831x_bl.o
29obj-$(CONFIG_BACKLIGHT_ADX) += adx_bl.o
30obj-$(CONFIG_BACKLIGHT_ADP5520) += adp5520_bl.o
27 31
diff --git a/drivers/video/backlight/adp5520_bl.c b/drivers/video/backlight/adp5520_bl.c
new file mode 100644
index 000000000000..ad05da5ba3c7
--- /dev/null
+++ b/drivers/video/backlight/adp5520_bl.c
@@ -0,0 +1,377 @@
1/*
2 * Backlight driver for Analog Devices ADP5520/ADP5501 MFD PMICs
3 *
4 * Copyright 2009 Analog Devices Inc.
5 *
6 * Licensed under the GPL-2 or later.
7 */
8
9#include <linux/kernel.h>
10#include <linux/init.h>
11#include <linux/platform_device.h>
12#include <linux/fb.h>
13#include <linux/backlight.h>
14#include <linux/mfd/adp5520.h>
15
16struct adp5520_bl {
17 struct device *master;
18 struct adp5520_backlight_platfrom_data *pdata;
19 struct mutex lock;
20 unsigned long cached_daylight_max;
21 int id;
22 int current_brightness;
23};
24
25static int adp5520_bl_set(struct backlight_device *bl, int brightness)
26{
27 struct adp5520_bl *data = bl_get_data(bl);
28 struct device *master = data->master;
29 int ret = 0;
30
31 if (data->pdata->en_ambl_sens) {
32 if ((brightness > 0) && (brightness < ADP5020_MAX_BRIGHTNESS)) {
33 /* Disable Ambient Light auto adjust */
34 ret |= adp5520_clr_bits(master, BL_CONTROL,
35 BL_AUTO_ADJ);
36 ret |= adp5520_write(master, DAYLIGHT_MAX, brightness);
37 } else {
38 /*
39 * MAX_BRIGHTNESS -> Enable Ambient Light auto adjust
40 * restore daylight l3 sysfs brightness
41 */
42 ret |= adp5520_write(master, DAYLIGHT_MAX,
43 data->cached_daylight_max);
44 ret |= adp5520_set_bits(master, BL_CONTROL,
45 BL_AUTO_ADJ);
46 }
47 } else {
48 ret |= adp5520_write(master, DAYLIGHT_MAX, brightness);
49 }
50
51 if (data->current_brightness && brightness == 0)
52 ret |= adp5520_set_bits(master,
53 MODE_STATUS, DIM_EN);
54 else if (data->current_brightness == 0 && brightness)
55 ret |= adp5520_clr_bits(master,
56 MODE_STATUS, DIM_EN);
57
58 if (!ret)
59 data->current_brightness = brightness;
60
61 return ret;
62}
63
64static int adp5520_bl_update_status(struct backlight_device *bl)
65{
66 int brightness = bl->props.brightness;
67 if (bl->props.power != FB_BLANK_UNBLANK)
68 brightness = 0;
69
70 if (bl->props.fb_blank != FB_BLANK_UNBLANK)
71 brightness = 0;
72
73 return adp5520_bl_set(bl, brightness);
74}
75
76static int adp5520_bl_get_brightness(struct backlight_device *bl)
77{
78 struct adp5520_bl *data = bl_get_data(bl);
79 int error;
80 uint8_t reg_val;
81
82 error = adp5520_read(data->master, BL_VALUE, &reg_val);
83
84 return error ? data->current_brightness : reg_val;
85}
86
87static struct backlight_ops adp5520_bl_ops = {
88 .update_status = adp5520_bl_update_status,
89 .get_brightness = adp5520_bl_get_brightness,
90};
91
92static int adp5520_bl_setup(struct backlight_device *bl)
93{
94 struct adp5520_bl *data = bl_get_data(bl);
95 struct device *master = data->master;
96 struct adp5520_backlight_platfrom_data *pdata = data->pdata;
97 int ret = 0;
98
99 ret |= adp5520_write(master, DAYLIGHT_MAX, pdata->l1_daylight_max);
100 ret |= adp5520_write(master, DAYLIGHT_DIM, pdata->l1_daylight_dim);
101
102 if (pdata->en_ambl_sens) {
103 data->cached_daylight_max = pdata->l1_daylight_max;
104 ret |= adp5520_write(master, OFFICE_MAX, pdata->l2_office_max);
105 ret |= adp5520_write(master, OFFICE_DIM, pdata->l2_office_dim);
106 ret |= adp5520_write(master, DARK_MAX, pdata->l3_dark_max);
107 ret |= adp5520_write(master, DARK_DIM, pdata->l3_dark_dim);
108 ret |= adp5520_write(master, L2_TRIP, pdata->l2_trip);
109 ret |= adp5520_write(master, L2_HYS, pdata->l2_hyst);
110 ret |= adp5520_write(master, L3_TRIP, pdata->l3_trip);
111 ret |= adp5520_write(master, L3_HYS, pdata->l3_hyst);
112 ret |= adp5520_write(master, ALS_CMPR_CFG,
113 ALS_CMPR_CFG_VAL(pdata->abml_filt, L3_EN));
114 }
115
116 ret |= adp5520_write(master, BL_CONTROL,
117 BL_CTRL_VAL(pdata->fade_led_law, pdata->en_ambl_sens));
118
119 ret |= adp5520_write(master, BL_FADE, FADE_VAL(pdata->fade_in,
120 pdata->fade_out));
121
122 ret |= adp5520_set_bits(master, MODE_STATUS, BL_EN | DIM_EN);
123
124 return ret;
125}
126
127static ssize_t adp5520_show(struct device *dev, char *buf, int reg)
128{
129 struct adp5520_bl *data = dev_get_drvdata(dev);
130 int error;
131 uint8_t reg_val;
132
133 mutex_lock(&data->lock);
134 error = adp5520_read(data->master, reg, &reg_val);
135 mutex_unlock(&data->lock);
136
137 return sprintf(buf, "%u\n", reg_val);
138}
139
140static ssize_t adp5520_store(struct device *dev, const char *buf,
141 size_t count, int reg)
142{
143 struct adp5520_bl *data = dev_get_drvdata(dev);
144 unsigned long val;
145 int ret;
146
147 ret = strict_strtoul(buf, 10, &val);
148 if (ret)
149 return ret;
150
151 mutex_lock(&data->lock);
152 adp5520_write(data->master, reg, val);
153 mutex_unlock(&data->lock);
154
155 return count;
156}
157
158static ssize_t adp5520_bl_dark_max_show(struct device *dev,
159 struct device_attribute *attr, char *buf)
160{
161 return adp5520_show(dev, buf, DARK_MAX);
162}
163
164static ssize_t adp5520_bl_dark_max_store(struct device *dev,
165 struct device_attribute *attr, const char *buf, size_t count)
166{
167 return adp5520_store(dev, buf, count, DARK_MAX);
168}
169static DEVICE_ATTR(dark_max, 0664, adp5520_bl_dark_max_show,
170 adp5520_bl_dark_max_store);
171
172static ssize_t adp5520_bl_office_max_show(struct device *dev,
173 struct device_attribute *attr, char *buf)
174{
175 return adp5520_show(dev, buf, OFFICE_MAX);
176}
177
178static ssize_t adp5520_bl_office_max_store(struct device *dev,
179 struct device_attribute *attr, const char *buf, size_t count)
180{
181 return adp5520_store(dev, buf, count, OFFICE_MAX);
182}
183static DEVICE_ATTR(office_max, 0664, adp5520_bl_office_max_show,
184 adp5520_bl_office_max_store);
185
186static ssize_t adp5520_bl_daylight_max_show(struct device *dev,
187 struct device_attribute *attr, char *buf)
188{
189 return adp5520_show(dev, buf, DAYLIGHT_MAX);
190}
191
192static ssize_t adp5520_bl_daylight_max_store(struct device *dev,
193 struct device_attribute *attr, const char *buf, size_t count)
194{
195 struct adp5520_bl *data = dev_get_drvdata(dev);
196
197 strict_strtoul(buf, 10, &data->cached_daylight_max);
198 return adp5520_store(dev, buf, count, DAYLIGHT_MAX);
199}
200static DEVICE_ATTR(daylight_max, 0664, adp5520_bl_daylight_max_show,
201 adp5520_bl_daylight_max_store);
202
203static ssize_t adp5520_bl_dark_dim_show(struct device *dev,
204 struct device_attribute *attr, char *buf)
205{
206 return adp5520_show(dev, buf, DARK_DIM);
207}
208
209static ssize_t adp5520_bl_dark_dim_store(struct device *dev,
210 struct device_attribute *attr,
211 const char *buf, size_t count)
212{
213 return adp5520_store(dev, buf, count, DARK_DIM);
214}
215static DEVICE_ATTR(dark_dim, 0664, adp5520_bl_dark_dim_show,
216 adp5520_bl_dark_dim_store);
217
218static ssize_t adp5520_bl_office_dim_show(struct device *dev,
219 struct device_attribute *attr, char *buf)
220{
221 return adp5520_show(dev, buf, OFFICE_DIM);
222}
223
224static ssize_t adp5520_bl_office_dim_store(struct device *dev,
225 struct device_attribute *attr,
226 const char *buf, size_t count)
227{
228 return adp5520_store(dev, buf, count, OFFICE_DIM);
229}
230static DEVICE_ATTR(office_dim, 0664, adp5520_bl_office_dim_show,
231 adp5520_bl_office_dim_store);
232
233static ssize_t adp5520_bl_daylight_dim_show(struct device *dev,
234 struct device_attribute *attr, char *buf)
235{
236 return adp5520_show(dev, buf, DAYLIGHT_DIM);
237}
238
239static ssize_t adp5520_bl_daylight_dim_store(struct device *dev,
240 struct device_attribute *attr,
241 const char *buf, size_t count)
242{
243 return adp5520_store(dev, buf, count, DAYLIGHT_DIM);
244}
245static DEVICE_ATTR(daylight_dim, 0664, adp5520_bl_daylight_dim_show,
246 adp5520_bl_daylight_dim_store);
247
248static struct attribute *adp5520_bl_attributes[] = {
249 &dev_attr_dark_max.attr,
250 &dev_attr_dark_dim.attr,
251 &dev_attr_office_max.attr,
252 &dev_attr_office_dim.attr,
253 &dev_attr_daylight_max.attr,
254 &dev_attr_daylight_dim.attr,
255 NULL
256};
257
258static const struct attribute_group adp5520_bl_attr_group = {
259 .attrs = adp5520_bl_attributes,
260};
261
262static int __devinit adp5520_bl_probe(struct platform_device *pdev)
263{
264 struct backlight_device *bl;
265 struct adp5520_bl *data;
266 int ret = 0;
267
268 data = kzalloc(sizeof(*data), GFP_KERNEL);
269 if (data == NULL)
270 return -ENOMEM;
271
272 data->master = pdev->dev.parent;
273 data->pdata = pdev->dev.platform_data;
274
275 if (data->pdata == NULL) {
276 dev_err(&pdev->dev, "missing platform data\n");
277 kfree(data);
278 return -ENODEV;
279 }
280
281 data->id = pdev->id;
282 data->current_brightness = 0;
283
284 mutex_init(&data->lock);
285
286 bl = backlight_device_register(pdev->name, data->master,
287 data, &adp5520_bl_ops);
288 if (IS_ERR(bl)) {
289 dev_err(&pdev->dev, "failed to register backlight\n");
290 kfree(data);
291 return PTR_ERR(bl);
292 }
293
294 bl->props.max_brightness =
295 bl->props.brightness = ADP5020_MAX_BRIGHTNESS;
296
297 if (data->pdata->en_ambl_sens)
298 ret = sysfs_create_group(&bl->dev.kobj,
299 &adp5520_bl_attr_group);
300
301 if (ret) {
302 dev_err(&pdev->dev, "failed to register sysfs\n");
303 backlight_device_unregister(bl);
 304		kfree(data);
 305		return ret;
 306	}
306
307 platform_set_drvdata(pdev, bl);
308 ret |= adp5520_bl_setup(bl);
309 backlight_update_status(bl);
310
311 return ret;
312}
313
314static int __devexit adp5520_bl_remove(struct platform_device *pdev)
315{
316 struct backlight_device *bl = platform_get_drvdata(pdev);
317 struct adp5520_bl *data = bl_get_data(bl);
318
319 adp5520_clr_bits(data->master, MODE_STATUS, BL_EN);
320
321 if (data->pdata->en_ambl_sens)
322 sysfs_remove_group(&bl->dev.kobj,
323 &adp5520_bl_attr_group);
324
325 backlight_device_unregister(bl);
326 kfree(data);
327
328 return 0;
329}
330
331#ifdef CONFIG_PM
332static int adp5520_bl_suspend(struct platform_device *pdev,
333 pm_message_t state)
334{
335 struct backlight_device *bl = platform_get_drvdata(pdev);
336 return adp5520_bl_set(bl, 0);
337}
338
339static int adp5520_bl_resume(struct platform_device *pdev)
340{
341 struct backlight_device *bl = platform_get_drvdata(pdev);
342
343 backlight_update_status(bl);
344 return 0;
345}
346#else
347#define adp5520_bl_suspend NULL
348#define adp5520_bl_resume NULL
349#endif
350
351static struct platform_driver adp5520_bl_driver = {
352 .driver = {
353 .name = "adp5520-backlight",
354 .owner = THIS_MODULE,
355 },
356 .probe = adp5520_bl_probe,
357 .remove = __devexit_p(adp5520_bl_remove),
358 .suspend = adp5520_bl_suspend,
359 .resume = adp5520_bl_resume,
360};
361
362static int __init adp5520_bl_init(void)
363{
364 return platform_driver_register(&adp5520_bl_driver);
365}
366module_init(adp5520_bl_init);
367
368static void __exit adp5520_bl_exit(void)
369{
370 platform_driver_unregister(&adp5520_bl_driver);
371}
372module_exit(adp5520_bl_exit);
373
374MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>");
375MODULE_DESCRIPTION("ADP5520(01) Backlight Driver");
376MODULE_LICENSE("GPL");
377MODULE_ALIAS("platform:adp5520-backlight");
diff --git a/drivers/video/backlight/adx_bl.c b/drivers/video/backlight/adx_bl.c
new file mode 100644
index 000000000000..2c3bdfc620b7
--- /dev/null
+++ b/drivers/video/backlight/adx_bl.c
@@ -0,0 +1,178 @@
1/*
2 * linux/drivers/video/backlight/adx.c
3 *
4 * Copyright (C) 2009 Avionic Design GmbH
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 *
10 * Written by Thierry Reding <thierry.reding@avionic-design.de>
11 */
12
13#include <linux/backlight.h>
14#include <linux/fb.h>
15#include <linux/io.h>
16#include <linux/module.h>
17#include <linux/platform_device.h>
18
19/* register definitions */
20#define ADX_BACKLIGHT_CONTROL 0x00
21#define ADX_BACKLIGHT_CONTROL_ENABLE (1 << 0)
22#define ADX_BACKLIGHT_BRIGHTNESS 0x08
23#define ADX_BACKLIGHT_STATUS 0x10
24#define ADX_BACKLIGHT_ERROR 0x18
25
26struct adxbl {
27 void __iomem *base;
28};
29
30static int adx_backlight_update_status(struct backlight_device *bldev)
31{
32 struct adxbl *bl = bl_get_data(bldev);
33 u32 value;
34
35 value = bldev->props.brightness;
36 writel(value, bl->base + ADX_BACKLIGHT_BRIGHTNESS);
37
38 value = readl(bl->base + ADX_BACKLIGHT_CONTROL);
39
40 if (bldev->props.state & BL_CORE_FBBLANK)
41 value &= ~ADX_BACKLIGHT_CONTROL_ENABLE;
42 else
43 value |= ADX_BACKLIGHT_CONTROL_ENABLE;
44
45 writel(value, bl->base + ADX_BACKLIGHT_CONTROL);
46
47 return 0;
48}
49
50static int adx_backlight_get_brightness(struct backlight_device *bldev)
51{
52 struct adxbl *bl = bl_get_data(bldev);
53 u32 brightness;
54
55 brightness = readl(bl->base + ADX_BACKLIGHT_BRIGHTNESS);
56 return brightness & 0xff;
57}
58
59static int adx_backlight_check_fb(struct fb_info *fb)
60{
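	/* no framebuffer association is tracked, so report a match for any fb */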
61 return 1;
62}
63
64static struct backlight_ops adx_backlight_ops = {
65 .options = 0,
66 .update_status = adx_backlight_update_status,
67 .get_brightness = adx_backlight_get_brightness,
68 .check_fb = adx_backlight_check_fb,
69};
70
71static int __devinit adx_backlight_probe(struct platform_device *pdev)
72{
73 struct backlight_device *bldev;
74 struct resource *res;
75 struct adxbl *bl;
76 int ret = 0;
77
78 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
79 if (!res) {
80 ret = -ENXIO;
81 goto out;
82 }
83
84 res = devm_request_mem_region(&pdev->dev, res->start,
85 resource_size(res), res->name);
86 if (!res) {
87 ret = -ENXIO;
88 goto out;
89 }
90
91 bl = devm_kzalloc(&pdev->dev, sizeof(*bl), GFP_KERNEL);
92 if (!bl) {
93 ret = -ENOMEM;
94 goto out;
95 }
96
97 bl->base = devm_ioremap_nocache(&pdev->dev, res->start,
98 resource_size(res));
99 if (!bl->base) {
100 ret = -ENXIO;
101 goto out;
102 }
103
104 bldev = backlight_device_register(dev_name(&pdev->dev), &pdev->dev, bl,
105 &adx_backlight_ops);
 106	if (IS_ERR(bldev)) {
 107		ret = PTR_ERR(bldev);
108 goto out;
109 }
110
111 bldev->props.max_brightness = 0xff;
112 bldev->props.brightness = 0xff;
113 bldev->props.power = FB_BLANK_UNBLANK;
114
115 platform_set_drvdata(pdev, bldev);
116
117out:
118 return ret;
119}
120
121static int __devexit adx_backlight_remove(struct platform_device *pdev)
122{
123 struct backlight_device *bldev;
124 int ret = 0;
125
126 bldev = platform_get_drvdata(pdev);
127 bldev->props.power = FB_BLANK_UNBLANK;
128 bldev->props.brightness = 0xff;
129 backlight_update_status(bldev);
130 backlight_device_unregister(bldev);
131 platform_set_drvdata(pdev, NULL);
132
133 return ret;
134}
135
136#ifdef CONFIG_PM
137static int adx_backlight_suspend(struct platform_device *pdev,
138 pm_message_t state)
139{
140 return 0;
141}
142
143static int adx_backlight_resume(struct platform_device *pdev)
144{
145 return 0;
146}
147#else
148#define adx_backlight_suspend NULL
149#define adx_backlight_resume NULL
150#endif
151
152static struct platform_driver adx_backlight_driver = {
153 .probe = adx_backlight_probe,
154 .remove = __devexit_p(adx_backlight_remove),
155 .suspend = adx_backlight_suspend,
156 .resume = adx_backlight_resume,
157 .driver = {
158 .name = "adx-backlight",
159 .owner = THIS_MODULE,
160 },
161};
162
163static int __init adx_backlight_init(void)
164{
165 return platform_driver_register(&adx_backlight_driver);
166}
167
168static void __exit adx_backlight_exit(void)
169{
170 platform_driver_unregister(&adx_backlight_driver);
171}
172
173module_init(adx_backlight_init);
174module_exit(adx_backlight_exit);
175
176MODULE_AUTHOR("Thierry Reding <thierry.reding@avionic-design.de>");
177MODULE_DESCRIPTION("Avionic Design Xanthos Backlight Driver");
178MODULE_LICENSE("GPL v2");
diff --git a/drivers/video/backlight/backlight.c b/drivers/video/backlight/backlight.c
index 157057c79ca3..6615ac7fa60a 100644
--- a/drivers/video/backlight/backlight.c
+++ b/drivers/video/backlight/backlight.c
@@ -73,6 +73,27 @@ static inline void backlight_unregister_fb(struct backlight_device *bd)
73} 73}
74#endif /* CONFIG_FB */ 74#endif /* CONFIG_FB */
75 75
76static void backlight_generate_event(struct backlight_device *bd,
77 enum backlight_update_reason reason)
78{
79 char *envp[2];
80
81 switch (reason) {
82 case BACKLIGHT_UPDATE_SYSFS:
83 envp[0] = "SOURCE=sysfs";
84 break;
85 case BACKLIGHT_UPDATE_HOTKEY:
86 envp[0] = "SOURCE=hotkey";
87 break;
88 default:
89 envp[0] = "SOURCE=unknown";
90 break;
91 }
92 envp[1] = NULL;
93 kobject_uevent_env(&bd->dev.kobj, KOBJ_CHANGE, envp);
94 sysfs_notify(&bd->dev.kobj, NULL, "actual_brightness");
95}
96
76static ssize_t backlight_show_power(struct device *dev, 97static ssize_t backlight_show_power(struct device *dev,
77 struct device_attribute *attr,char *buf) 98 struct device_attribute *attr,char *buf)
78{ 99{
@@ -142,6 +163,8 @@ static ssize_t backlight_store_brightness(struct device *dev,
142 } 163 }
143 mutex_unlock(&bd->ops_lock); 164 mutex_unlock(&bd->ops_lock);
144 165
166 backlight_generate_event(bd, BACKLIGHT_UPDATE_SYSFS);
167
145 return rc; 168 return rc;
146} 169}
147 170
@@ -214,6 +237,25 @@ static struct device_attribute bl_device_attributes[] = {
214}; 237};
215 238
216/** 239/**
240 * backlight_force_update - tell the backlight subsystem that hardware state
241 * has changed
242 * @bd: the backlight device to update
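 * @reason: the reason for the update (e.g. BACKLIGHT_UPDATE_HOTKEY)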
243 *
244 * Updates the internal state of the backlight in response to a hardware event,
 245 * and generates a uevent to notify userspace
246 */
247void backlight_force_update(struct backlight_device *bd,
248 enum backlight_update_reason reason)
249{
250 mutex_lock(&bd->ops_lock);
251 if (bd->ops && bd->ops->get_brightness)
252 bd->props.brightness = bd->ops->get_brightness(bd);
253 mutex_unlock(&bd->ops_lock);
254 backlight_generate_event(bd, reason);
255}
256EXPORT_SYMBOL(backlight_force_update);
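/*
 * Illustrative usage (not part of this patch): a driver that detects a
 * hardware-initiated brightness change, for example from a hotkey
 * handler, could call
 *
 *	backlight_force_update(bd, BACKLIGHT_UPDATE_HOTKEY);
 *
 * which re-reads the brightness via ops->get_brightness() and emits a
 * KOBJ_CHANGE uevent with SOURCE=hotkey so userspace can react.
 */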
257
258/**
217 * backlight_device_register - create and register a new object of 259 * backlight_device_register - create and register a new object of
218 * backlight_device class. 260 * backlight_device class.
219 * @name: the name of the new object(must be the same as the name of the 261 * @name: the name of the new object(must be the same as the name of the
diff --git a/drivers/video/backlight/hp680_bl.c b/drivers/video/backlight/hp680_bl.c
index 5be55a20d8c7..7fb4eefff80d 100644
--- a/drivers/video/backlight/hp680_bl.c
+++ b/drivers/video/backlight/hp680_bl.c
@@ -103,7 +103,7 @@ static struct backlight_ops hp680bl_ops = {
103 .update_status = hp680bl_set_intensity, 103 .update_status = hp680bl_set_intensity,
104}; 104};
105 105
106static int __init hp680bl_probe(struct platform_device *pdev) 106static int __devinit hp680bl_probe(struct platform_device *pdev)
107{ 107{
108 struct backlight_device *bd; 108 struct backlight_device *bd;
109 109
diff --git a/drivers/video/backlight/lms283gf05.c b/drivers/video/backlight/lms283gf05.c
new file mode 100644
index 000000000000..447b542a20ca
--- /dev/null
+++ b/drivers/video/backlight/lms283gf05.c
@@ -0,0 +1,242 @@
1/*
2 * lms283gf05.c -- support for Samsung LMS283GF05 LCD
3 *
4 * Copyright (c) 2009 Marek Vasut <marek.vasut@gmail.com>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10
11#include <linux/device.h>
12#include <linux/kernel.h>
13#include <linux/delay.h>
14#include <linux/gpio.h>
15#include <linux/lcd.h>
16
17#include <linux/spi/spi.h>
18#include <linux/spi/lms283gf05.h>
19
20struct lms283gf05_state {
21 struct spi_device *spi;
22 struct lcd_device *ld;
23};
24
25struct lms283gf05_seq {
26 unsigned char reg;
27 unsigned short value;
28 unsigned char delay;
29};
30
31/* Magic sequences supplied by manufacturer, for details refer to datasheet */
32static struct lms283gf05_seq disp_initseq[] = {
33 /* REG, VALUE, DELAY */
34 { 0x07, 0x0000, 0 },
35 { 0x13, 0x0000, 10 },
36
37 { 0x11, 0x3004, 0 },
38 { 0x14, 0x200F, 0 },
39 { 0x10, 0x1a20, 0 },
40 { 0x13, 0x0040, 50 },
41
42 { 0x13, 0x0060, 0 },
43 { 0x13, 0x0070, 200 },
44
45 { 0x01, 0x0127, 0 },
46 { 0x02, 0x0700, 0 },
47 { 0x03, 0x1030, 0 },
48 { 0x08, 0x0208, 0 },
49 { 0x0B, 0x0620, 0 },
50 { 0x0C, 0x0110, 0 },
51 { 0x30, 0x0120, 0 },
52 { 0x31, 0x0127, 0 },
53 { 0x32, 0x0000, 0 },
54 { 0x33, 0x0503, 0 },
55 { 0x34, 0x0727, 0 },
56 { 0x35, 0x0124, 0 },
57 { 0x36, 0x0706, 0 },
58 { 0x37, 0x0701, 0 },
59 { 0x38, 0x0F00, 0 },
60 { 0x39, 0x0F00, 0 },
61 { 0x40, 0x0000, 0 },
62 { 0x41, 0x0000, 0 },
63 { 0x42, 0x013f, 0 },
64 { 0x43, 0x0000, 0 },
65 { 0x44, 0x013f, 0 },
66 { 0x45, 0x0000, 0 },
67 { 0x46, 0xef00, 0 },
68 { 0x47, 0x013f, 0 },
69 { 0x48, 0x0000, 0 },
70 { 0x07, 0x0015, 30 },
71
72 { 0x07, 0x0017, 0 },
73
74 { 0x20, 0x0000, 0 },
75 { 0x21, 0x0000, 0 },
76 { 0x22, 0x0000, 0 }
77};
78
79static struct lms283gf05_seq disp_pdwnseq[] = {
80 { 0x07, 0x0016, 30 },
81
82 { 0x07, 0x0004, 0 },
83 { 0x10, 0x0220, 20 },
84
85 { 0x13, 0x0060, 50 },
86
87 { 0x13, 0x0040, 50 },
88
89 { 0x13, 0x0000, 0 },
90 { 0x10, 0x0000, 0 }
91};
92
93
94static void lms283gf05_reset(unsigned long gpio, bool inverted)
95{
96 gpio_set_value(gpio, !inverted);
97 mdelay(100);
98 gpio_set_value(gpio, inverted);
99 mdelay(20);
100 gpio_set_value(gpio, !inverted);
101 mdelay(20);
102}
103
104static void lms283gf05_toggle(struct spi_device *spi,
105 struct lms283gf05_seq *seq, int sz)
106{
107 char buf[3];
108 int i;
109
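	/*
	 * Each sequence entry is sent as two 3-byte SPI transfers; the
	 * 0x74/0x76 prefixes below appear to be the panel's start bytes
	 * for a register-index write and a register-data write (assumed
	 * from the access pattern; see the LMS283GF05 datasheet).
	 */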
110 for (i = 0; i < sz; i++) {
111 buf[0] = 0x74;
112 buf[1] = 0x00;
113 buf[2] = seq[i].reg;
114 spi_write(spi, buf, 3);
115
116 buf[0] = 0x76;
117 buf[1] = seq[i].value >> 8;
118 buf[2] = seq[i].value & 0xff;
119 spi_write(spi, buf, 3);
120
121 mdelay(seq[i].delay);
122 }
123}
124
125static int lms283gf05_power_set(struct lcd_device *ld, int power)
126{
127 struct lms283gf05_state *st = lcd_get_data(ld);
128 struct spi_device *spi = st->spi;
129 struct lms283gf05_pdata *pdata = spi->dev.platform_data;
130
131 if (power) {
132 if (pdata)
133 lms283gf05_reset(pdata->reset_gpio,
134 pdata->reset_inverted);
135 lms283gf05_toggle(spi, disp_initseq, ARRAY_SIZE(disp_initseq));
136 } else {
137 lms283gf05_toggle(spi, disp_pdwnseq, ARRAY_SIZE(disp_pdwnseq));
138 if (pdata)
139 gpio_set_value(pdata->reset_gpio,
140 pdata->reset_inverted);
141 }
142
143 return 0;
144}
145
146static struct lcd_ops lms_ops = {
147 .set_power = lms283gf05_power_set,
148 .get_power = NULL,
149};
150
151static int __devinit lms283gf05_probe(struct spi_device *spi)
152{
153 struct lms283gf05_state *st;
154 struct lms283gf05_pdata *pdata = spi->dev.platform_data;
155 struct lcd_device *ld;
156 int ret = 0;
157
158 if (pdata != NULL) {
 159		ret = gpio_request(pdata->reset_gpio, "LMS283GF05 RESET");
160 if (ret)
161 return ret;
162
163 ret = gpio_direction_output(pdata->reset_gpio,
164 !pdata->reset_inverted);
165 if (ret)
166 goto err;
167 }
168
169 st = kzalloc(sizeof(struct lms283gf05_state), GFP_KERNEL);
170 if (st == NULL) {
171 dev_err(&spi->dev, "No memory for device state\n");
172 ret = -ENOMEM;
173 goto err;
174 }
175
176 ld = lcd_device_register("lms283gf05", &spi->dev, st, &lms_ops);
177 if (IS_ERR(ld)) {
178 ret = PTR_ERR(ld);
179 goto err2;
180 }
181
182 st->spi = spi;
183 st->ld = ld;
184
185 dev_set_drvdata(&spi->dev, st);
186
187 /* kick in the LCD */
188 if (pdata)
189 lms283gf05_reset(pdata->reset_gpio, pdata->reset_inverted);
190 lms283gf05_toggle(spi, disp_initseq, ARRAY_SIZE(disp_initseq));
191
192 return 0;
193
194err2:
195 kfree(st);
196err:
197 if (pdata != NULL)
198 gpio_free(pdata->reset_gpio);
199
200 return ret;
201}
202
203static int __devexit lms283gf05_remove(struct spi_device *spi)
204{
205 struct lms283gf05_state *st = dev_get_drvdata(&spi->dev);
206 struct lms283gf05_pdata *pdata = st->spi->dev.platform_data;
207
208 lcd_device_unregister(st->ld);
209
210 if (pdata != NULL)
211 gpio_free(pdata->reset_gpio);
212
213 kfree(st);
214
215 return 0;
216}
217
218static struct spi_driver lms283gf05_driver = {
219 .driver = {
220 .name = "lms283gf05",
221 .owner = THIS_MODULE,
222 },
223 .probe = lms283gf05_probe,
224 .remove = __devexit_p(lms283gf05_remove),
225};
226
227static __init int lms283gf05_init(void)
228{
229 return spi_register_driver(&lms283gf05_driver);
230}
231
232static __exit void lms283gf05_exit(void)
233{
234 spi_unregister_driver(&lms283gf05_driver);
235}
236
237module_init(lms283gf05_init);
238module_exit(lms283gf05_exit);
239
240MODULE_AUTHOR("Marek Vasut <marek.vasut@gmail.com>");
 241MODULE_DESCRIPTION("LMS283GF05 LCD");
242MODULE_LICENSE("GPL v2");
diff --git a/drivers/video/backlight/mbp_nvidia_bl.c b/drivers/video/backlight/mbp_nvidia_bl.c
index 3bb4c0a50c62..9edb8d7c295f 100644
--- a/drivers/video/backlight/mbp_nvidia_bl.c
+++ b/drivers/video/backlight/mbp_nvidia_bl.c
@@ -166,6 +166,15 @@ static const struct dmi_system_id __initdata mbp_device_table[] = {
166 }, 166 },
167 { 167 {
168 .callback = mbp_dmi_match, 168 .callback = mbp_dmi_match,
169 .ident = "MacBookAir 1,1",
170 .matches = {
171 DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
172 DMI_MATCH(DMI_PRODUCT_NAME, "MacBookAir1,1"),
173 },
174 .driver_data = (void *)&intel_chipset_data,
175 },
176 {
177 .callback = mbp_dmi_match,
169 .ident = "MacBook 5,1", 178 .ident = "MacBook 5,1",
170 .matches = { 179 .matches = {
171 DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."), 180 DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
@@ -175,6 +184,15 @@ static const struct dmi_system_id __initdata mbp_device_table[] = {
175 }, 184 },
176 { 185 {
177 .callback = mbp_dmi_match, 186 .callback = mbp_dmi_match,
187 .ident = "MacBook 5,2",
188 .matches = {
189 DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
190 DMI_MATCH(DMI_PRODUCT_NAME, "MacBook5,2"),
191 },
192 .driver_data = (void *)&nvidia_chipset_data,
193 },
194 {
195 .callback = mbp_dmi_match,
178 .ident = "MacBookAir 2,1", 196 .ident = "MacBookAir 2,1",
179 .matches = { 197 .matches = {
180 DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."), 198 DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
@@ -191,6 +209,24 @@ static const struct dmi_system_id __initdata mbp_device_table[] = {
191 }, 209 },
192 .driver_data = (void *)&nvidia_chipset_data, 210 .driver_data = (void *)&nvidia_chipset_data,
193 }, 211 },
212 {
213 .callback = mbp_dmi_match,
214 .ident = "MacBookPro 5,2",
215 .matches = {
216 DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
217 DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro5,2"),
218 },
219 .driver_data = (void *)&nvidia_chipset_data,
220 },
221 {
222 .callback = mbp_dmi_match,
223 .ident = "MacBookPro 5,5",
224 .matches = {
225 DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
226 DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro5,5"),
227 },
228 .driver_data = (void *)&nvidia_chipset_data,
229 },
194 { } 230 { }
195}; 231};
196 232
diff --git a/drivers/video/backlight/wm831x_bl.c b/drivers/video/backlight/wm831x_bl.c
new file mode 100644
index 000000000000..467bdb7efb23
--- /dev/null
+++ b/drivers/video/backlight/wm831x_bl.c
@@ -0,0 +1,250 @@
1/*
2 * Backlight driver for Wolfson Microelectronics WM831x PMICs
3 *
 4 * Copyright 2009 Wolfson Microelectronics plc
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10
11#include <linux/kernel.h>
12#include <linux/init.h>
13#include <linux/platform_device.h>
14#include <linux/fb.h>
15#include <linux/backlight.h>
16
17#include <linux/mfd/wm831x/core.h>
18#include <linux/mfd/wm831x/pdata.h>
19#include <linux/mfd/wm831x/regulator.h>
20
21struct wm831x_backlight_data {
22 struct wm831x *wm831x;
23 int isink_reg;
24 int current_brightness;
25};
26
27static int wm831x_backlight_set(struct backlight_device *bl, int brightness)
28{
29 struct wm831x_backlight_data *data = bl_get_data(bl);
30 struct wm831x *wm831x = data->wm831x;
31 int power_up = !data->current_brightness && brightness;
32 int power_down = data->current_brightness && !brightness;
33 int ret;
34
35 if (power_up) {
36 /* Enable the ISINK */
37 ret = wm831x_set_bits(wm831x, data->isink_reg,
38 WM831X_CS1_ENA, WM831X_CS1_ENA);
39 if (ret < 0)
40 goto err;
41
42 /* Enable the DC-DC */
43 ret = wm831x_set_bits(wm831x, WM831X_DCDC_ENABLE,
44 WM831X_DC4_ENA, WM831X_DC4_ENA);
45 if (ret < 0)
46 goto err;
47 }
48
49 if (power_down) {
50 /* DCDC first */
51 ret = wm831x_set_bits(wm831x, WM831X_DCDC_ENABLE,
52 WM831X_DC4_ENA, 0);
53 if (ret < 0)
54 goto err;
55
56 /* ISINK */
57 ret = wm831x_set_bits(wm831x, data->isink_reg,
58 WM831X_CS1_DRIVE | WM831X_CS1_ENA, 0);
59 if (ret < 0)
60 goto err;
61 }
62
63 /* Set the new brightness */
64 ret = wm831x_set_bits(wm831x, data->isink_reg,
65 WM831X_CS1_ISEL_MASK, brightness);
66 if (ret < 0)
67 goto err;
68
69 if (power_up) {
70 /* Drive current through the ISINK */
71 ret = wm831x_set_bits(wm831x, data->isink_reg,
72 WM831X_CS1_DRIVE, WM831X_CS1_DRIVE);
73 if (ret < 0)
74 return ret;
75 }
76
77 data->current_brightness = brightness;
78
79 return 0;
80
81err:
 82	/* If we were in the middle of a power transition, always shut down
83 * for safety.
84 */
85 if (power_up || power_down) {
86 wm831x_set_bits(wm831x, WM831X_DCDC_ENABLE, WM831X_DC4_ENA, 0);
87 wm831x_set_bits(wm831x, data->isink_reg, WM831X_CS1_ENA, 0);
88 }
89
90 return ret;
91}
92
93static int wm831x_backlight_update_status(struct backlight_device *bl)
94{
95 int brightness = bl->props.brightness;
96
97 if (bl->props.power != FB_BLANK_UNBLANK)
98 brightness = 0;
99
100 if (bl->props.fb_blank != FB_BLANK_UNBLANK)
101 brightness = 0;
102
103 if (bl->props.state & BL_CORE_SUSPENDED)
104 brightness = 0;
105
106 return wm831x_backlight_set(bl, brightness);
107}
108
109static int wm831x_backlight_get_brightness(struct backlight_device *bl)
110{
111 struct wm831x_backlight_data *data = bl_get_data(bl);
112 return data->current_brightness;
113}
114
115static struct backlight_ops wm831x_backlight_ops = {
116 .options = BL_CORE_SUSPENDRESUME,
117 .update_status = wm831x_backlight_update_status,
118 .get_brightness = wm831x_backlight_get_brightness,
119};
120
121static int wm831x_backlight_probe(struct platform_device *pdev)
122{
123 struct wm831x *wm831x = dev_get_drvdata(pdev->dev.parent);
124 struct wm831x_pdata *wm831x_pdata;
125 struct wm831x_backlight_pdata *pdata;
126 struct wm831x_backlight_data *data;
127 struct backlight_device *bl;
128 int ret, i, max_isel, isink_reg, dcdc_cfg;
129
130 /* We need platform data */
131 if (pdev->dev.parent->platform_data) {
132 wm831x_pdata = pdev->dev.parent->platform_data;
133 pdata = wm831x_pdata->backlight;
134 } else {
135 pdata = NULL;
136 }
137
138 if (!pdata) {
139 dev_err(&pdev->dev, "No platform data supplied\n");
140 return -EINVAL;
141 }
142
143 /* Figure out the maximum current we can use */
144 for (i = 0; i < WM831X_ISINK_MAX_ISEL; i++) {
145 if (wm831x_isinkv_values[i] > pdata->max_uA)
146 break;
147 }
148
149 if (i == 0) {
150 dev_err(&pdev->dev, "Invalid max_uA: %duA\n", pdata->max_uA);
151 return -EINVAL;
152 }
153 max_isel = i - 1;
154
155 if (pdata->max_uA != wm831x_isinkv_values[max_isel])
156 dev_warn(&pdev->dev,
157 "Maximum current is %duA not %duA as requested\n",
158 wm831x_isinkv_values[max_isel], pdata->max_uA);
159
160 switch (pdata->isink) {
161 case 1:
162 isink_reg = WM831X_CURRENT_SINK_1;
163 dcdc_cfg = 0;
164 break;
165 case 2:
166 isink_reg = WM831X_CURRENT_SINK_2;
167 dcdc_cfg = WM831X_DC4_FBSRC;
168 break;
169 default:
170 dev_err(&pdev->dev, "Invalid ISINK %d\n", pdata->isink);
171 return -EINVAL;
172 }
173
174 /* Configure the ISINK to use for feedback */
175 ret = wm831x_reg_unlock(wm831x);
176 if (ret < 0)
177 return ret;
178
179 ret = wm831x_set_bits(wm831x, WM831X_DC4_CONTROL, WM831X_DC4_FBSRC,
180 dcdc_cfg);
181
182 wm831x_reg_lock(wm831x);
183 if (ret < 0)
184 return ret;
185
186 data = kzalloc(sizeof(*data), GFP_KERNEL);
187 if (data == NULL)
188 return -ENOMEM;
189
190 data->wm831x = wm831x;
191 data->current_brightness = 0;
192 data->isink_reg = isink_reg;
193
194 bl = backlight_device_register("wm831x", &pdev->dev,
195 data, &wm831x_backlight_ops);
196 if (IS_ERR(bl)) {
197 dev_err(&pdev->dev, "failed to register backlight\n");
198 kfree(data);
199 return PTR_ERR(bl);
200 }
201
202 bl->props.max_brightness = max_isel;
203 bl->props.brightness = max_isel;
204
205 platform_set_drvdata(pdev, bl);
206
207 /* Disable the DCDC if it was started so we can bootstrap */
208 wm831x_set_bits(wm831x, WM831X_DCDC_ENABLE, WM831X_DC4_ENA, 0);
209
210
211 backlight_update_status(bl);
212
213 return 0;
214}
215
216static int wm831x_backlight_remove(struct platform_device *pdev)
217{
218 struct backlight_device *bl = platform_get_drvdata(pdev);
219 struct wm831x_backlight_data *data = bl_get_data(bl);
220
221 backlight_device_unregister(bl);
222 kfree(data);
223 return 0;
224}
225
226static struct platform_driver wm831x_backlight_driver = {
227 .driver = {
228 .name = "wm831x-backlight",
229 .owner = THIS_MODULE,
230 },
231 .probe = wm831x_backlight_probe,
232 .remove = wm831x_backlight_remove,
233};
234
235static int __init wm831x_backlight_init(void)
236{
237 return platform_driver_register(&wm831x_backlight_driver);
238}
239module_init(wm831x_backlight_init);
240
241static void __exit wm831x_backlight_exit(void)
242{
243 platform_driver_unregister(&wm831x_backlight_driver);
244}
245module_exit(wm831x_backlight_exit);
246
247MODULE_DESCRIPTION("Backlight Driver for WM831x PMICs");
 248MODULE_AUTHOR("Mark Brown <broonie@opensource.wolfsonmicro.com>");
249MODULE_LICENSE("GPL");
250MODULE_ALIAS("platform:wm831x-backlight");
diff --git a/fs/buffer.c b/fs/buffer.c
index 24afd7422ae8..6fa530256bfd 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -280,7 +280,7 @@ void invalidate_bdev(struct block_device *bdev)
280EXPORT_SYMBOL(invalidate_bdev); 280EXPORT_SYMBOL(invalidate_bdev);
281 281
282/* 282/*
283 * Kick pdflush then try to free up some ZONE_NORMAL memory. 283 * Kick the writeback threads then try to free up some ZONE_NORMAL memory.
284 */ 284 */
285static void free_more_memory(void) 285static void free_more_memory(void)
286{ 286{
@@ -1709,9 +1709,9 @@ static int __block_write_full_page(struct inode *inode, struct page *page,
1709 /* 1709 /*
1710 * If it's a fully non-blocking write attempt and we cannot 1710 * If it's a fully non-blocking write attempt and we cannot
1711 * lock the buffer then redirty the page. Note that this can 1711 * lock the buffer then redirty the page. Note that this can
1712 * potentially cause a busy-wait loop from pdflush and kswapd 1712 * potentially cause a busy-wait loop from writeback threads
1713 * activity, but those code paths have their own higher-level 1713 * and kswapd activity, but those code paths have their own
1714 * throttling. 1714 * higher-level throttling.
1715 */ 1715 */
1716 if (wbc->sync_mode != WB_SYNC_NONE || !wbc->nonblocking) { 1716 if (wbc->sync_mode != WB_SYNC_NONE || !wbc->nonblocking) {
1717 lock_buffer(bh); 1717 lock_buffer(bh);
@@ -3208,7 +3208,7 @@ EXPORT_SYMBOL(block_sync_page);
3208 * still running obsolete flush daemons, so we terminate them here. 3208 * still running obsolete flush daemons, so we terminate them here.
3209 * 3209 *
3210 * Use of bdflush() is deprecated and will be removed in a future kernel. 3210 * Use of bdflush() is deprecated and will be removed in a future kernel.
3211 * The `pdflush' kernel threads fully replace bdflush daemons and this call. 3211 * The `flush-X' kernel threads fully replace bdflush daemons and this call.
3212 */ 3212 */
3213SYSCALL_DEFINE2(bdflush, int, func, long, data) 3213SYSCALL_DEFINE2(bdflush, int, func, long, data)
3214{ 3214{
diff --git a/fs/cifs/Kconfig b/fs/cifs/Kconfig
index 6994a0f54f02..80f352596807 100644
--- a/fs/cifs/Kconfig
+++ b/fs/cifs/Kconfig
@@ -2,6 +2,7 @@ config CIFS
2 tristate "CIFS support (advanced network filesystem, SMBFS successor)" 2 tristate "CIFS support (advanced network filesystem, SMBFS successor)"
3 depends on INET 3 depends on INET
4 select NLS 4 select NLS
5 select SLOW_WORK
5 help 6 help
6 This is the client VFS module for the Common Internet File System 7 This is the client VFS module for the Common Internet File System
7 (CIFS) protocol which is the successor to the Server Message Block 8 (CIFS) protocol which is the successor to the Server Message Block
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
index 90c5b39f0313..9a5e4f5f3122 100644
--- a/fs/cifs/cifsfs.c
+++ b/fs/cifs/cifsfs.c
@@ -64,9 +64,6 @@ unsigned int multiuser_mount = 0;
64unsigned int extended_security = CIFSSEC_DEF; 64unsigned int extended_security = CIFSSEC_DEF;
65/* unsigned int ntlmv2_support = 0; */ 65/* unsigned int ntlmv2_support = 0; */
66unsigned int sign_CIFS_PDUs = 1; 66unsigned int sign_CIFS_PDUs = 1;
67extern struct task_struct *oplockThread; /* remove sparse warning */
68struct task_struct *oplockThread = NULL;
69/* extern struct task_struct * dnotifyThread; remove sparse warning */
70static const struct super_operations cifs_super_ops; 67static const struct super_operations cifs_super_ops;
71unsigned int CIFSMaxBufSize = CIFS_MAX_MSGSIZE; 68unsigned int CIFSMaxBufSize = CIFS_MAX_MSGSIZE;
72module_param(CIFSMaxBufSize, int, 0); 69module_param(CIFSMaxBufSize, int, 0);
@@ -972,89 +969,12 @@ cifs_destroy_mids(void)
972 kmem_cache_destroy(cifs_oplock_cachep); 969 kmem_cache_destroy(cifs_oplock_cachep);
973} 970}
974 971
975static int cifs_oplock_thread(void *dummyarg)
976{
977 struct oplock_q_entry *oplock_item;
978 struct cifsTconInfo *pTcon;
979 struct inode *inode;
980 __u16 netfid;
981 int rc, waitrc = 0;
982
983 set_freezable();
984 do {
985 if (try_to_freeze())
986 continue;
987
988 spin_lock(&cifs_oplock_lock);
989 if (list_empty(&cifs_oplock_list)) {
990 spin_unlock(&cifs_oplock_lock);
991 set_current_state(TASK_INTERRUPTIBLE);
992 schedule_timeout(39*HZ);
993 } else {
994 oplock_item = list_entry(cifs_oplock_list.next,
995 struct oplock_q_entry, qhead);
996 cFYI(1, ("found oplock item to write out"));
997 pTcon = oplock_item->tcon;
998 inode = oplock_item->pinode;
999 netfid = oplock_item->netfid;
1000 spin_unlock(&cifs_oplock_lock);
1001 DeleteOplockQEntry(oplock_item);
1002 /* can not grab inode sem here since it would
1003 deadlock when oplock received on delete
1004 since vfs_unlink holds the i_mutex across
1005 the call */
1006 /* mutex_lock(&inode->i_mutex);*/
1007 if (S_ISREG(inode->i_mode)) {
1008#ifdef CONFIG_CIFS_EXPERIMENTAL
1009 if (CIFS_I(inode)->clientCanCacheAll == 0)
1010 break_lease(inode, FMODE_READ);
1011 else if (CIFS_I(inode)->clientCanCacheRead == 0)
1012 break_lease(inode, FMODE_WRITE);
1013#endif
1014 rc = filemap_fdatawrite(inode->i_mapping);
1015 if (CIFS_I(inode)->clientCanCacheRead == 0) {
1016 waitrc = filemap_fdatawait(
1017 inode->i_mapping);
1018 invalidate_remote_inode(inode);
1019 }
1020 if (rc == 0)
1021 rc = waitrc;
1022 } else
1023 rc = 0;
1024 /* mutex_unlock(&inode->i_mutex);*/
1025 if (rc)
1026 CIFS_I(inode)->write_behind_rc = rc;
1027 cFYI(1, ("Oplock flush inode %p rc %d",
1028 inode, rc));
1029
1030 /* releasing stale oplock after recent reconnect
1031 of smb session using a now incorrect file
1032 handle is not a data integrity issue but do
1033 not bother sending an oplock release if session
1034 to server still is disconnected since oplock
1035 already released by the server in that case */
1036 if (!pTcon->need_reconnect) {
1037 rc = CIFSSMBLock(0, pTcon, netfid,
1038 0 /* len */ , 0 /* offset */, 0,
1039 0, LOCKING_ANDX_OPLOCK_RELEASE,
1040 false /* wait flag */);
1041 cFYI(1, ("Oplock release rc = %d", rc));
1042 }
1043 set_current_state(TASK_INTERRUPTIBLE);
1044 schedule_timeout(1); /* yield in case q were corrupt */
1045 }
1046 } while (!kthread_should_stop());
1047
1048 return 0;
1049}
1050
1051static int __init 972static int __init
1052init_cifs(void) 973init_cifs(void)
1053{ 974{
1054 int rc = 0; 975 int rc = 0;
1055 cifs_proc_init(); 976 cifs_proc_init();
1056 INIT_LIST_HEAD(&cifs_tcp_ses_list); 977 INIT_LIST_HEAD(&cifs_tcp_ses_list);
1057 INIT_LIST_HEAD(&cifs_oplock_list);
1058#ifdef CONFIG_CIFS_EXPERIMENTAL 978#ifdef CONFIG_CIFS_EXPERIMENTAL
1059 INIT_LIST_HEAD(&GlobalDnotifyReqList); 979 INIT_LIST_HEAD(&GlobalDnotifyReqList);
1060 INIT_LIST_HEAD(&GlobalDnotifyRsp_Q); 980 INIT_LIST_HEAD(&GlobalDnotifyRsp_Q);
@@ -1083,7 +1003,6 @@ init_cifs(void)
1083 rwlock_init(&GlobalSMBSeslock); 1003 rwlock_init(&GlobalSMBSeslock);
1084 rwlock_init(&cifs_tcp_ses_lock); 1004 rwlock_init(&cifs_tcp_ses_lock);
1085 spin_lock_init(&GlobalMid_Lock); 1005 spin_lock_init(&GlobalMid_Lock);
1086 spin_lock_init(&cifs_oplock_lock);
1087 1006
1088 if (cifs_max_pending < 2) { 1007 if (cifs_max_pending < 2) {
1089 cifs_max_pending = 2; 1008 cifs_max_pending = 2;
@@ -1118,16 +1037,13 @@ init_cifs(void)
1118 if (rc) 1037 if (rc)
1119 goto out_unregister_key_type; 1038 goto out_unregister_key_type;
1120#endif 1039#endif
1121 oplockThread = kthread_run(cifs_oplock_thread, NULL, "cifsoplockd"); 1040 rc = slow_work_register_user();
1122 if (IS_ERR(oplockThread)) { 1041 if (rc)
1123 rc = PTR_ERR(oplockThread); 1042 goto out_unregister_resolver_key;
1124 cERROR(1, ("error %d create oplock thread", rc));
1125 goto out_unregister_dfs_key_type;
1126 }
1127 1043
1128 return 0; 1044 return 0;
1129 1045
1130 out_unregister_dfs_key_type: 1046 out_unregister_resolver_key:
1131#ifdef CONFIG_CIFS_DFS_UPCALL 1047#ifdef CONFIG_CIFS_DFS_UPCALL
1132 unregister_key_type(&key_type_dns_resolver); 1048 unregister_key_type(&key_type_dns_resolver);
1133 out_unregister_key_type: 1049 out_unregister_key_type:
@@ -1164,7 +1080,6 @@ exit_cifs(void)
1164 cifs_destroy_inodecache(); 1080 cifs_destroy_inodecache();
1165 cifs_destroy_mids(); 1081 cifs_destroy_mids();
1166 cifs_destroy_request_bufs(); 1082 cifs_destroy_request_bufs();
1167 kthread_stop(oplockThread);
1168} 1083}
1169 1084
1170MODULE_AUTHOR("Steve French <sfrench@us.ibm.com>"); 1085MODULE_AUTHOR("Steve French <sfrench@us.ibm.com>");
diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
index 6cfc81a32703..5d0fde18039c 100644
--- a/fs/cifs/cifsglob.h
+++ b/fs/cifs/cifsglob.h
@@ -18,6 +18,7 @@
18 */ 18 */
19#include <linux/in.h> 19#include <linux/in.h>
20#include <linux/in6.h> 20#include <linux/in6.h>
21#include <linux/slow-work.h>
21#include "cifs_fs_sb.h" 22#include "cifs_fs_sb.h"
22#include "cifsacl.h" 23#include "cifsacl.h"
23/* 24/*
@@ -346,14 +347,16 @@ struct cifsFileInfo {
346 /* lock scope id (0 if none) */ 347 /* lock scope id (0 if none) */
347 struct file *pfile; /* needed for writepage */ 348 struct file *pfile; /* needed for writepage */
348 struct inode *pInode; /* needed for oplock break */ 349 struct inode *pInode; /* needed for oplock break */
350 struct vfsmount *mnt;
349 struct mutex lock_mutex; 351 struct mutex lock_mutex;
350 struct list_head llist; /* list of byte range locks we have. */ 352 struct list_head llist; /* list of byte range locks we have. */
351 bool closePend:1; /* file is marked to close */ 353 bool closePend:1; /* file is marked to close */
352 bool invalidHandle:1; /* file closed via session abend */ 354 bool invalidHandle:1; /* file closed via session abend */
353 bool messageMode:1; /* for pipes: message vs byte mode */ 355 bool oplock_break_cancelled:1;
354 atomic_t count; /* reference count */ 356 atomic_t count; /* reference count */
355 struct mutex fh_mutex; /* prevents reopen race after dead ses*/ 357 struct mutex fh_mutex; /* prevents reopen race after dead ses*/
356 struct cifs_search_info srch_inf; 358 struct cifs_search_info srch_inf;
359 struct slow_work oplock_break; /* slow_work job for oplock breaks */
357}; 360};
358 361
359/* Take a reference on the file private data */ 362/* Take a reference on the file private data */
@@ -365,8 +368,10 @@ static inline void cifsFileInfo_get(struct cifsFileInfo *cifs_file)
365/* Release a reference on the file private data */ 368/* Release a reference on the file private data */
366static inline void cifsFileInfo_put(struct cifsFileInfo *cifs_file) 369static inline void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
367{ 370{
368 if (atomic_dec_and_test(&cifs_file->count)) 371 if (atomic_dec_and_test(&cifs_file->count)) {
372 iput(cifs_file->pInode);
369 kfree(cifs_file); 373 kfree(cifs_file);
374 }
370} 375}
371 376
372/* 377/*
@@ -382,7 +387,6 @@ struct cifsInodeInfo {
382 unsigned long time; /* jiffies of last update/check of inode */ 387 unsigned long time; /* jiffies of last update/check of inode */
383 bool clientCanCacheRead:1; /* read oplock */ 388 bool clientCanCacheRead:1; /* read oplock */
384 bool clientCanCacheAll:1; /* read and writebehind oplock */ 389 bool clientCanCacheAll:1; /* read and writebehind oplock */
385 bool oplockPending:1;
386 bool delete_pending:1; /* DELETE_ON_CLOSE is set */ 390 bool delete_pending:1; /* DELETE_ON_CLOSE is set */
387 u64 server_eof; /* current file size on server */ 391 u64 server_eof; /* current file size on server */
388 u64 uniqueid; /* server inode number */ 392 u64 uniqueid; /* server inode number */
@@ -585,9 +589,9 @@ require use of the stronger protocol */
585#define CIFSSEC_MUST_LANMAN 0x10010 589#define CIFSSEC_MUST_LANMAN 0x10010
586#define CIFSSEC_MUST_PLNTXT 0x20020 590#define CIFSSEC_MUST_PLNTXT 0x20020
587#ifdef CONFIG_CIFS_UPCALL 591#ifdef CONFIG_CIFS_UPCALL
588#define CIFSSEC_MASK 0xAF0AF /* allows weak security but also krb5 */ 592#define CIFSSEC_MASK 0xBF0BF /* allows weak security but also krb5 */
589#else 593#else
590#define CIFSSEC_MASK 0xA70A7 /* current flags supported if weak */ 594#define CIFSSEC_MASK 0xB70B7 /* current flags supported if weak */
591#endif /* UPCALL */ 595#endif /* UPCALL */
592#else /* do not allow weak pw hash */ 596#else /* do not allow weak pw hash */
593#ifdef CONFIG_CIFS_UPCALL 597#ifdef CONFIG_CIFS_UPCALL
@@ -669,12 +673,6 @@ GLOBAL_EXTERN rwlock_t cifs_tcp_ses_lock;
669 */ 673 */
670GLOBAL_EXTERN rwlock_t GlobalSMBSeslock; 674GLOBAL_EXTERN rwlock_t GlobalSMBSeslock;
671 675
672/* Global list of oplocks */
673GLOBAL_EXTERN struct list_head cifs_oplock_list;
674
675/* Protects the cifs_oplock_list */
676GLOBAL_EXTERN spinlock_t cifs_oplock_lock;
677
678/* Outstanding dir notify requests */ 676/* Outstanding dir notify requests */
679GLOBAL_EXTERN struct list_head GlobalDnotifyReqList; 677GLOBAL_EXTERN struct list_head GlobalDnotifyReqList;
680/* DirNotify response queue */ 678/* DirNotify response queue */
@@ -725,3 +723,4 @@ GLOBAL_EXTERN unsigned int cifs_min_rcv; /* min size of big ntwrk buf pool */
725GLOBAL_EXTERN unsigned int cifs_min_small; /* min size of small buf pool */ 723GLOBAL_EXTERN unsigned int cifs_min_small; /* min size of small buf pool */
726GLOBAL_EXTERN unsigned int cifs_max_pending; /* MAX requests at once to server*/ 724GLOBAL_EXTERN unsigned int cifs_max_pending; /* MAX requests at once to server*/
727 725
726extern const struct slow_work_ops cifs_oplock_break_ops;
diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h
index da8fbf565991..6928c24d1d42 100644
--- a/fs/cifs/cifsproto.h
+++ b/fs/cifs/cifsproto.h
@@ -86,18 +86,17 @@ extern int CIFS_SessSetup(unsigned int xid, struct cifsSesInfo *ses,
86 const int stage, 86 const int stage,
87 const struct nls_table *nls_cp); 87 const struct nls_table *nls_cp);
88extern __u16 GetNextMid(struct TCP_Server_Info *server); 88extern __u16 GetNextMid(struct TCP_Server_Info *server);
89extern struct oplock_q_entry *AllocOplockQEntry(struct inode *, u16,
90 struct cifsTconInfo *);
91extern void DeleteOplockQEntry(struct oplock_q_entry *);
92extern void DeleteTconOplockQEntries(struct cifsTconInfo *);
93extern struct timespec cifs_NTtimeToUnix(__le64 utc_nanoseconds_since_1601); 89extern struct timespec cifs_NTtimeToUnix(__le64 utc_nanoseconds_since_1601);
94extern u64 cifs_UnixTimeToNT(struct timespec); 90extern u64 cifs_UnixTimeToNT(struct timespec);
95extern struct timespec cnvrtDosUnixTm(__le16 le_date, __le16 le_time, 91extern struct timespec cnvrtDosUnixTm(__le16 le_date, __le16 le_time,
96 int offset); 92 int offset);
97 93
94extern struct cifsFileInfo *cifs_new_fileinfo(struct inode *newinode,
95 __u16 fileHandle, struct file *file,
96 struct vfsmount *mnt, unsigned int oflags);
98extern int cifs_posix_open(char *full_path, struct inode **pinode, 97extern int cifs_posix_open(char *full_path, struct inode **pinode,
99 struct super_block *sb, int mode, int oflags, 98 struct vfsmount *mnt, int mode, int oflags,
100 int *poplock, __u16 *pnetfid, int xid); 99 __u32 *poplock, __u16 *pnetfid, int xid);
101extern void cifs_unix_basic_to_fattr(struct cifs_fattr *fattr, 100extern void cifs_unix_basic_to_fattr(struct cifs_fattr *fattr,
102 FILE_UNIX_BASIC_INFO *info, 101 FILE_UNIX_BASIC_INFO *info,
103 struct cifs_sb_info *cifs_sb); 102 struct cifs_sb_info *cifs_sb);
diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
index 301e307e1279..941441d3e386 100644
--- a/fs/cifs/cifssmb.c
+++ b/fs/cifs/cifssmb.c
@@ -94,6 +94,7 @@ static void mark_open_files_invalid(struct cifsTconInfo *pTcon)
94 list_for_each_safe(tmp, tmp1, &pTcon->openFileList) { 94 list_for_each_safe(tmp, tmp1, &pTcon->openFileList) {
95 open_file = list_entry(tmp, struct cifsFileInfo, tlist); 95 open_file = list_entry(tmp, struct cifsFileInfo, tlist);
96 open_file->invalidHandle = true; 96 open_file->invalidHandle = true;
97 open_file->oplock_break_cancelled = true;
97 } 98 }
98 write_unlock(&GlobalSMBSeslock); 99 write_unlock(&GlobalSMBSeslock);
99 /* BB Add call to invalidate_inodes(sb) for all superblocks mounted 100 /* BB Add call to invalidate_inodes(sb) for all superblocks mounted
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index d49682433c20..43003e0bef18 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -1670,7 +1670,6 @@ cifs_put_tcon(struct cifsTconInfo *tcon)
1670 CIFSSMBTDis(xid, tcon); 1670 CIFSSMBTDis(xid, tcon);
1671 _FreeXid(xid); 1671 _FreeXid(xid);
1672 1672
1673 DeleteTconOplockQEntries(tcon);
1674 tconInfoFree(tcon); 1673 tconInfoFree(tcon);
1675 cifs_put_smb_ses(ses); 1674 cifs_put_smb_ses(ses);
1676} 1675}
diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c
index a6424cfc0121..627a60a6c1b1 100644
--- a/fs/cifs/dir.c
+++ b/fs/cifs/dir.c
@@ -24,6 +24,7 @@
24#include <linux/stat.h> 24#include <linux/stat.h>
25#include <linux/slab.h> 25#include <linux/slab.h>
26#include <linux/namei.h> 26#include <linux/namei.h>
27#include <linux/mount.h>
27#include "cifsfs.h" 28#include "cifsfs.h"
28#include "cifspdu.h" 29#include "cifspdu.h"
29#include "cifsglob.h" 30#include "cifsglob.h"
@@ -129,44 +130,45 @@ cifs_bp_rename_retry:
129 return full_path; 130 return full_path;
130} 131}
131 132
132static void 133struct cifsFileInfo *
133cifs_fill_fileinfo(struct inode *newinode, __u16 fileHandle, 134cifs_new_fileinfo(struct inode *newinode, __u16 fileHandle,
134 struct cifsTconInfo *tcon, bool write_only) 135 struct file *file, struct vfsmount *mnt, unsigned int oflags)
135{ 136{
136 int oplock = 0; 137 int oplock = 0;
137 struct cifsFileInfo *pCifsFile; 138 struct cifsFileInfo *pCifsFile;
138 struct cifsInodeInfo *pCifsInode; 139 struct cifsInodeInfo *pCifsInode;
140 struct cifs_sb_info *cifs_sb = CIFS_SB(mnt->mnt_sb);
139 141
140 pCifsFile = kzalloc(sizeof(struct cifsFileInfo), GFP_KERNEL); 142 pCifsFile = kzalloc(sizeof(struct cifsFileInfo), GFP_KERNEL);
141
142 if (pCifsFile == NULL) 143 if (pCifsFile == NULL)
143 return; 144 return pCifsFile;
144 145
145 if (oplockEnabled) 146 if (oplockEnabled)
146 oplock = REQ_OPLOCK; 147 oplock = REQ_OPLOCK;
147 148
148 pCifsFile->netfid = fileHandle; 149 pCifsFile->netfid = fileHandle;
149 pCifsFile->pid = current->tgid; 150 pCifsFile->pid = current->tgid;
150 pCifsFile->pInode = newinode; 151 pCifsFile->pInode = igrab(newinode);
152 pCifsFile->mnt = mnt;
153 pCifsFile->pfile = file;
151 pCifsFile->invalidHandle = false; 154 pCifsFile->invalidHandle = false;
152 pCifsFile->closePend = false; 155 pCifsFile->closePend = false;
153 mutex_init(&pCifsFile->fh_mutex); 156 mutex_init(&pCifsFile->fh_mutex);
154 mutex_init(&pCifsFile->lock_mutex); 157 mutex_init(&pCifsFile->lock_mutex);
155 INIT_LIST_HEAD(&pCifsFile->llist); 158 INIT_LIST_HEAD(&pCifsFile->llist);
156 atomic_set(&pCifsFile->count, 1); 159 atomic_set(&pCifsFile->count, 1);
160 slow_work_init(&pCifsFile->oplock_break, &cifs_oplock_break_ops);
157 161
158 /* set the following in open now
159 pCifsFile->pfile = file; */
160 write_lock(&GlobalSMBSeslock); 162 write_lock(&GlobalSMBSeslock);
161 list_add(&pCifsFile->tlist, &tcon->openFileList); 163 list_add(&pCifsFile->tlist, &cifs_sb->tcon->openFileList);
162 pCifsInode = CIFS_I(newinode); 164 pCifsInode = CIFS_I(newinode);
163 if (pCifsInode) { 165 if (pCifsInode) {
164 /* if readable file instance put first in list*/ 166 /* if readable file instance put first in list*/
165 if (write_only) 167 if (oflags & FMODE_READ)
168 list_add(&pCifsFile->flist, &pCifsInode->openFileList);
169 else
166 list_add_tail(&pCifsFile->flist, 170 list_add_tail(&pCifsFile->flist,
167 &pCifsInode->openFileList); 171 &pCifsInode->openFileList);
168 else
169 list_add(&pCifsFile->flist, &pCifsInode->openFileList);
170 172
171 if ((oplock & 0xF) == OPLOCK_EXCLUSIVE) { 173 if ((oplock & 0xF) == OPLOCK_EXCLUSIVE) {
172 pCifsInode->clientCanCacheAll = true; 174 pCifsInode->clientCanCacheAll = true;
@@ -176,18 +178,18 @@ cifs_fill_fileinfo(struct inode *newinode, __u16 fileHandle,
176 pCifsInode->clientCanCacheRead = true; 178 pCifsInode->clientCanCacheRead = true;
177 } 179 }
178 write_unlock(&GlobalSMBSeslock); 180 write_unlock(&GlobalSMBSeslock);
181
182 return pCifsFile;
179} 183}
180 184
181int cifs_posix_open(char *full_path, struct inode **pinode, 185int cifs_posix_open(char *full_path, struct inode **pinode,
182 struct super_block *sb, int mode, int oflags, 186 struct vfsmount *mnt, int mode, int oflags,
183 int *poplock, __u16 *pnetfid, int xid) 187 __u32 *poplock, __u16 *pnetfid, int xid)
184{ 188{
185 int rc; 189 int rc;
186 __u32 oplock;
187 bool write_only = false;
188 FILE_UNIX_BASIC_INFO *presp_data; 190 FILE_UNIX_BASIC_INFO *presp_data;
189 __u32 posix_flags = 0; 191 __u32 posix_flags = 0;
190 struct cifs_sb_info *cifs_sb = CIFS_SB(sb); 192 struct cifs_sb_info *cifs_sb = CIFS_SB(mnt->mnt_sb);
191 struct cifs_fattr fattr; 193 struct cifs_fattr fattr;
192 194
193 cFYI(1, ("posix open %s", full_path)); 195 cFYI(1, ("posix open %s", full_path));
@@ -223,12 +225,9 @@ int cifs_posix_open(char *full_path, struct inode **pinode,
223 if (oflags & O_DIRECT) 225 if (oflags & O_DIRECT)
224 posix_flags |= SMB_O_DIRECT; 226 posix_flags |= SMB_O_DIRECT;
225 227
226 if (!(oflags & FMODE_READ))
227 write_only = true;
228
229 mode &= ~current_umask(); 228 mode &= ~current_umask();
230 rc = CIFSPOSIXCreate(xid, cifs_sb->tcon, posix_flags, mode, 229 rc = CIFSPOSIXCreate(xid, cifs_sb->tcon, posix_flags, mode,
231 pnetfid, presp_data, &oplock, full_path, 230 pnetfid, presp_data, poplock, full_path,
232 cifs_sb->local_nls, cifs_sb->mnt_cifs_flags & 231 cifs_sb->local_nls, cifs_sb->mnt_cifs_flags &
233 CIFS_MOUNT_MAP_SPECIAL_CHR); 232 CIFS_MOUNT_MAP_SPECIAL_CHR);
234 if (rc) 233 if (rc)
@@ -244,7 +243,7 @@ int cifs_posix_open(char *full_path, struct inode **pinode,
244 243
245 /* get new inode and set it up */ 244 /* get new inode and set it up */
246 if (*pinode == NULL) { 245 if (*pinode == NULL) {
247 *pinode = cifs_iget(sb, &fattr); 246 *pinode = cifs_iget(mnt->mnt_sb, &fattr);
248 if (!*pinode) { 247 if (!*pinode) {
249 rc = -ENOMEM; 248 rc = -ENOMEM;
250 goto posix_open_ret; 249 goto posix_open_ret;
@@ -253,7 +252,7 @@ int cifs_posix_open(char *full_path, struct inode **pinode,
253 cifs_fattr_to_inode(*pinode, &fattr); 252 cifs_fattr_to_inode(*pinode, &fattr);
254 } 253 }
255 254
256 cifs_fill_fileinfo(*pinode, *pnetfid, cifs_sb->tcon, write_only); 255 cifs_new_fileinfo(*pinode, *pnetfid, NULL, mnt, oflags);
257 256
258posix_open_ret: 257posix_open_ret:
259 kfree(presp_data); 258 kfree(presp_data);
@@ -280,7 +279,7 @@ cifs_create(struct inode *inode, struct dentry *direntry, int mode,
280 int rc = -ENOENT; 279 int rc = -ENOENT;
281 int xid; 280 int xid;
282 int create_options = CREATE_NOT_DIR; 281 int create_options = CREATE_NOT_DIR;
283 int oplock = 0; 282 __u32 oplock = 0;
284 int oflags; 283 int oflags;
285 bool posix_create = false; 284 bool posix_create = false;
286 /* 285 /*
@@ -298,7 +297,6 @@ cifs_create(struct inode *inode, struct dentry *direntry, int mode,
298 FILE_ALL_INFO *buf = NULL; 297 FILE_ALL_INFO *buf = NULL;
299 struct inode *newinode = NULL; 298 struct inode *newinode = NULL;
300 int disposition = FILE_OVERWRITE_IF; 299 int disposition = FILE_OVERWRITE_IF;
301 bool write_only = false;
302 300
303 xid = GetXid(); 301 xid = GetXid();
304 302
@@ -323,7 +321,7 @@ cifs_create(struct inode *inode, struct dentry *direntry, int mode,
323 if (tcon->unix_ext && (tcon->ses->capabilities & CAP_UNIX) && 321 if (tcon->unix_ext && (tcon->ses->capabilities & CAP_UNIX) &&
324 (CIFS_UNIX_POSIX_PATH_OPS_CAP & 322 (CIFS_UNIX_POSIX_PATH_OPS_CAP &
325 le64_to_cpu(tcon->fsUnixInfo.Capability))) { 323 le64_to_cpu(tcon->fsUnixInfo.Capability))) {
326 rc = cifs_posix_open(full_path, &newinode, inode->i_sb, 324 rc = cifs_posix_open(full_path, &newinode, nd->path.mnt,
327 mode, oflags, &oplock, &fileHandle, xid); 325 mode, oflags, &oplock, &fileHandle, xid);
328 /* EIO could indicate that (posix open) operation is not 326 /* EIO could indicate that (posix open) operation is not
329 supported, despite what server claimed in capability 327 supported, despite what server claimed in capability
@@ -351,11 +349,8 @@ cifs_create(struct inode *inode, struct dentry *direntry, int mode,
351 desiredAccess = 0; 349 desiredAccess = 0;
352 if (oflags & FMODE_READ) 350 if (oflags & FMODE_READ)
353 desiredAccess |= GENERIC_READ; /* is this too little? */ 351 desiredAccess |= GENERIC_READ; /* is this too little? */
354 if (oflags & FMODE_WRITE) { 352 if (oflags & FMODE_WRITE)
355 desiredAccess |= GENERIC_WRITE; 353 desiredAccess |= GENERIC_WRITE;
356 if (!(oflags & FMODE_READ))
357 write_only = true;
358 }
359 354
360 if ((oflags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL)) 355 if ((oflags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL))
361 disposition = FILE_CREATE; 356 disposition = FILE_CREATE;
@@ -470,8 +465,8 @@ cifs_create_set_dentry:
470 /* mknod case - do not leave file open */ 465 /* mknod case - do not leave file open */
471 CIFSSMBClose(xid, tcon, fileHandle); 466 CIFSSMBClose(xid, tcon, fileHandle);
472 } else if (!(posix_create) && (newinode)) { 467 } else if (!(posix_create) && (newinode)) {
473 cifs_fill_fileinfo(newinode, fileHandle, 468 cifs_new_fileinfo(newinode, fileHandle, NULL,
474 cifs_sb->tcon, write_only); 469 nd->path.mnt, oflags);
475 } 470 }
476cifs_create_out: 471cifs_create_out:
477 kfree(buf); 472 kfree(buf);
@@ -611,7 +606,7 @@ cifs_lookup(struct inode *parent_dir_inode, struct dentry *direntry,
611{ 606{
612 int xid; 607 int xid;
613 int rc = 0; /* to get around spurious gcc warning, set to zero here */ 608 int rc = 0; /* to get around spurious gcc warning, set to zero here */
614 int oplock = 0; 609 __u32 oplock = 0;
615 __u16 fileHandle = 0; 610 __u16 fileHandle = 0;
616 bool posix_open = false; 611 bool posix_open = false;
617 struct cifs_sb_info *cifs_sb; 612 struct cifs_sb_info *cifs_sb;
@@ -683,8 +678,7 @@ cifs_lookup(struct inode *parent_dir_inode, struct dentry *direntry,
683 if (!(nd->flags & (LOOKUP_PARENT | LOOKUP_DIRECTORY)) && 678 if (!(nd->flags & (LOOKUP_PARENT | LOOKUP_DIRECTORY)) &&
684 (nd->flags & LOOKUP_OPEN) && !pTcon->broken_posix_open && 679 (nd->flags & LOOKUP_OPEN) && !pTcon->broken_posix_open &&
685 (nd->intent.open.flags & O_CREAT)) { 680 (nd->intent.open.flags & O_CREAT)) {
686 rc = cifs_posix_open(full_path, &newInode, 681 rc = cifs_posix_open(full_path, &newInode, nd->path.mnt,
687 parent_dir_inode->i_sb,
688 nd->intent.open.create_mode, 682 nd->intent.open.create_mode,
689 nd->intent.open.flags, &oplock, 683 nd->intent.open.flags, &oplock,
690 &fileHandle, xid); 684 &fileHandle, xid);
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index fa7beac8b80e..429337eb7afe 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -30,6 +30,7 @@
30#include <linux/writeback.h> 30#include <linux/writeback.h>
31#include <linux/task_io_accounting_ops.h> 31#include <linux/task_io_accounting_ops.h>
32#include <linux/delay.h> 32#include <linux/delay.h>
33#include <linux/mount.h>
33#include <asm/div64.h> 34#include <asm/div64.h>
34#include "cifsfs.h" 35#include "cifsfs.h"
35#include "cifspdu.h" 36#include "cifspdu.h"
@@ -39,27 +40,6 @@
39#include "cifs_debug.h" 40#include "cifs_debug.h"
40#include "cifs_fs_sb.h" 41#include "cifs_fs_sb.h"
41 42
42static inline struct cifsFileInfo *cifs_init_private(
43 struct cifsFileInfo *private_data, struct inode *inode,
44 struct file *file, __u16 netfid)
45{
46 memset(private_data, 0, sizeof(struct cifsFileInfo));
47 private_data->netfid = netfid;
48 private_data->pid = current->tgid;
49 mutex_init(&private_data->fh_mutex);
50 mutex_init(&private_data->lock_mutex);
51 INIT_LIST_HEAD(&private_data->llist);
52 private_data->pfile = file; /* needed for writepage */
53 private_data->pInode = inode;
54 private_data->invalidHandle = false;
55 private_data->closePend = false;
56 /* Initialize reference count to one. The private data is
57 freed on the release of the last reference */
58 atomic_set(&private_data->count, 1);
59
60 return private_data;
61}
62
63static inline int cifs_convert_flags(unsigned int flags) 43static inline int cifs_convert_flags(unsigned int flags)
64{ 44{
65 if ((flags & O_ACCMODE) == O_RDONLY) 45 if ((flags & O_ACCMODE) == O_RDONLY)
@@ -123,9 +103,11 @@ static inline int cifs_get_disposition(unsigned int flags)
123} 103}
124 104
125/* all arguments to this function must be checked for validity in caller */ 105/* all arguments to this function must be checked for validity in caller */
126static inline int cifs_posix_open_inode_helper(struct inode *inode, 106static inline int
127 struct file *file, struct cifsInodeInfo *pCifsInode, 107cifs_posix_open_inode_helper(struct inode *inode, struct file *file,
128 struct cifsFileInfo *pCifsFile, int oplock, u16 netfid) 108 struct cifsInodeInfo *pCifsInode,
109 struct cifsFileInfo *pCifsFile, __u32 oplock,
110 u16 netfid)
129{ 111{
130 112
131 write_lock(&GlobalSMBSeslock); 113 write_lock(&GlobalSMBSeslock);
@@ -219,17 +201,6 @@ static inline int cifs_open_inode_helper(struct inode *inode, struct file *file,
219 struct timespec temp; 201 struct timespec temp;
220 int rc; 202 int rc;
221 203
222 /* want handles we can use to read with first
223 in the list so we do not have to walk the
224 list to search for one in write_begin */
225 if ((file->f_flags & O_ACCMODE) == O_WRONLY) {
226 list_add_tail(&pCifsFile->flist,
227 &pCifsInode->openFileList);
228 } else {
229 list_add(&pCifsFile->flist,
230 &pCifsInode->openFileList);
231 }
232 write_unlock(&GlobalSMBSeslock);
233 if (pCifsInode->clientCanCacheRead) { 204 if (pCifsInode->clientCanCacheRead) {
234 /* we have the inode open somewhere else 205 /* we have the inode open somewhere else
235 no need to discard cache data */ 206 no need to discard cache data */
@@ -279,7 +250,8 @@ client_can_cache:
279int cifs_open(struct inode *inode, struct file *file) 250int cifs_open(struct inode *inode, struct file *file)
280{ 251{
281 int rc = -EACCES; 252 int rc = -EACCES;
282 int xid, oplock; 253 int xid;
254 __u32 oplock;
283 struct cifs_sb_info *cifs_sb; 255 struct cifs_sb_info *cifs_sb;
284 struct cifsTconInfo *tcon; 256 struct cifsTconInfo *tcon;
285 struct cifsFileInfo *pCifsFile; 257 struct cifsFileInfo *pCifsFile;
@@ -324,7 +296,7 @@ int cifs_open(struct inode *inode, struct file *file)
324 le64_to_cpu(tcon->fsUnixInfo.Capability))) { 296 le64_to_cpu(tcon->fsUnixInfo.Capability))) {
325 int oflags = (int) cifs_posix_convert_flags(file->f_flags); 297 int oflags = (int) cifs_posix_convert_flags(file->f_flags);
326 /* can not refresh inode info since size could be stale */ 298 /* can not refresh inode info since size could be stale */
327 rc = cifs_posix_open(full_path, &inode, inode->i_sb, 299 rc = cifs_posix_open(full_path, &inode, file->f_path.mnt,
328 cifs_sb->mnt_file_mode /* ignored */, 300 cifs_sb->mnt_file_mode /* ignored */,
329 oflags, &oplock, &netfid, xid); 301 oflags, &oplock, &netfid, xid);
330 if (rc == 0) { 302 if (rc == 0) {
@@ -414,24 +386,17 @@ int cifs_open(struct inode *inode, struct file *file)
414 cFYI(1, ("cifs_open returned 0x%x", rc)); 386 cFYI(1, ("cifs_open returned 0x%x", rc));
415 goto out; 387 goto out;
416 } 388 }
417 file->private_data = 389
418 kmalloc(sizeof(struct cifsFileInfo), GFP_KERNEL); 390 pCifsFile = cifs_new_fileinfo(inode, netfid, file, file->f_path.mnt,
391 file->f_flags);
392 file->private_data = pCifsFile;
419 if (file->private_data == NULL) { 393 if (file->private_data == NULL) {
420 rc = -ENOMEM; 394 rc = -ENOMEM;
421 goto out; 395 goto out;
422 } 396 }
423 pCifsFile = cifs_init_private(file->private_data, inode, file, netfid);
424 write_lock(&GlobalSMBSeslock);
425 list_add(&pCifsFile->tlist, &tcon->openFileList);
426 397
427 pCifsInode = CIFS_I(file->f_path.dentry->d_inode); 398 rc = cifs_open_inode_helper(inode, file, pCifsInode, pCifsFile, tcon,
428 if (pCifsInode) { 399 &oplock, buf, full_path, xid);
429 rc = cifs_open_inode_helper(inode, file, pCifsInode,
430 pCifsFile, tcon,
431 &oplock, buf, full_path, xid);
432 } else {
433 write_unlock(&GlobalSMBSeslock);
434 }
435 400
436 if (oplock & CIFS_CREATE_ACTION) { 401 if (oplock & CIFS_CREATE_ACTION) {
437 /* time to set mode which we can not set earlier due to 402 /* time to set mode which we can not set earlier due to
@@ -474,7 +439,8 @@ static int cifs_relock_file(struct cifsFileInfo *cifsFile)
474static int cifs_reopen_file(struct file *file, bool can_flush) 439static int cifs_reopen_file(struct file *file, bool can_flush)
475{ 440{
476 int rc = -EACCES; 441 int rc = -EACCES;
477 int xid, oplock; 442 int xid;
443 __u32 oplock;
478 struct cifs_sb_info *cifs_sb; 444 struct cifs_sb_info *cifs_sb;
479 struct cifsTconInfo *tcon; 445 struct cifsTconInfo *tcon;
480 struct cifsFileInfo *pCifsFile; 446 struct cifsFileInfo *pCifsFile;
@@ -543,7 +509,7 @@ reopen_error_exit:
543 le64_to_cpu(tcon->fsUnixInfo.Capability))) { 509 le64_to_cpu(tcon->fsUnixInfo.Capability))) {
544 int oflags = (int) cifs_posix_convert_flags(file->f_flags); 510 int oflags = (int) cifs_posix_convert_flags(file->f_flags);
545 /* can not refresh inode info since size could be stale */ 511 /* can not refresh inode info since size could be stale */
546 rc = cifs_posix_open(full_path, NULL, inode->i_sb, 512 rc = cifs_posix_open(full_path, NULL, file->f_path.mnt,
547 cifs_sb->mnt_file_mode /* ignored */, 513 cifs_sb->mnt_file_mode /* ignored */,
548 oflags, &oplock, &netfid, xid); 514 oflags, &oplock, &netfid, xid);
549 if (rc == 0) { 515 if (rc == 0) {
@@ -2308,6 +2274,73 @@ out:
2308 return rc; 2274 return rc;
2309} 2275}
2310 2276
2277static void
2278cifs_oplock_break(struct slow_work *work)
2279{
2280 struct cifsFileInfo *cfile = container_of(work, struct cifsFileInfo,
2281 oplock_break);
2282 struct inode *inode = cfile->pInode;
2283 struct cifsInodeInfo *cinode = CIFS_I(inode);
2284 struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->mnt->mnt_sb);
2285 int rc, waitrc = 0;
2286
2287 if (inode && S_ISREG(inode->i_mode)) {
2288#ifdef CONFIG_CIFS_EXPERIMENTAL
2289 if (cinode->clientCanCacheAll == 0)
2290 break_lease(inode, FMODE_READ);
2291 else if (cinode->clientCanCacheRead == 0)
2292 break_lease(inode, FMODE_WRITE);
2293#endif
2294 rc = filemap_fdatawrite(inode->i_mapping);
2295 if (cinode->clientCanCacheRead == 0) {
2296 waitrc = filemap_fdatawait(inode->i_mapping);
2297 invalidate_remote_inode(inode);
2298 }
2299 if (!rc)
2300 rc = waitrc;
2301 if (rc)
2302 cinode->write_behind_rc = rc;
2303 cFYI(1, ("Oplock flush inode %p rc %d", inode, rc));
2304 }
2305
2306 /*
2307 * Releasing a stale oplock after a recent reconnect of the SMB session
2308 * (using a now-incorrect file handle) is not a data integrity issue, but
2309 * do not bother sending an oplock release if the session to the server
2310 * is still disconnected, since the server has already released the oplock.
2311 */
2312 if (!cfile->closePend && !cfile->oplock_break_cancelled) {
2313 rc = CIFSSMBLock(0, cifs_sb->tcon, cfile->netfid, 0, 0, 0, 0,
2314 LOCKING_ANDX_OPLOCK_RELEASE, false);
2315 cFYI(1, ("Oplock release rc = %d", rc));
2316 }
2317}
2318
2319static int
2320cifs_oplock_break_get(struct slow_work *work)
2321{
2322 struct cifsFileInfo *cfile = container_of(work, struct cifsFileInfo,
2323 oplock_break);
2324 mntget(cfile->mnt);
2325 cifsFileInfo_get(cfile);
2326 return 0;
2327}
2328
2329static void
2330cifs_oplock_break_put(struct slow_work *work)
2331{
2332 struct cifsFileInfo *cfile = container_of(work, struct cifsFileInfo,
2333 oplock_break);
2334 mntput(cfile->mnt);
2335 cifsFileInfo_put(cfile);
2336}
2337
2338const struct slow_work_ops cifs_oplock_break_ops = {
2339 .get_ref = cifs_oplock_break_get,
2340 .put_ref = cifs_oplock_break_put,
2341 .execute = cifs_oplock_break,
2342};
2343
2311const struct address_space_operations cifs_addr_ops = { 2344const struct address_space_operations cifs_addr_ops = {
2312 .readpage = cifs_readpage, 2345 .readpage = cifs_readpage,
2313 .readpages = cifs_readpages, 2346 .readpages = cifs_readpages,
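
The slow_work conversion above drops the global oplock thread in favour of one work item per open file, with cifs_oplock_break_get/put pinning the file (and its mount) for the lifetime of the queued work. The userspace sketch below, a loose analogue with made-up names rather than kernel code, shows the same take-a-reference-on-enqueue, drop-it-after-execution pattern that keeps the object alive while the work is pending.

/* Hypothetical userspace analogue of the get_ref/put_ref pattern used by
 * the slow_work oplock break above; names are illustrative only. */
#include <stdio.h>
#include <stdlib.h>

struct file_ctx {
        int refcount;
        int netfid;                     /* stands in for the CIFS file handle */
};

static struct file_ctx *ctx_get(struct file_ctx *c) { c->refcount++; return c; }

static void ctx_put(struct file_ctx *c)
{
        if (--c->refcount == 0) {
                printf("file %d released\n", c->netfid);
                free(c);
        }
}

/* "Enqueue": pin the object so it survives until the work runs. */
static struct file_ctx *queue_oplock_break(struct file_ctx *c)
{
        return ctx_get(c);
}

/* "Execute + put_ref": do the deferred work, then drop the pin. */
static void run_oplock_break(struct file_ctx *c)
{
        printf("flushing and releasing oplock for file %d\n", c->netfid);
        ctx_put(c);
}

int main(void)
{
        struct file_ctx *c = calloc(1, sizeof(*c));

        c->refcount = 1;                /* reference held by the "open file" */
        c->netfid = 42;

        struct file_ctx *pending = queue_oplock_break(c);
        ctx_put(c);                     /* file is closed while work is pending */
        run_oplock_break(pending);      /* the object is still valid here */
        return 0;
}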
diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
index e079a9190ec4..0241b25ac33f 100644
--- a/fs/cifs/misc.c
+++ b/fs/cifs/misc.c
@@ -32,7 +32,6 @@
32 32
33extern mempool_t *cifs_sm_req_poolp; 33extern mempool_t *cifs_sm_req_poolp;
34extern mempool_t *cifs_req_poolp; 34extern mempool_t *cifs_req_poolp;
35extern struct task_struct *oplockThread;
36 35
37/* The xid serves as a useful identifier for each incoming vfs request, 36/* The xid serves as a useful identifier for each incoming vfs request,
38 in a similar way to the mid which is useful to track each sent smb, 37 in a similar way to the mid which is useful to track each sent smb,
@@ -500,6 +499,7 @@ is_valid_oplock_break(struct smb_hdr *buf, struct TCP_Server_Info *srv)
500 struct cifsTconInfo *tcon; 499 struct cifsTconInfo *tcon;
501 struct cifsInodeInfo *pCifsInode; 500 struct cifsInodeInfo *pCifsInode;
502 struct cifsFileInfo *netfile; 501 struct cifsFileInfo *netfile;
502 int rc;
503 503
504 cFYI(1, ("Checking for oplock break or dnotify response")); 504 cFYI(1, ("Checking for oplock break or dnotify response"));
505 if ((pSMB->hdr.Command == SMB_COM_NT_TRANSACT) && 505 if ((pSMB->hdr.Command == SMB_COM_NT_TRANSACT) &&
@@ -562,30 +562,40 @@ is_valid_oplock_break(struct smb_hdr *buf, struct TCP_Server_Info *srv)
562 continue; 562 continue;
563 563
564 cifs_stats_inc(&tcon->num_oplock_brks); 564 cifs_stats_inc(&tcon->num_oplock_brks);
565 write_lock(&GlobalSMBSeslock); 565 read_lock(&GlobalSMBSeslock);
566 list_for_each(tmp2, &tcon->openFileList) { 566 list_for_each(tmp2, &tcon->openFileList) {
567 netfile = list_entry(tmp2, struct cifsFileInfo, 567 netfile = list_entry(tmp2, struct cifsFileInfo,
568 tlist); 568 tlist);
569 if (pSMB->Fid != netfile->netfid) 569 if (pSMB->Fid != netfile->netfid)
570 continue; 570 continue;
571 571
572 write_unlock(&GlobalSMBSeslock); 572 /*
573 read_unlock(&cifs_tcp_ses_lock); 573 * don't do anything if file is about to be
574 * closed anyway.
575 */
576 if (netfile->closePend) {
577 read_unlock(&GlobalSMBSeslock);
578 read_unlock(&cifs_tcp_ses_lock);
579 return true;
580 }
581
574 cFYI(1, ("file id match, oplock break")); 582 cFYI(1, ("file id match, oplock break"));
575 pCifsInode = CIFS_I(netfile->pInode); 583 pCifsInode = CIFS_I(netfile->pInode);
576 pCifsInode->clientCanCacheAll = false; 584 pCifsInode->clientCanCacheAll = false;
577 if (pSMB->OplockLevel == 0) 585 if (pSMB->OplockLevel == 0)
578 pCifsInode->clientCanCacheRead = false; 586 pCifsInode->clientCanCacheRead = false;
579 pCifsInode->oplockPending = true; 587 rc = slow_work_enqueue(&netfile->oplock_break);
580 AllocOplockQEntry(netfile->pInode, 588 if (rc) {
581 netfile->netfid, tcon); 589 cERROR(1, ("failed to enqueue oplock "
582 cFYI(1, ("about to wake up oplock thread")); 590 "break: %d\n", rc));
583 if (oplockThread) 591 } else {
584 wake_up_process(oplockThread); 592 netfile->oplock_break_cancelled = false;
585 593 }
594 read_unlock(&GlobalSMBSeslock);
595 read_unlock(&cifs_tcp_ses_lock);
586 return true; 596 return true;
587 } 597 }
588 write_unlock(&GlobalSMBSeslock); 598 read_unlock(&GlobalSMBSeslock);
589 read_unlock(&cifs_tcp_ses_lock); 599 read_unlock(&cifs_tcp_ses_lock);
590 cFYI(1, ("No matching file for oplock break")); 600 cFYI(1, ("No matching file for oplock break"));
591 return true; 601 return true;
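
The rewritten scan above only needs GlobalSMBSeslock for reading, skips files that are already marked closePend, and hands matching files to the new slow-work item instead of waking a global thread. A minimal standalone sketch of that control flow (hypothetical types; the locking itself is omitted):

#include <stdbool.h>
#include <stdio.h>

struct open_file {
        int netfid;
        bool close_pending;
        bool oplock_break_queued;
        struct open_file *next;
};

/* Walk the per-tcon open file list; in the kernel this runs with
 * GlobalSMBSeslock held for reading. */
static bool handle_oplock_break(struct open_file *head, int fid)
{
        for (struct open_file *f = head; f; f = f->next) {
                if (f->netfid != fid)
                        continue;
                if (f->close_pending)           /* about to be closed, nothing to do */
                        return true;
                f->oplock_break_queued = true;  /* stands in for slow_work_enqueue() */
                return true;
        }
        return true;    /* no match: still "handled", as in the original */
}

int main(void)
{
        struct open_file b = { .netfid = 7, .close_pending = true };
        struct open_file a = { .netfid = 3, .next = &b };

        handle_oplock_break(&a, 3);
        printf("fid 3 queued: %d\n", a.oplock_break_queued);
        handle_oplock_break(&a, 7);
        printf("fid 7 queued: %d\n", b.oplock_break_queued);
        return 0;
}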
diff --git a/fs/cifs/readdir.c b/fs/cifs/readdir.c
index f823a4a208a7..1f098ca71636 100644
--- a/fs/cifs/readdir.c
+++ b/fs/cifs/readdir.c
@@ -146,7 +146,7 @@ cifs_fill_common_info(struct cifs_fattr *fattr, struct cifs_sb_info *cifs_sb)
146 } 146 }
147} 147}
148 148
149void 149static void
150cifs_dir_info_to_fattr(struct cifs_fattr *fattr, FILE_DIRECTORY_INFO *info, 150cifs_dir_info_to_fattr(struct cifs_fattr *fattr, FILE_DIRECTORY_INFO *info,
151 struct cifs_sb_info *cifs_sb) 151 struct cifs_sb_info *cifs_sb)
152{ 152{
@@ -161,7 +161,7 @@ cifs_dir_info_to_fattr(struct cifs_fattr *fattr, FILE_DIRECTORY_INFO *info,
161 cifs_fill_common_info(fattr, cifs_sb); 161 cifs_fill_common_info(fattr, cifs_sb);
162} 162}
163 163
164void 164static void
165cifs_std_info_to_fattr(struct cifs_fattr *fattr, FIND_FILE_STANDARD_INFO *info, 165cifs_std_info_to_fattr(struct cifs_fattr *fattr, FIND_FILE_STANDARD_INFO *info,
166 struct cifs_sb_info *cifs_sb) 166 struct cifs_sb_info *cifs_sb)
167{ 167{
diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c
index 1da4ab250eae..07b8e71544ee 100644
--- a/fs/cifs/transport.c
+++ b/fs/cifs/transport.c
@@ -103,56 +103,6 @@ DeleteMidQEntry(struct mid_q_entry *midEntry)
103 mempool_free(midEntry, cifs_mid_poolp); 103 mempool_free(midEntry, cifs_mid_poolp);
104} 104}
105 105
106struct oplock_q_entry *
107AllocOplockQEntry(struct inode *pinode, __u16 fid, struct cifsTconInfo *tcon)
108{
109 struct oplock_q_entry *temp;
110 if ((pinode == NULL) || (tcon == NULL)) {
111 cERROR(1, ("Null parms passed to AllocOplockQEntry"));
112 return NULL;
113 }
114 temp = (struct oplock_q_entry *) kmem_cache_alloc(cifs_oplock_cachep,
115 GFP_KERNEL);
116 if (temp == NULL)
117 return temp;
118 else {
119 temp->pinode = pinode;
120 temp->tcon = tcon;
121 temp->netfid = fid;
122 spin_lock(&cifs_oplock_lock);
123 list_add_tail(&temp->qhead, &cifs_oplock_list);
124 spin_unlock(&cifs_oplock_lock);
125 }
126 return temp;
127}
128
129void DeleteOplockQEntry(struct oplock_q_entry *oplockEntry)
130{
131 spin_lock(&cifs_oplock_lock);
132 /* should we check if list empty first? */
133 list_del(&oplockEntry->qhead);
134 spin_unlock(&cifs_oplock_lock);
135 kmem_cache_free(cifs_oplock_cachep, oplockEntry);
136}
137
138
139void DeleteTconOplockQEntries(struct cifsTconInfo *tcon)
140{
141 struct oplock_q_entry *temp;
142
143 if (tcon == NULL)
144 return;
145
146 spin_lock(&cifs_oplock_lock);
147 list_for_each_entry(temp, &cifs_oplock_list, qhead) {
148 if ((temp->tcon) && (temp->tcon == tcon)) {
149 list_del(&temp->qhead);
150 kmem_cache_free(cifs_oplock_cachep, temp);
151 }
152 }
153 spin_unlock(&cifs_oplock_lock);
154}
155
156static int 106static int
157smb_sendv(struct TCP_Server_Info *server, struct kvec *iov, int n_vec) 107smb_sendv(struct TCP_Server_Info *server, struct kvec *iov, int n_vec)
158{ 108{
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index 8e1e5e19d21e..9d5360c4c2af 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -41,8 +41,9 @@ struct wb_writeback_args {
41 long nr_pages; 41 long nr_pages;
42 struct super_block *sb; 42 struct super_block *sb;
43 enum writeback_sync_modes sync_mode; 43 enum writeback_sync_modes sync_mode;
44 int for_kupdate; 44 int for_kupdate:1;
45 int range_cyclic; 45 int range_cyclic:1;
46 int for_background:1;
46}; 47};
47 48
48/* 49/*
@@ -249,14 +250,25 @@ static void bdi_sync_writeback(struct backing_dev_info *bdi,
249 * completion. Caller need not hold sb s_umount semaphore. 250 * completion. Caller need not hold sb s_umount semaphore.
250 * 251 *
251 */ 252 */
252void bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages) 253void bdi_start_writeback(struct backing_dev_info *bdi, struct super_block *sb,
254 long nr_pages)
253{ 255{
254 struct wb_writeback_args args = { 256 struct wb_writeback_args args = {
257 .sb = sb,
255 .sync_mode = WB_SYNC_NONE, 258 .sync_mode = WB_SYNC_NONE,
256 .nr_pages = nr_pages, 259 .nr_pages = nr_pages,
257 .range_cyclic = 1, 260 .range_cyclic = 1,
258 }; 261 };
259 262
263 /*
264 * We treat @nr_pages=0 as the special case to do background writeback,
265 * ie. to sync pages until the background dirty threshold is reached.
266 */
267 if (!nr_pages) {
268 args.nr_pages = LONG_MAX;
269 args.for_background = 1;
270 }
271
260 bdi_alloc_queue_work(bdi, &args); 272 bdi_alloc_queue_work(bdi, &args);
261} 273}
262 274
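
bdi_start_writeback() now treats nr_pages == 0 as a request for background writeback, substituting LONG_MAX and setting for_background so the loop later stops at the dirty threshold rather than at a page count. A small sketch of that sentinel convention:

#include <limits.h>
#include <stdio.h>

struct wb_args {
        long nr_pages;
        int for_background;
};

/* Mirror of the convention above: 0 means "background mode, no page limit". */
static struct wb_args make_wb_args(long nr_pages)
{
        struct wb_args args = { .nr_pages = nr_pages };

        if (!nr_pages) {
                args.nr_pages = LONG_MAX;
                args.for_background = 1;
        }
        return args;
}

int main(void)
{
        struct wb_args a = make_wb_args(0), b = make_wb_args(1024);

        printf("background=%d nr_pages=%ld\n", a.for_background, a.nr_pages);
        printf("background=%d nr_pages=%ld\n", b.for_background, b.nr_pages);
        return 0;
}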
@@ -310,7 +322,7 @@ static bool inode_dirtied_after(struct inode *inode, unsigned long t)
310 * For inodes being constantly redirtied, dirtied_when can get stuck. 322 * For inodes being constantly redirtied, dirtied_when can get stuck.
311 * It _appears_ to be in the future, but is actually in distant past. 323 * It _appears_ to be in the future, but is actually in distant past.
312 * This test is necessary to prevent such wrapped-around relative times 324 * This test is necessary to prevent such wrapped-around relative times
313 * from permanently stopping the whole pdflush writeback. 325 * from permanently stopping the whole bdi writeback.
314 */ 326 */
315 ret = ret && time_before_eq(inode->dirtied_when, jiffies); 327 ret = ret && time_before_eq(inode->dirtied_when, jiffies);
316#endif 328#endif
@@ -324,13 +336,38 @@ static void move_expired_inodes(struct list_head *delaying_queue,
324 struct list_head *dispatch_queue, 336 struct list_head *dispatch_queue,
325 unsigned long *older_than_this) 337 unsigned long *older_than_this)
326{ 338{
339 LIST_HEAD(tmp);
340 struct list_head *pos, *node;
341 struct super_block *sb = NULL;
342 struct inode *inode;
343 int do_sb_sort = 0;
344
327 while (!list_empty(delaying_queue)) { 345 while (!list_empty(delaying_queue)) {
328 struct inode *inode = list_entry(delaying_queue->prev, 346 inode = list_entry(delaying_queue->prev, struct inode, i_list);
329 struct inode, i_list);
330 if (older_than_this && 347 if (older_than_this &&
331 inode_dirtied_after(inode, *older_than_this)) 348 inode_dirtied_after(inode, *older_than_this))
332 break; 349 break;
333 list_move(&inode->i_list, dispatch_queue); 350 if (sb && sb != inode->i_sb)
351 do_sb_sort = 1;
352 sb = inode->i_sb;
353 list_move(&inode->i_list, &tmp);
354 }
355
356 /* just one sb in list, splice to dispatch_queue and we're done */
357 if (!do_sb_sort) {
358 list_splice(&tmp, dispatch_queue);
359 return;
360 }
361
362 /* Move inodes from one superblock together */
363 while (!list_empty(&tmp)) {
364 inode = list_entry(tmp.prev, struct inode, i_list);
365 sb = inode->i_sb;
366 list_for_each_prev_safe(pos, node, &tmp) {
367 inode = list_entry(pos, struct inode, i_list);
368 if (inode->i_sb == sb)
369 list_move(&inode->i_list, dispatch_queue);
370 }
334 } 371 }
335} 372}
336 373
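
move_expired_inodes() first drains expired inodes onto a temporary list and, only when more than one superblock is involved, re-splices them so that inodes of the same superblock end up adjacent, keeping per-filesystem writeback contiguous. The same two-pass grouping on a plain singly linked list (illustrative only):

#include <stdio.h>

struct item {
        int sb;                 /* which "superblock" the inode belongs to */
        struct item *next;
};

/* Append items from @src to the list at @dst_tail grouped by ->sb, keeping
 * relative order within each group (a simplified analogue of the splice
 * loop above). */
static void group_by_sb(struct item **src, struct item **dst_tail)
{
        while (*src) {
                int sb = (*src)->sb;

                for (struct item **pp = src; *pp; ) {
                        if ((*pp)->sb == sb) {
                                struct item *it = *pp;
                                *pp = it->next;         /* unlink */
                                it->next = NULL;
                                *dst_tail = it;         /* append */
                                dst_tail = &it->next;
                        } else {
                                pp = &(*pp)->next;
                        }
                }
        }
}

int main(void)
{
        struct item c = { .sb = 1 };
        struct item b = { .sb = 2, .next = &c };
        struct item a = { .sb = 1, .next = &b };
        struct item *src = &a, *dst = NULL;

        group_by_sb(&src, &dst);
        for (struct item *it = dst; it; it = it->next)
                printf("sb %d\n", it->sb);      /* prints 1 1 2 */
        return 0;
}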
@@ -439,8 +476,18 @@ writeback_single_inode(struct inode *inode, struct writeback_control *wbc)
439 spin_lock(&inode_lock); 476 spin_lock(&inode_lock);
440 inode->i_state &= ~I_SYNC; 477 inode->i_state &= ~I_SYNC;
441 if (!(inode->i_state & (I_FREEING | I_CLEAR))) { 478 if (!(inode->i_state & (I_FREEING | I_CLEAR))) {
442 if (!(inode->i_state & I_DIRTY) && 479 if ((inode->i_state & I_DIRTY_PAGES) && wbc->for_kupdate) {
443 mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) { 480 /*
481 * More pages get dirtied by a fast dirtier.
482 */
483 goto select_queue;
484 } else if (inode->i_state & I_DIRTY) {
485 /*
486 * At least XFS will redirty the inode during the
487 * writeback (delalloc) and on io completion (isize).
488 */
489 redirty_tail(inode);
490 } else if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) {
444 /* 491 /*
445 * We didn't write back all the pages. nfs_writepages() 492 * We didn't write back all the pages. nfs_writepages()
446 * sometimes bales out without doing anything. Redirty 493 * sometimes bales out without doing anything. Redirty
@@ -462,6 +509,7 @@ writeback_single_inode(struct inode *inode, struct writeback_control *wbc)
462 * soon as the queue becomes uncongested. 509 * soon as the queue becomes uncongested.
463 */ 510 */
464 inode->i_state |= I_DIRTY_PAGES; 511 inode->i_state |= I_DIRTY_PAGES;
512select_queue:
465 if (wbc->nr_to_write <= 0) { 513 if (wbc->nr_to_write <= 0) {
466 /* 514 /*
467 * slice used up: queue for next turn 515 * slice used up: queue for next turn
@@ -484,12 +532,6 @@ writeback_single_inode(struct inode *inode, struct writeback_control *wbc)
484 inode->i_state |= I_DIRTY_PAGES; 532 inode->i_state |= I_DIRTY_PAGES;
485 redirty_tail(inode); 533 redirty_tail(inode);
486 } 534 }
487 } else if (inode->i_state & I_DIRTY) {
488 /*
489 * Someone redirtied the inode while were writing back
490 * the pages.
491 */
492 redirty_tail(inode);
493 } else if (atomic_read(&inode->i_count)) { 535 } else if (atomic_read(&inode->i_count)) {
494 /* 536 /*
495 * The inode is clean, inuse 537 * The inode is clean, inuse
@@ -506,6 +548,17 @@ writeback_single_inode(struct inode *inode, struct writeback_control *wbc)
506 return ret; 548 return ret;
507} 549}
508 550
551static void unpin_sb_for_writeback(struct super_block **psb)
552{
553 struct super_block *sb = *psb;
554
555 if (sb) {
556 up_read(&sb->s_umount);
557 put_super(sb);
558 *psb = NULL;
559 }
560}
561
509/* 562/*
510 * For WB_SYNC_NONE writeback, the caller does not have the sb pinned 563 * For WB_SYNC_NONE writeback, the caller does not have the sb pinned
511 * before calling writeback. So make sure that we do pin it, so it doesn't 564 * before calling writeback. So make sure that we do pin it, so it doesn't
@@ -515,11 +568,20 @@ writeback_single_inode(struct inode *inode, struct writeback_control *wbc)
515 * 1 if we failed. 568 * 1 if we failed.
516 */ 569 */
517static int pin_sb_for_writeback(struct writeback_control *wbc, 570static int pin_sb_for_writeback(struct writeback_control *wbc,
518 struct inode *inode) 571 struct inode *inode, struct super_block **psb)
519{ 572{
520 struct super_block *sb = inode->i_sb; 573 struct super_block *sb = inode->i_sb;
521 574
522 /* 575 /*
576 * If this sb is already pinned, nothing more to do. If not and
577 * *psb is non-NULL, unpin the old one first
578 */
579 if (sb == *psb)
580 return 0;
581 else if (*psb)
582 unpin_sb_for_writeback(psb);
583
584 /*
523 * Caller must already hold the ref for this 585 * Caller must already hold the ref for this
524 */ 586 */
525 if (wbc->sync_mode == WB_SYNC_ALL) { 587 if (wbc->sync_mode == WB_SYNC_ALL) {
@@ -532,7 +594,7 @@ static int pin_sb_for_writeback(struct writeback_control *wbc,
532 if (down_read_trylock(&sb->s_umount)) { 594 if (down_read_trylock(&sb->s_umount)) {
533 if (sb->s_root) { 595 if (sb->s_root) {
534 spin_unlock(&sb_lock); 596 spin_unlock(&sb_lock);
535 return 0; 597 goto pinned;
536 } 598 }
537 /* 599 /*
538 * umounted, drop rwsem again and fall through to failure 600 * umounted, drop rwsem again and fall through to failure
@@ -543,24 +605,15 @@ static int pin_sb_for_writeback(struct writeback_control *wbc,
543 sb->s_count--; 605 sb->s_count--;
544 spin_unlock(&sb_lock); 606 spin_unlock(&sb_lock);
545 return 1; 607 return 1;
546} 608pinned:
547 609 *psb = sb;
548static void unpin_sb_for_writeback(struct writeback_control *wbc, 610 return 0;
549 struct inode *inode)
550{
551 struct super_block *sb = inode->i_sb;
552
553 if (wbc->sync_mode == WB_SYNC_ALL)
554 return;
555
556 up_read(&sb->s_umount);
557 put_super(sb);
558} 611}
559 612
560static void writeback_inodes_wb(struct bdi_writeback *wb, 613static void writeback_inodes_wb(struct bdi_writeback *wb,
561 struct writeback_control *wbc) 614 struct writeback_control *wbc)
562{ 615{
563 struct super_block *sb = wbc->sb; 616 struct super_block *sb = wbc->sb, *pin_sb = NULL;
564 const int is_blkdev_sb = sb_is_blkdev_sb(sb); 617 const int is_blkdev_sb = sb_is_blkdev_sb(sb);
565 const unsigned long start = jiffies; /* livelock avoidance */ 618 const unsigned long start = jiffies; /* livelock avoidance */
566 619
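
pin_sb_for_writeback() now remembers the last pinned superblock in *psb and only unpins when the next inode belongs to a different one, so consecutive inodes of the same filesystem no longer pay a pin/unpin round trip each. A sketch of that cache-the-last-acquired-resource shape, with a hypothetical resource type standing in for the superblock:

#include <stdio.h>

struct resource { int id; };

static void release(struct resource **cached)
{
        if (*cached) {
                printf("release %d\n", (*cached)->id);
                *cached = NULL;
        }
}

/* Acquire @r unless it is already the cached one; drop the old pin first. */
static int acquire_cached(struct resource *r, struct resource **cached)
{
        if (r == *cached)
                return 0;               /* already pinned, nothing to do */
        release(cached);
        printf("acquire %d\n", r->id);
        *cached = r;
        return 0;
}

int main(void)
{
        struct resource a = { 1 }, b = { 2 };
        struct resource *cached = NULL;

        acquire_cached(&a, &cached);    /* acquire 1 */
        acquire_cached(&a, &cached);    /* cached, no-op */
        acquire_cached(&b, &cached);    /* release 1, acquire 2 */
        release(&cached);               /* final unpin, as after the loop above */
        return 0;
}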
@@ -619,7 +672,7 @@ static void writeback_inodes_wb(struct bdi_writeback *wb,
619 if (inode_dirtied_after(inode, start)) 672 if (inode_dirtied_after(inode, start))
620 break; 673 break;
621 674
622 if (pin_sb_for_writeback(wbc, inode)) { 675 if (pin_sb_for_writeback(wbc, inode, &pin_sb)) {
623 requeue_io(inode); 676 requeue_io(inode);
624 continue; 677 continue;
625 } 678 }
@@ -628,7 +681,6 @@ static void writeback_inodes_wb(struct bdi_writeback *wb,
628 __iget(inode); 681 __iget(inode);
629 pages_skipped = wbc->pages_skipped; 682 pages_skipped = wbc->pages_skipped;
630 writeback_single_inode(inode, wbc); 683 writeback_single_inode(inode, wbc);
631 unpin_sb_for_writeback(wbc, inode);
632 if (wbc->pages_skipped != pages_skipped) { 684 if (wbc->pages_skipped != pages_skipped) {
633 /* 685 /*
634 * writeback is not making progress due to locked 686 * writeback is not making progress due to locked
@@ -648,6 +700,8 @@ static void writeback_inodes_wb(struct bdi_writeback *wb,
648 wbc->more_io = 1; 700 wbc->more_io = 1;
649 } 701 }
650 702
703 unpin_sb_for_writeback(&pin_sb);
704
651 spin_unlock(&inode_lock); 705 spin_unlock(&inode_lock);
652 /* Leave any unwritten inodes on b_io */ 706 /* Leave any unwritten inodes on b_io */
653} 707}
@@ -706,6 +760,7 @@ static long wb_writeback(struct bdi_writeback *wb,
706 }; 760 };
707 unsigned long oldest_jif; 761 unsigned long oldest_jif;
708 long wrote = 0; 762 long wrote = 0;
763 struct inode *inode;
709 764
710 if (wbc.for_kupdate) { 765 if (wbc.for_kupdate) {
711 wbc.older_than_this = &oldest_jif; 766 wbc.older_than_this = &oldest_jif;
@@ -719,20 +774,16 @@ static long wb_writeback(struct bdi_writeback *wb,
719 774
720 for (;;) { 775 for (;;) {
721 /* 776 /*
722 * Don't flush anything for non-integrity writeback where 777 * Stop writeback when nr_pages has been consumed
723 * no nr_pages was given
724 */ 778 */
725 if (!args->for_kupdate && args->nr_pages <= 0 && 779 if (args->nr_pages <= 0)
726 args->sync_mode == WB_SYNC_NONE)
727 break; 780 break;
728 781
729 /* 782 /*
730 * If no specific pages were given and this is just a 783 * For background writeout, stop when we are below the
731 * periodic background writeout and we are below the 784 * background dirty threshold
732 * background dirty threshold, don't do anything
733 */ 785 */
734 if (args->for_kupdate && args->nr_pages <= 0 && 786 if (args->for_background && !over_bground_thresh())
735 !over_bground_thresh())
736 break; 787 break;
737 788
738 wbc.more_io = 0; 789 wbc.more_io = 0;
@@ -744,13 +795,32 @@ static long wb_writeback(struct bdi_writeback *wb,
744 wrote += MAX_WRITEBACK_PAGES - wbc.nr_to_write; 795 wrote += MAX_WRITEBACK_PAGES - wbc.nr_to_write;
745 796
746 /* 797 /*
747 * If we ran out of stuff to write, bail unless more_io got set 798 * If we consumed everything, see if we have more
748 */ 799 */
749 if (wbc.nr_to_write > 0 || wbc.pages_skipped > 0) { 800 if (wbc.nr_to_write <= 0)
750 if (wbc.more_io && !wbc.for_kupdate) 801 continue;
751 continue; 802 /*
803 * Didn't write everything and we don't have more IO, bail
804 */
805 if (!wbc.more_io)
752 break; 806 break;
807 /*
808 * Did we write something? Try for more
809 */
810 if (wbc.nr_to_write < MAX_WRITEBACK_PAGES)
811 continue;
812 /*
813 * Nothing written. Wait for some inode to
814 * become available for writeback. Otherwise
815 * we'll just busyloop.
816 */
817 spin_lock(&inode_lock);
818 if (!list_empty(&wb->b_more_io)) {
819 inode = list_entry(wb->b_more_io.prev,
820 struct inode, i_list);
821 inode_wait_for_writeback(inode);
753 } 822 }
823 spin_unlock(&inode_lock);
754 } 824 }
755 825
756 return wrote; 826 return wrote;
@@ -1060,9 +1130,6 @@ EXPORT_SYMBOL(__mark_inode_dirty);
1060 * If older_than_this is non-NULL, then only write out inodes which 1130 * If older_than_this is non-NULL, then only write out inodes which
1061 * had their first dirtying at a time earlier than *older_than_this. 1131 * had their first dirtying at a time earlier than *older_than_this.
1062 * 1132 *
1063 * If we're a pdlfush thread, then implement pdflush collision avoidance
1064 * against the entire list.
1065 *
1066 * If `bdi' is non-zero then we're being asked to writeback a specific queue. 1133 * If `bdi' is non-zero then we're being asked to writeback a specific queue.
1067 * This function assumes that the blockdev superblock's inodes are backed by 1134 * This function assumes that the blockdev superblock's inodes are backed by
1068 * a variety of queues, so all inodes are searched. For other superblocks, 1135 * a variety of queues, so all inodes are searched. For other superblocks,
@@ -1141,7 +1208,7 @@ void writeback_inodes_sb(struct super_block *sb)
1141 nr_to_write = nr_dirty + nr_unstable + 1208 nr_to_write = nr_dirty + nr_unstable +
1142 (inodes_stat.nr_inodes - inodes_stat.nr_unused); 1209 (inodes_stat.nr_inodes - inodes_stat.nr_unused);
1143 1210
1144 bdi_writeback_all(sb, nr_to_write); 1211 bdi_start_writeback(sb->s_bdi, sb, nr_to_write);
1145} 1212}
1146EXPORT_SYMBOL(writeback_inodes_sb); 1213EXPORT_SYMBOL(writeback_inodes_sb);
1147 1214
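
The reworked wb_writeback() loop encodes a small policy: stop once nr_pages is consumed, stop background writeback when below the dirty threshold, retry immediately if the last pass used up its slice or made progress, stop if there is no more I/O, and otherwise wait on an inode from b_more_io instead of busylooping. The sketch below collapses the before-pass and after-pass checks of one iteration into a single decision helper; it is a reading aid, not the kernel function:

#include <stdio.h>

enum wb_next { WB_STOP, WB_RETRY, WB_WAIT_INODE };

struct wb_state {
        long nr_pages;          /* budget left */
        int for_background;
        int over_bground_thresh;
        int more_io;
        long nr_to_write;       /* unused part of this pass's slice */
        long max_pages;         /* MAX_WRITEBACK_PAGES analogue */
};

static enum wb_next wb_decide(const struct wb_state *s)
{
        if (s->nr_pages <= 0)
                return WB_STOP;                 /* budget consumed */
        if (s->for_background && !s->over_bground_thresh)
                return WB_STOP;                 /* below background threshold */
        if (s->nr_to_write <= 0)
                return WB_RETRY;                /* wrote a full slice, go again */
        if (!s->more_io)
                return WB_STOP;                 /* nothing left to write */
        if (s->nr_to_write < s->max_pages)
                return WB_RETRY;                /* wrote something, try for more */
        return WB_WAIT_INODE;                   /* nothing written, wait on b_more_io */
}

int main(void)
{
        struct wb_state s = { .nr_pages = 100, .more_io = 1,
                              .nr_to_write = 1024, .max_pages = 1024 };

        printf("%d\n", wb_decide(&s));          /* WB_WAIT_INODE */
        return 0;
}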
diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h
index 1cef1398e358..3cd9ccdcbd8f 100644
--- a/include/acpi/acpi_bus.h
+++ b/include/acpi/acpi_bus.h
@@ -70,7 +70,6 @@ enum acpi_bus_device_type {
70 ACPI_BUS_TYPE_POWER, 70 ACPI_BUS_TYPE_POWER,
71 ACPI_BUS_TYPE_PROCESSOR, 71 ACPI_BUS_TYPE_PROCESSOR,
72 ACPI_BUS_TYPE_THERMAL, 72 ACPI_BUS_TYPE_THERMAL,
73 ACPI_BUS_TYPE_SYSTEM,
74 ACPI_BUS_TYPE_POWER_BUTTON, 73 ACPI_BUS_TYPE_POWER_BUTTON,
75 ACPI_BUS_TYPE_SLEEP_BUTTON, 74 ACPI_BUS_TYPE_SLEEP_BUTTON,
76 ACPI_BUS_DEVICE_TYPE_COUNT 75 ACPI_BUS_DEVICE_TYPE_COUNT
@@ -142,10 +141,7 @@ struct acpi_device_status {
142 141
143struct acpi_device_flags { 142struct acpi_device_flags {
144 u32 dynamic_status:1; 143 u32 dynamic_status:1;
145 u32 hardware_id:1;
146 u32 compatible_ids:1;
147 u32 bus_address:1; 144 u32 bus_address:1;
148 u32 unique_id:1;
149 u32 removable:1; 145 u32 removable:1;
150 u32 ejectable:1; 146 u32 ejectable:1;
151 u32 lockable:1; 147 u32 lockable:1;
@@ -154,7 +150,7 @@ struct acpi_device_flags {
154 u32 performance_manageable:1; 150 u32 performance_manageable:1;
155 u32 wake_capable:1; /* Wakeup(_PRW) supported? */ 151 u32 wake_capable:1; /* Wakeup(_PRW) supported? */
156 u32 force_power_state:1; 152 u32 force_power_state:1;
157 u32 reserved:19; 153 u32 reserved:22;
158}; 154};
159 155
160/* File System */ 156/* File System */
@@ -172,20 +168,23 @@ typedef unsigned long acpi_bus_address;
172typedef char acpi_device_name[40]; 168typedef char acpi_device_name[40];
173typedef char acpi_device_class[20]; 169typedef char acpi_device_class[20];
174 170
171struct acpi_hardware_id {
172 struct list_head list;
173 char *id;
174};
175
175struct acpi_device_pnp { 176struct acpi_device_pnp {
176 acpi_bus_id bus_id; /* Object name */ 177 acpi_bus_id bus_id; /* Object name */
177 acpi_bus_address bus_address; /* _ADR */ 178 acpi_bus_address bus_address; /* _ADR */
178 char *hardware_id; /* _HID */
179 struct acpica_device_id_list *cid_list; /* _CIDs */
180 char *unique_id; /* _UID */ 179 char *unique_id; /* _UID */
180 struct list_head ids; /* _HID and _CIDs */
181 acpi_device_name device_name; /* Driver-determined */ 181 acpi_device_name device_name; /* Driver-determined */
182 acpi_device_class device_class; /* " */ 182 acpi_device_class device_class; /* " */
183}; 183};
184 184
185#define acpi_device_bid(d) ((d)->pnp.bus_id) 185#define acpi_device_bid(d) ((d)->pnp.bus_id)
186#define acpi_device_adr(d) ((d)->pnp.bus_address) 186#define acpi_device_adr(d) ((d)->pnp.bus_address)
187#define acpi_device_hid(d) ((d)->pnp.hardware_id) 187char *acpi_device_hid(struct acpi_device *device);
188#define acpi_device_uid(d) ((d)->pnp.unique_id)
189#define acpi_device_name(d) ((d)->pnp.device_name) 188#define acpi_device_name(d) ((d)->pnp.device_name)
190#define acpi_device_class(d) ((d)->pnp.device_class) 189#define acpi_device_class(d) ((d)->pnp.device_class)
191 190
@@ -262,7 +261,8 @@ struct acpi_device_wakeup {
262/* Device */ 261/* Device */
263 262
264struct acpi_device { 263struct acpi_device {
265 acpi_handle handle; 264 int device_type;
265 acpi_handle handle; /* no handle for fixed hardware */
266 struct acpi_device *parent; 266 struct acpi_device *parent;
267 struct list_head children; 267 struct list_head children;
268 struct list_head node; 268 struct list_head node;
@@ -322,6 +322,8 @@ extern void unregister_acpi_bus_notifier(struct notifier_block *nb);
322 322
323int acpi_bus_get_device(acpi_handle handle, struct acpi_device **device); 323int acpi_bus_get_device(acpi_handle handle, struct acpi_device **device);
324void acpi_bus_data_handler(acpi_handle handle, void *context); 324void acpi_bus_data_handler(acpi_handle handle, void *context);
325acpi_status acpi_bus_get_status_handle(acpi_handle handle,
326 unsigned long long *sta);
325int acpi_bus_get_status(struct acpi_device *device); 327int acpi_bus_get_status(struct acpi_device *device);
326int acpi_bus_get_power(acpi_handle handle, int *state); 328int acpi_bus_get_power(acpi_handle handle, int *state);
327int acpi_bus_set_power(acpi_handle handle, int state); 329int acpi_bus_set_power(acpi_handle handle, int state);
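
With this change an ACPI device carries its _HID and _CIDs as one list of struct acpi_hardware_id, and acpi_device_hid() becomes a function (presumably returning the first list entry). Matching a driver against such a list then reduces to a simple walk, as in this userspace sketch with illustrative IDs:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

struct hardware_id {
        const char *id;
        struct hardware_id *next;
};

/* First ID on the list plays the role of the _HID in this sketch. */
static const char *device_hid(const struct hardware_id *ids)
{
        return ids ? ids->id : "device";
}

static bool device_matches(const struct hardware_id *ids, const char *wanted)
{
        for (; ids; ids = ids->next)
                if (!strcmp(ids->id, wanted))
                        return true;
        return false;
}

int main(void)
{
        struct hardware_id cid = { .id = "PNP0C0A" };
        struct hardware_id hid = { .id = "ACPI0003", .next = &cid };

        printf("hid=%s matches PNP0C0A: %d\n",
               device_hid(&hid), device_matches(&hid, "PNP0C0A"));
        return 0;
}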
diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h
index 0ee33c2e6129..b449e738533a 100644
--- a/include/linux/backing-dev.h
+++ b/include/linux/backing-dev.h
@@ -101,7 +101,8 @@ int bdi_register(struct backing_dev_info *bdi, struct device *parent,
101 const char *fmt, ...); 101 const char *fmt, ...);
102int bdi_register_dev(struct backing_dev_info *bdi, dev_t dev); 102int bdi_register_dev(struct backing_dev_info *bdi, dev_t dev);
103void bdi_unregister(struct backing_dev_info *bdi); 103void bdi_unregister(struct backing_dev_info *bdi);
104void bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages); 104void bdi_start_writeback(struct backing_dev_info *bdi, struct super_block *sb,
105 long nr_pages);
105int bdi_writeback_task(struct bdi_writeback *wb); 106int bdi_writeback_task(struct bdi_writeback *wb);
106int bdi_has_dirty_io(struct backing_dev_info *bdi); 107int bdi_has_dirty_io(struct backing_dev_info *bdi);
107 108
diff --git a/include/linux/backlight.h b/include/linux/backlight.h
index 79ca2da81c87..0f5f57858a23 100644
--- a/include/linux/backlight.h
+++ b/include/linux/backlight.h
@@ -27,6 +27,11 @@
27 * Any other use of the locks below is probably wrong. 27 * Any other use of the locks below is probably wrong.
28 */ 28 */
29 29
30enum backlight_update_reason {
31 BACKLIGHT_UPDATE_HOTKEY,
32 BACKLIGHT_UPDATE_SYSFS,
33};
34
30struct backlight_device; 35struct backlight_device;
31struct fb_info; 36struct fb_info;
32 37
@@ -100,6 +105,8 @@ static inline void backlight_update_status(struct backlight_device *bd)
100extern struct backlight_device *backlight_device_register(const char *name, 105extern struct backlight_device *backlight_device_register(const char *name,
101 struct device *dev, void *devdata, struct backlight_ops *ops); 106 struct device *dev, void *devdata, struct backlight_ops *ops);
102extern void backlight_device_unregister(struct backlight_device *bd); 107extern void backlight_device_unregister(struct backlight_device *bd);
108extern void backlight_force_update(struct backlight_device *bd,
109 enum backlight_update_reason reason);
103 110
104#define to_backlight_device(obj) container_of(obj, struct backlight_device, dev) 111#define to_backlight_device(obj) container_of(obj, struct backlight_device, dev)
105 112
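
backlight_force_update() lets a platform driver tell the backlight core that the brightness changed behind its back, for example from a hotkey handler, with the enum recording why. Its exact behaviour is not shown in this hunk; the mock below assumes it re-reads the hardware level and refreshes the cached value, which is only a guess at the flow:

#include <stdio.h>

enum backlight_update_reason {
        BACKLIGHT_UPDATE_HOTKEY,
        BACKLIGHT_UPDATE_SYSFS,
};

struct backlight_device {
        int brightness;                 /* cached value */
        int (*get_brightness)(void);    /* reads the hardware */
};

/* Mock of what backlight_force_update() presumably does: refresh the
 * cached brightness from the hardware and report why. */
static void backlight_force_update(struct backlight_device *bd,
                                   enum backlight_update_reason reason)
{
        bd->brightness = bd->get_brightness();
        printf("brightness now %d (reason %d)\n", bd->brightness, reason);
}

static int read_hw_brightness(void) { return 7; }

int main(void)
{
        struct backlight_device bd = { .brightness = 3,
                                       .get_brightness = read_hw_brightness };

        /* e.g. called from a platform driver's hotkey handler */
        backlight_force_update(&bd, BACKLIGHT_UPDATE_HOTKEY);
        return 0;
}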
diff --git a/include/linux/ipc.h b/include/linux/ipc.h
index b8826107b518..3b1594d662b0 100644
--- a/include/linux/ipc.h
+++ b/include/linux/ipc.h
@@ -78,8 +78,6 @@ struct ipc_kludge {
78#define IPCCALL(version,op) ((version)<<16 | (op)) 78#define IPCCALL(version,op) ((version)<<16 | (op))
79 79
80#ifdef __KERNEL__ 80#ifdef __KERNEL__
81
82#include <linux/kref.h>
83#include <linux/spinlock.h> 81#include <linux/spinlock.h>
84 82
85#define IPCMNI 32768 /* <= MAX_INT limit for ipc arrays (including sysctl changes) */ 83#define IPCMNI 32768 /* <= MAX_INT limit for ipc arrays (including sysctl changes) */
diff --git a/include/linux/kref.h b/include/linux/kref.h
index 0cef6badd6fb..b0cb0ebad9e6 100644
--- a/include/linux/kref.h
+++ b/include/linux/kref.h
@@ -16,7 +16,6 @@
16#define _KREF_H_ 16#define _KREF_H_
17 17
18#include <linux/types.h> 18#include <linux/types.h>
19#include <asm/atomic.h>
20 19
21struct kref { 20struct kref {
22 atomic_t refcount; 21 atomic_t refcount;
diff --git a/include/linux/mfd/wm831x/status.h b/include/linux/mfd/wm831x/status.h
new file mode 100644
index 000000000000..6bc090d0e3ac
--- /dev/null
+++ b/include/linux/mfd/wm831x/status.h
@@ -0,0 +1,34 @@
1/*
2 * include/linux/mfd/wm831x/status.h -- Status LEDs for WM831x
3 *
4 * Copyright 2009 Wolfson Microelectronics PLC.
5 *
6 * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License as published by the
10 * Free Software Foundation; either version 2 of the License, or (at your
11 * option) any later version.
12 *
13 */
14
15#ifndef __MFD_WM831X_STATUS_H__
16#define __MFD_WM831X_STATUS_H__
17
18#define WM831X_LED_SRC_MASK 0xC000 /* LED_SRC - [15:14] */
19#define WM831X_LED_SRC_SHIFT 14 /* LED_SRC - [15:14] */
20#define WM831X_LED_SRC_WIDTH 2 /* LED_SRC - [15:14] */
21#define WM831X_LED_MODE_MASK 0x0300 /* LED_MODE - [9:8] */
22#define WM831X_LED_MODE_SHIFT 8 /* LED_MODE - [9:8] */
23#define WM831X_LED_MODE_WIDTH 2 /* LED_MODE - [9:8] */
24#define WM831X_LED_SEQ_LEN_MASK 0x0030 /* LED_SEQ_LEN - [5:4] */
25#define WM831X_LED_SEQ_LEN_SHIFT 4 /* LED_SEQ_LEN - [5:4] */
26#define WM831X_LED_SEQ_LEN_WIDTH 2 /* LED_SEQ_LEN - [5:4] */
27#define WM831X_LED_DUR_MASK 0x000C /* LED_DUR - [3:2] */
28#define WM831X_LED_DUR_SHIFT 2 /* LED_DUR - [3:2] */
29#define WM831X_LED_DUR_WIDTH 2 /* LED_DUR - [3:2] */
30#define WM831X_LED_DUTY_CYC_MASK 0x0003 /* LED_DUTY_CYC - [1:0] */
31#define WM831X_LED_DUTY_CYC_SHIFT 0 /* LED_DUTY_CYC - [1:0] */
32#define WM831X_LED_DUTY_CYC_WIDTH 2 /* LED_DUTY_CYC - [1:0] */
33
34#endif
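
The new header provides only mask/shift/width triplets; drivers combine them with read-modify-write accesses (presumably via the wm831x MFD register helpers). A hedged example of extracting and updating the LED_MODE field with these macros, using a fake register variable in place of real register I/O:

#include <stdint.h>
#include <stdio.h>

#define WM831X_LED_MODE_MASK    0x0300  /* LED_MODE - [9:8] */
#define WM831X_LED_MODE_SHIFT   8

/* Fake register for the example; a real driver would go through the
 * wm831x register read/write helpers instead. */
static uint16_t status_reg = 0x0140;

static unsigned int led_mode_get(void)
{
        return (status_reg & WM831X_LED_MODE_MASK) >> WM831X_LED_MODE_SHIFT;
}

static void led_mode_set(unsigned int mode)
{
        status_reg = (status_reg & ~WM831X_LED_MODE_MASK) |
                     ((mode << WM831X_LED_MODE_SHIFT) & WM831X_LED_MODE_MASK);
}

int main(void)
{
        printf("mode=%u\n", led_mode_get());                    /* 1 */
        led_mode_set(2);
        printf("reg=0x%04x mode=%u\n", status_reg, led_mode_get());    /* 0x0240, 2 */
        return 0;
}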
diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h
index f6b90240dd41..d09db1bc9083 100644
--- a/include/linux/nfs_fs.h
+++ b/include/linux/nfs_fs.h
@@ -40,7 +40,6 @@
40#ifdef __KERNEL__ 40#ifdef __KERNEL__
41 41
42#include <linux/in.h> 42#include <linux/in.h>
43#include <linux/kref.h>
44#include <linux/mm.h> 43#include <linux/mm.h>
45#include <linux/pagemap.h> 44#include <linux/pagemap.h>
46#include <linux/rbtree.h> 45#include <linux/rbtree.h>
diff --git a/include/linux/perf_counter.h b/include/linux/perf_counter.h
index 368bd70f1d2d..7b7fbf433cff 100644
--- a/include/linux/perf_counter.h
+++ b/include/linux/perf_counter.h
@@ -361,7 +361,7 @@ enum perf_event_type {
361 * struct perf_event_header header; 361 * struct perf_event_header header;
362 * u32 pid, ppid; 362 * u32 pid, ppid;
363 * u32 tid, ptid; 363 * u32 tid, ptid;
364 * { u64 time; } && PERF_SAMPLE_TIME 364 * u64 time;
365 * }; 365 * };
366 */ 366 */
367 PERF_EVENT_FORK = 7, 367 PERF_EVENT_FORK = 7,
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index acefaf71e6dd..3a9d36d1e92a 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -357,7 +357,7 @@ enum perf_event_type {
357 * struct perf_event_header header; 357 * struct perf_event_header header;
358 * u32 pid, ppid; 358 * u32 pid, ppid;
359 * u32 tid, ptid; 359 * u32 tid, ptid;
360 * { u64 time; } && PERF_SAMPLE_TIME 360 * u64 time;
361 * }; 361 * };
362 */ 362 */
363 PERF_RECORD_FORK = 7, 363 PERF_RECORD_FORK = 7,
diff --git a/include/linux/spi/lms283gf05.h b/include/linux/spi/lms283gf05.h
new file mode 100644
index 000000000000..555d254e6606
--- /dev/null
+++ b/include/linux/spi/lms283gf05.h
@@ -0,0 +1,28 @@
1/*
2 * lms283gf05.h - Platform glue for Samsung LMS283GF05 LCD
3 *
4 * Copyright (C) 2009 Marek Vasut <marek.vasut@gmail.com>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
18*/
19
20#ifndef _INCLUDE_LINUX_SPI_LMS283GF05_H_
21#define _INCLUDE_LINUX_SPI_LMS283GF05_H_
22
23struct lms283gf05_pdata {
24 unsigned long reset_gpio;
25 bool reset_inverted;
26};
27
28#endif /* _INCLUDE_LINUX_SPI_LMS283GF05_H_ */
diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h
index 660a9de96f81..2aac8a83e89b 100644
--- a/include/linux/tracepoint.h
+++ b/include/linux/tracepoint.h
@@ -36,7 +36,7 @@ struct tracepoint {
36#ifndef DECLARE_TRACE 36#ifndef DECLARE_TRACE
37 37
38#define TP_PROTO(args...) args 38#define TP_PROTO(args...) args
39#define TP_ARGS(args...) args 39#define TP_ARGS(args...) args
40 40
41#ifdef CONFIG_TRACEPOINTS 41#ifdef CONFIG_TRACEPOINTS
42 42
diff --git a/include/trace/events/workqueue.h b/include/trace/events/workqueue.h
index fcfd9a1e4b96..e4612dbd7ba6 100644
--- a/include/trace/events/workqueue.h
+++ b/include/trace/events/workqueue.h
@@ -26,7 +26,7 @@ TRACE_EVENT(workqueue_insertion,
26 __entry->func = work->func; 26 __entry->func = work->func;
27 ), 27 ),
28 28
29 TP_printk("thread=%s:%d func=%pF", __entry->thread_comm, 29 TP_printk("thread=%s:%d func=%pf", __entry->thread_comm,
30 __entry->thread_pid, __entry->func) 30 __entry->thread_pid, __entry->func)
31); 31);
32 32
@@ -48,7 +48,7 @@ TRACE_EVENT(workqueue_execution,
48 __entry->func = work->func; 48 __entry->func = work->func;
49 ), 49 ),
50 50
51 TP_printk("thread=%s:%d func=%pF", __entry->thread_comm, 51 TP_printk("thread=%s:%d func=%pf", __entry->thread_comm,
52 __entry->thread_pid, __entry->func) 52 __entry->thread_pid, __entry->func)
53); 53);
54 54
diff --git a/kernel/futex.c b/kernel/futex.c
index 248dd119a86e..b911adceb2c4 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -89,36 +89,36 @@ struct futex_pi_state {
89 union futex_key key; 89 union futex_key key;
90}; 90};
91 91
92/* 92/**
93 * We use this hashed waitqueue instead of a normal wait_queue_t, so 93 * struct futex_q - The hashed futex queue entry, one per waiting task
94 * @task: the task waiting on the futex
95 * @lock_ptr: the hash bucket lock
96 * @key: the key the futex is hashed on
97 * @pi_state: optional priority inheritance state
98 * @rt_waiter: rt_waiter storage for use with requeue_pi
99 * @requeue_pi_key: the requeue_pi target futex key
100 * @bitset: bitset for the optional bitmasked wakeup
101 *
102 * We use this hashed waitqueue, instead of a normal wait_queue_t, so
94 * we can wake only the relevant ones (hashed queues may be shared). 103 * we can wake only the relevant ones (hashed queues may be shared).
95 * 104 *
96 * A futex_q has a woken state, just like tasks have TASK_RUNNING. 105 * A futex_q has a woken state, just like tasks have TASK_RUNNING.
97 * It is considered woken when plist_node_empty(&q->list) || q->lock_ptr == 0. 106 * It is considered woken when plist_node_empty(&q->list) || q->lock_ptr == 0.
98 * The order of wakup is always to make the first condition true, then 107 * The order of wakup is always to make the first condition true, then
99 * wake up q->waiter, then make the second condition true. 108 * the second.
109 *
110 * PI futexes are typically woken before they are removed from the hash list via
111 * the rt_mutex code. See unqueue_me_pi().
100 */ 112 */
101struct futex_q { 113struct futex_q {
102 struct plist_node list; 114 struct plist_node list;
103 /* Waiter reference */
104 struct task_struct *task;
105 115
106 /* Which hash list lock to use: */ 116 struct task_struct *task;
107 spinlock_t *lock_ptr; 117 spinlock_t *lock_ptr;
108
109 /* Key which the futex is hashed on: */
110 union futex_key key; 118 union futex_key key;
111
112 /* Optional priority inheritance state: */
113 struct futex_pi_state *pi_state; 119 struct futex_pi_state *pi_state;
114
115 /* rt_waiter storage for requeue_pi: */
116 struct rt_mutex_waiter *rt_waiter; 120 struct rt_mutex_waiter *rt_waiter;
117
118 /* The expected requeue pi target futex key: */
119 union futex_key *requeue_pi_key; 121 union futex_key *requeue_pi_key;
120
121 /* Bitset for the optional bitmasked wakeup */
122 u32 bitset; 122 u32 bitset;
123}; 123};
124 124
@@ -198,11 +198,12 @@ static void drop_futex_key_refs(union futex_key *key)
198} 198}
199 199
200/** 200/**
201 * get_futex_key - Get parameters which are the keys for a futex. 201 * get_futex_key() - Get parameters which are the keys for a futex
202 * @uaddr: virtual address of the futex 202 * @uaddr: virtual address of the futex
203 * @fshared: 0 for a PROCESS_PRIVATE futex, 1 for PROCESS_SHARED 203 * @fshared: 0 for a PROCESS_PRIVATE futex, 1 for PROCESS_SHARED
204 * @key: address where result is stored. 204 * @key: address where result is stored.
205 * @rw: mapping needs to be read/write (values: VERIFY_READ, VERIFY_WRITE) 205 * @rw: mapping needs to be read/write (values: VERIFY_READ,
206 * VERIFY_WRITE)
206 * 207 *
207 * Returns a negative error code or 0 208 * Returns a negative error code or 0
208 * The key words are stored in *key on success. 209 * The key words are stored in *key on success.
@@ -288,8 +289,8 @@ void put_futex_key(int fshared, union futex_key *key)
288 drop_futex_key_refs(key); 289 drop_futex_key_refs(key);
289} 290}
290 291
291/* 292/**
292 * fault_in_user_writeable - fault in user address and verify RW access 293 * fault_in_user_writeable() - Fault in user address and verify RW access
293 * @uaddr: pointer to faulting user space address 294 * @uaddr: pointer to faulting user space address
294 * 295 *
295 * Slow path to fixup the fault we just took in the atomic write 296 * Slow path to fixup the fault we just took in the atomic write
@@ -309,8 +310,8 @@ static int fault_in_user_writeable(u32 __user *uaddr)
309 310
310/** 311/**
311 * futex_top_waiter() - Return the highest priority waiter on a futex 312 * futex_top_waiter() - Return the highest priority waiter on a futex
312 * @hb: the hash bucket the futex_q's reside in 313 * @hb: the hash bucket the futex_q's reside in
313 * @key: the futex key (to distinguish it from other futex futex_q's) 314 * @key: the futex key (to distinguish it from other futex futex_q's)
314 * 315 *
315 * Must be called with the hb lock held. 316 * Must be called with the hb lock held.
316 */ 317 */
@@ -588,7 +589,7 @@ lookup_pi_state(u32 uval, struct futex_hash_bucket *hb,
588} 589}
589 590
590/** 591/**
591 * futex_lock_pi_atomic() - atomic work required to acquire a pi aware futex 592 * futex_lock_pi_atomic() - Atomic work required to acquire a pi aware futex
592 * @uaddr: the pi futex user address 593 * @uaddr: the pi futex user address
593 * @hb: the pi futex hash bucket 594 * @hb: the pi futex hash bucket
594 * @key: the futex key associated with uaddr and hb 595 * @key: the futex key associated with uaddr and hb
@@ -1011,9 +1012,9 @@ void requeue_futex(struct futex_q *q, struct futex_hash_bucket *hb1,
1011 1012
1012/** 1013/**
1013 * requeue_pi_wake_futex() - Wake a task that acquired the lock during requeue 1014 * requeue_pi_wake_futex() - Wake a task that acquired the lock during requeue
1014 * q: the futex_q 1015 * @q: the futex_q
1015 * key: the key of the requeue target futex 1016 * @key: the key of the requeue target futex
1016 * hb: the hash_bucket of the requeue target futex 1017 * @hb: the hash_bucket of the requeue target futex
1017 * 1018 *
1018 * During futex_requeue, with requeue_pi=1, it is possible to acquire the 1019 * During futex_requeue, with requeue_pi=1, it is possible to acquire the
1019 * target futex if it is uncontended or via a lock steal. Set the futex_q key 1020 * target futex if it is uncontended or via a lock steal. Set the futex_q key
@@ -1350,6 +1351,25 @@ static inline struct futex_hash_bucket *queue_lock(struct futex_q *q)
1350 return hb; 1351 return hb;
1351} 1352}
1352 1353
1354static inline void
1355queue_unlock(struct futex_q *q, struct futex_hash_bucket *hb)
1356{
1357 spin_unlock(&hb->lock);
1358 drop_futex_key_refs(&q->key);
1359}
1360
1361/**
1362 * queue_me() - Enqueue the futex_q on the futex_hash_bucket
1363 * @q: The futex_q to enqueue
1364 * @hb: The destination hash bucket
1365 *
1366 * The hb->lock must be held by the caller, and is released here. A call to
1367 * queue_me() is typically paired with exactly one call to unqueue_me(). The
1368 * exceptions involve the PI related operations, which may use unqueue_me_pi()
1369 * or nothing if the unqueue is done as part of the wake process and the unqueue
1370 * state is implicit in the state of woken task (see futex_wait_requeue_pi() for
1371 * an example).
1372 */
1353static inline void queue_me(struct futex_q *q, struct futex_hash_bucket *hb) 1373static inline void queue_me(struct futex_q *q, struct futex_hash_bucket *hb)
1354{ 1374{
1355 int prio; 1375 int prio;
@@ -1373,19 +1393,17 @@ static inline void queue_me(struct futex_q *q, struct futex_hash_bucket *hb)
1373 spin_unlock(&hb->lock); 1393 spin_unlock(&hb->lock);
1374} 1394}
1375 1395
1376static inline void 1396/**
1377queue_unlock(struct futex_q *q, struct futex_hash_bucket *hb) 1397 * unqueue_me() - Remove the futex_q from its futex_hash_bucket
1378{ 1398 * @q: The futex_q to unqueue
1379 spin_unlock(&hb->lock); 1399 *
1380 drop_futex_key_refs(&q->key); 1400 * The q->lock_ptr must not be held by the caller. A call to unqueue_me() must
1381} 1401 * be paired with exactly one earlier call to queue_me().
1382 1402 *
1383/* 1403 * Returns:
1384 * queue_me and unqueue_me must be called as a pair, each 1404 * 1 - if the futex_q was still queued (and we unqueued it)
1385 * exactly once. They are called with the hashed spinlock held. 1405 * 0 - if the futex_q was already removed by the waking thread
1386 */ 1406 */
1387
1388/* Return 1 if we were still queued (ie. 0 means we were woken) */
1389static int unqueue_me(struct futex_q *q) 1407static int unqueue_me(struct futex_q *q)
1390{ 1408{
1391 spinlock_t *lock_ptr; 1409 spinlock_t *lock_ptr;
@@ -1638,17 +1656,14 @@ out:
1638static void futex_wait_queue_me(struct futex_hash_bucket *hb, struct futex_q *q, 1656static void futex_wait_queue_me(struct futex_hash_bucket *hb, struct futex_q *q,
1639 struct hrtimer_sleeper *timeout) 1657 struct hrtimer_sleeper *timeout)
1640{ 1658{
1641 queue_me(q, hb);
1642
1643 /* 1659 /*
1644 * There might have been scheduling since the queue_me(), as we 1660 * The task state is guaranteed to be set before another task can
1645 * cannot hold a spinlock across the get_user() in case it 1661 * wake it. set_current_state() is implemented using set_mb() and
1646 * faults, and we cannot just set TASK_INTERRUPTIBLE state when 1662 * queue_me() calls spin_unlock() upon completion, both serializing
1647 * queueing ourselves into the futex hash. This code thus has to 1663 * access to the hash list and forcing another memory barrier.
1648 * rely on the futex_wake() code removing us from hash when it
1649 * wakes us up.
1650 */ 1664 */
1651 set_current_state(TASK_INTERRUPTIBLE); 1665 set_current_state(TASK_INTERRUPTIBLE);
1666 queue_me(q, hb);
1652 1667
1653 /* Arm the timer */ 1668 /* Arm the timer */
1654 if (timeout) { 1669 if (timeout) {
@@ -1658,8 +1673,8 @@ static void futex_wait_queue_me(struct futex_hash_bucket *hb, struct futex_q *q,
1658 } 1673 }
1659 1674
1660 /* 1675 /*
1661 * !plist_node_empty() is safe here without any lock. 1676 * If we have been removed from the hash list, then another task
1662 * q.lock_ptr != 0 is not safe, because of ordering against wakeup. 1677 * has tried to wake us, and we can skip the call to schedule().
1663 */ 1678 */
1664 if (likely(!plist_node_empty(&q->list))) { 1679 if (likely(!plist_node_empty(&q->list))) {
1665 /* 1680 /*
@@ -2114,12 +2129,12 @@ int handle_early_requeue_pi_wakeup(struct futex_hash_bucket *hb,
2114 2129
2115/** 2130/**
2116 * futex_wait_requeue_pi() - Wait on uaddr and take uaddr2 2131 * futex_wait_requeue_pi() - Wait on uaddr and take uaddr2
2117 * @uaddr: the futex we initialyl wait on (non-pi) 2132 * @uaddr: the futex we initially wait on (non-pi)
2118 * @fshared: whether the futexes are shared (1) or not (0). They must be 2133 * @fshared: whether the futexes are shared (1) or not (0). They must be
2119 * the same type, no requeueing from private to shared, etc. 2134 * the same type, no requeueing from private to shared, etc.
2120 * @val: the expected value of uaddr 2135 * @val: the expected value of uaddr
2121 * @abs_time: absolute timeout 2136 * @abs_time: absolute timeout
2122 * @bitset: 32 bit wakeup bitset set by userspace, defaults to all. 2137 * @bitset: 32 bit wakeup bitset set by userspace, defaults to all
2123 * @clockrt: whether to use CLOCK_REALTIME (1) or CLOCK_MONOTONIC (0) 2138 * @clockrt: whether to use CLOCK_REALTIME (1) or CLOCK_MONOTONIC (0)
2124 * @uaddr2: the pi futex we will take prior to returning to user-space 2139 * @uaddr2: the pi futex we will take prior to returning to user-space
2125 * 2140 *
@@ -2246,7 +2261,7 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, int fshared,
2246 res = fixup_owner(uaddr2, fshared, &q, !ret); 2261 res = fixup_owner(uaddr2, fshared, &q, !ret);
2247 /* 2262 /*
2248 * If fixup_owner() returned an error, propagate that. If it 2263
2249 * acquired the lock, clear our -ETIMEDOUT or -EINTR. 2264 * acquired the lock, clear -ETIMEDOUT or -EINTR.
2250 */ 2265 */
2251 if (res) 2266 if (res)
2252 ret = (res < 0) ? res : 0; 2267 ret = (res < 0) ? res : 0;
@@ -2302,9 +2317,9 @@ out:
2302 */ 2317 */
2303 2318
2304/** 2319/**
2305 * sys_set_robust_list - set the robust-futex list head of a task 2320 * sys_set_robust_list() - Set the robust-futex list head of a task
2306 * @head: pointer to the list-head 2321 * @head: pointer to the list-head
2307 * @len: length of the list-head, as userspace expects 2322 * @len: length of the list-head, as userspace expects
2308 */ 2323 */
2309SYSCALL_DEFINE2(set_robust_list, struct robust_list_head __user *, head, 2324SYSCALL_DEFINE2(set_robust_list, struct robust_list_head __user *, head,
2310 size_t, len) 2325 size_t, len)
@@ -2323,10 +2338,10 @@ SYSCALL_DEFINE2(set_robust_list, struct robust_list_head __user *, head,
2323} 2338}
2324 2339
2325/** 2340/**
2326 * sys_get_robust_list - get the robust-futex list head of a task 2341 * sys_get_robust_list() - Get the robust-futex list head of a task
2327 * @pid: pid of the process [zero for current task] 2342 * @pid: pid of the process [zero for current task]
2328 * @head_ptr: pointer to a list-head pointer, the kernel fills it in 2343 * @head_ptr: pointer to a list-head pointer, the kernel fills it in
2329 * @len_ptr: pointer to a length field, the kernel fills in the header size 2344 * @len_ptr: pointer to a length field, the kernel fills in the header size
2330 */ 2345 */
2331SYSCALL_DEFINE3(get_robust_list, int, pid, 2346SYSCALL_DEFINE3(get_robust_list, int, pid,
2332 struct robust_list_head __user * __user *, head_ptr, 2347 struct robust_list_head __user * __user *, head_ptr,
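
The futex_wait_queue_me() change sets TASK_INTERRUPTIBLE before queue_me() and relies on the spin_unlock() inside queue_me() to order the two, so any waker that finds the task on the hash list is guaranteed to also see its sleeping state. The pthread sketch below mimics that publish-after-setting-state ordering with a mutex standing in for the hash bucket lock; it is an illustration, not the kernel's full memory-ordering argument:

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t hb_lock = PTHREAD_MUTEX_INITIALIZER;
static int queued;                      /* protected by hb_lock */
static atomic_int want_sleep;           /* the "task state" */
static atomic_int woken;

static void *waiter(void *arg)
{
        (void)arg;
        atomic_store(&want_sleep, 1);   /* 1. set state first */
        pthread_mutex_lock(&hb_lock);   /* 2. then publish on the "hash list" */
        queued = 1;
        pthread_mutex_unlock(&hb_lock); /* the unlock orders the two stores */

        while (!atomic_load(&woken))    /* 3. only now go to "sleep" */
                usleep(1000);
        printf("waiter: woken\n");
        return NULL;
}

static void *waker(void *arg)
{
        (void)arg;
        for (;;) {
                pthread_mutex_lock(&hb_lock);
                if (queued) {
                        /* Because the waiter stored its state before taking
                         * the lock to queue itself, seeing queued here means
                         * want_sleep is visible too: no wakeup can be lost. */
                        queued = 0;
                        if (atomic_load(&want_sleep))
                                atomic_store(&woken, 1);
                        pthread_mutex_unlock(&hb_lock);
                        return NULL;
                }
                pthread_mutex_unlock(&hb_lock);
                usleep(1000);
        }
}

int main(void)
{
        pthread_t w1, w2;

        pthread_create(&w1, NULL, waiter, NULL);
        pthread_create(&w2, NULL, waker, NULL);
        pthread_join(w1, NULL);
        pthread_join(w2, NULL);
        return 0;
}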
diff --git a/kernel/module.c b/kernel/module.c
index 5a29397ca4b6..fe748a86d452 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -3091,7 +3091,6 @@ void module_layout(struct module *mod,
3091 struct modversion_info *ver, 3091 struct modversion_info *ver,
3092 struct kernel_param *kp, 3092 struct kernel_param *kp,
3093 struct kernel_symbol *ks, 3093 struct kernel_symbol *ks,
3094 struct marker *marker,
3095 struct tracepoint *tp) 3094 struct tracepoint *tp)
3096{ 3095{
3097} 3096}
diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c
index 09113347d328..5e18c6ab2c6a 100644
--- a/kernel/time/clocksource.c
+++ b/kernel/time/clocksource.c
@@ -394,15 +394,11 @@ void clocksource_resume(void)
394{ 394{
395 struct clocksource *cs; 395 struct clocksource *cs;
396 396
397 mutex_lock(&clocksource_mutex);
398
399 list_for_each_entry(cs, &clocksource_list, list) 397 list_for_each_entry(cs, &clocksource_list, list)
400 if (cs->resume) 398 if (cs->resume)
401 cs->resume(); 399 cs->resume();
402 400
403 clocksource_resume_watchdog(); 401 clocksource_resume_watchdog();
404
405 mutex_unlock(&clocksource_mutex);
406} 402}
407 403
408/** 404/**
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index a142579765bf..46592feab5a6 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -1621,8 +1621,10 @@ ftrace_regex_open(struct inode *inode, struct file *file, int enable)
1621 if (!ret) { 1621 if (!ret) {
1622 struct seq_file *m = file->private_data; 1622 struct seq_file *m = file->private_data;
1623 m->private = iter; 1623 m->private = iter;
1624 } else 1624 } else {
1625 trace_parser_put(&iter->parser);
1625 kfree(iter); 1626 kfree(iter);
1627 }
1626 } else 1628 } else
1627 file->private_data = iter; 1629 file->private_data = iter;
1628 mutex_unlock(&ftrace_regex_lock); 1630 mutex_unlock(&ftrace_regex_lock);
@@ -2202,7 +2204,7 @@ ftrace_regex_write(struct file *file, const char __user *ubuf,
2202 struct trace_parser *parser; 2204 struct trace_parser *parser;
2203 ssize_t ret, read; 2205 ssize_t ret, read;
2204 2206
2205 if (!cnt || cnt < 0) 2207 if (!cnt)
2206 return 0; 2208 return 0;
2207 2209
2208 mutex_lock(&ftrace_regex_lock); 2210 mutex_lock(&ftrace_regex_lock);
@@ -2216,7 +2218,7 @@ ftrace_regex_write(struct file *file, const char __user *ubuf,
2216 parser = &iter->parser; 2218 parser = &iter->parser;
2217 read = trace_get_user(parser, ubuf, cnt, ppos); 2219 read = trace_get_user(parser, ubuf, cnt, ppos);
2218 2220
2219 if (trace_parser_loaded(parser) && 2221 if (read >= 0 && trace_parser_loaded(parser) &&
2220 !trace_parser_cont(parser)) { 2222 !trace_parser_cont(parser)) {
2221 ret = ftrace_process_regex(parser->buffer, 2223 ret = ftrace_process_regex(parser->buffer,
2222 parser->idx, enable); 2224 parser->idx, enable);
@@ -2552,8 +2554,7 @@ ftrace_graph_write(struct file *file, const char __user *ubuf,
2552 size_t cnt, loff_t *ppos) 2554 size_t cnt, loff_t *ppos)
2553{ 2555{
2554 struct trace_parser parser; 2556 struct trace_parser parser;
2555 size_t read = 0; 2557 ssize_t read, ret;
2556 ssize_t ret;
2557 2558
2558 if (!cnt || cnt < 0) 2559 if (!cnt || cnt < 0)
2559 return 0; 2560 return 0;
@@ -2562,29 +2563,31 @@ ftrace_graph_write(struct file *file, const char __user *ubuf,
2562 2563
2563 if (ftrace_graph_count >= FTRACE_GRAPH_MAX_FUNCS) { 2564 if (ftrace_graph_count >= FTRACE_GRAPH_MAX_FUNCS) {
2564 ret = -EBUSY; 2565 ret = -EBUSY;
2565 goto out; 2566 goto out_unlock;
2566 } 2567 }
2567 2568
2568 if (trace_parser_get_init(&parser, FTRACE_BUFF_MAX)) { 2569 if (trace_parser_get_init(&parser, FTRACE_BUFF_MAX)) {
2569 ret = -ENOMEM; 2570 ret = -ENOMEM;
2570 goto out; 2571 goto out_unlock;
2571 } 2572 }
2572 2573
2573 read = trace_get_user(&parser, ubuf, cnt, ppos); 2574 read = trace_get_user(&parser, ubuf, cnt, ppos);
2574 2575
2575 if (trace_parser_loaded((&parser))) { 2576 if (read >= 0 && trace_parser_loaded((&parser))) {
2576 parser.buffer[parser.idx] = 0; 2577 parser.buffer[parser.idx] = 0;
2577 2578
2578 /* we allow only one expression at a time */ 2579 /* we allow only one expression at a time */
2579 ret = ftrace_set_func(ftrace_graph_funcs, &ftrace_graph_count, 2580 ret = ftrace_set_func(ftrace_graph_funcs, &ftrace_graph_count,
2580 parser.buffer); 2581 parser.buffer);
2581 if (ret) 2582 if (ret)
2582 goto out; 2583 goto out_free;
2583 } 2584 }
2584 2585
2585 ret = read; 2586 ret = read;
2586 out: 2587
2588out_free:
2587 trace_parser_put(&parser); 2589 trace_parser_put(&parser);
2590out_unlock:
2588 mutex_unlock(&graph_lock); 2591 mutex_unlock(&graph_lock);
2589 2592
2590 return ret; 2593 return ret;
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 411af37f4be4..45068269ebb1 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -415,7 +415,7 @@ int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
 
 	/* read the non-space input */
 	while (cnt && !isspace(ch)) {
-		if (parser->idx < parser->size)
+		if (parser->idx < parser->size - 1)
 			parser->buffer[parser->idx++] = ch;
 		else {
 			ret = -EINVAL;
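
The `parser->size - 1` bound above reserves the last byte of the parser buffer for the NUL terminator that callers append with `parser.buffer[parser.idx] = 0`. A small stand-alone sketch of the same bound, using a plain character buffer rather than the trace_parser structure:

#include <stdio.h>
#include <ctype.h>

/* Append non-space characters, always keeping one byte for the NUL. */
static int copy_token(char *dst, size_t size, const char *src)
{
	size_t idx = 0;

	while (*src && !isspace((unsigned char)*src)) {
		if (idx < size - 1)
			dst[idx++] = *src++;
		else
			return -1;	/* token would overflow the buffer */
	}
	dst[idx] = '\0';		/* the reserved byte */
	return (int)idx;
}

int main(void)
{
	char buf[8];

	printf("%d %s\n", copy_token(buf, sizeof(buf), "short x"), buf);
	printf("%d\n", copy_token(buf, sizeof(buf), "waytoolongtoken"));
	return 0;
}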
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index 6f03c8a1105e..d128f65778e6 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -232,10 +232,9 @@ ftrace_event_write(struct file *file, const char __user *ubuf,
 			size_t cnt, loff_t *ppos)
 {
 	struct trace_parser parser;
-	size_t read = 0;
-	ssize_t ret;
+	ssize_t read, ret;
 
-	if (!cnt || cnt < 0)
+	if (!cnt)
 		return 0;
 
 	ret = tracing_update_buffers();
@@ -247,7 +246,7 @@ ftrace_event_write(struct file *file, const char __user *ubuf,
 
 	read = trace_get_user(&parser, ubuf, cnt, ppos);
 
-	if (trace_parser_loaded((&parser))) {
+	if (read >= 0 && trace_parser_loaded((&parser))) {
 		int set = 1;
 
 		if (*parser.buffer == '!')
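
With the added `read >= 0 &&` checks, all three write handlers consult the result of trace_get_user() before acting on whatever happens to be in the parser, so a failed copy from user space no longer lets a half-filled buffer be applied. A reduced sketch of that ordering, with a stub standing in for the real copy-from-user step (names here are illustrative only):

#include <stdio.h>
#include <string.h>

struct parser {
	char buffer[64];
	int idx;
};

/* Stub for trace_get_user(): returns bytes consumed, or -1 on fault. */
static int get_user_token(struct parser *p, const char *ubuf)
{
	if (!ubuf)
		return -1;	/* simulate a copy_from_user() failure */

	p->idx = (int)strlen(ubuf);
	snprintf(p->buffer, sizeof(p->buffer), "%s", ubuf);
	return p->idx;
}

static int handle_write(const char *ubuf)
{
	struct parser p = { .idx = 0 };
	int read = get_user_token(&p, ubuf);

	/* Only apply the token if the copy itself succeeded. */
	if (read >= 0 && p.idx > 0)
		printf("applying filter '%s'\n", p.buffer);

	return read;
}

int main(void)
{
	handle_write("sched:sched_switch");
	handle_write(NULL);
	return 0;
}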
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index d99664e8607e..a3b14090b1fb 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -44,18 +44,21 @@ static long ratelimit_pages = 32;
 /*
  * When balance_dirty_pages decides that the caller needs to perform some
  * non-background writeback, this is how many pages it will attempt to write.
- * It should be somewhat larger than RATELIMIT_PAGES to ensure that reasonably
+ * It should be somewhat larger than dirtied pages to ensure that reasonably
  * large amounts of I/O are submitted.
  */
-static inline long sync_writeback_pages(void)
+static inline long sync_writeback_pages(unsigned long dirtied)
 {
-	return ratelimit_pages + ratelimit_pages / 2;
+	if (dirtied < ratelimit_pages)
+		dirtied = ratelimit_pages;
+
+	return dirtied + dirtied / 2;
 }
 
 /* The following parameters are exported via /proc/sys/vm */
 
 /*
- * Start background writeback (via writeback threads) at this percentage
+ * Start background writeback (via writeback threads) at this percentage
  */
 int dirty_background_ratio = 10;
 
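
sync_writeback_pages() now scales the writeback chunk with the number of pages the task actually dirtied, with ratelimit_pages as a floor: at the default ratelimit_pages of 32, dirtying 8 pages still yields a 48-page chunk, while dirtying 1024 pages yields 1536. A stand-alone sketch of the calculation (the sample values are illustrative only):

#include <stdio.h>

static long ratelimit_pages = 32;

/* Chunk = 1.5x the dirtied count, never less than 1.5x ratelimit_pages. */
static long sync_writeback_pages(unsigned long dirtied)
{
	if (dirtied < (unsigned long)ratelimit_pages)
		dirtied = ratelimit_pages;

	return dirtied + dirtied / 2;
}

int main(void)
{
	unsigned long samples[] = { 1, 8, 32, 256, 1024 };

	for (unsigned i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		printf("dirtied %4lu -> write chunk %ld\n",
		       samples[i], sync_writeback_pages(samples[i]));
	return 0;
}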
@@ -474,10 +477,11 @@ get_dirty_limits(unsigned long *pbackground, unsigned long *pdirty,
  * balance_dirty_pages() must be called by processes which are generating dirty
  * data. It looks at the number of dirty pages in the machine and will force
  * the caller to perform writeback if the system is over `vm_dirty_ratio'.
- * If we're over `background_thresh' then pdflush is woken to perform some
- * writeout.
+ * If we're over `background_thresh' then the writeback threads are woken to
+ * perform some writeout.
  */
-static void balance_dirty_pages(struct address_space *mapping)
+static void balance_dirty_pages(struct address_space *mapping,
+				unsigned long write_chunk)
 {
 	long nr_reclaimable, bdi_nr_reclaimable;
 	long nr_writeback, bdi_nr_writeback;
@@ -485,7 +489,6 @@ static void balance_dirty_pages(struct address_space *mapping)
 	unsigned long dirty_thresh;
 	unsigned long bdi_thresh;
 	unsigned long pages_written = 0;
-	unsigned long write_chunk = sync_writeback_pages();
 	unsigned long pause = 1;
 
 	struct backing_dev_info *bdi = mapping->backing_dev_info;
@@ -579,7 +582,7 @@ static void balance_dirty_pages(struct address_space *mapping)
 		bdi->dirty_exceeded = 0;
 
 	if (writeback_in_progress(bdi))
-		return;		/* pdflush is already working this queue */
+		return;
 
 	/*
 	 * In laptop mode, we wait until hitting the higher threshold before
@@ -590,10 +593,10 @@ static void balance_dirty_pages(struct address_space *mapping)
 	 * background_thresh, to keep the amount of dirty memory low.
 	 */
 	if ((laptop_mode && pages_written) ||
-	    (!laptop_mode && ((nr_writeback = global_page_state(NR_FILE_DIRTY)
+	    (!laptop_mode && ((global_page_state(NR_FILE_DIRTY)
 			       + global_page_state(NR_UNSTABLE_NFS))
 					  > background_thresh)))
-		bdi_start_writeback(bdi, nr_writeback);
+		bdi_start_writeback(bdi, NULL, 0);
 }
 
 void set_page_dirty_balance(struct page *page, int page_mkwrite)
@@ -640,9 +643,10 @@ void balance_dirty_pages_ratelimited_nr(struct address_space *mapping,
 	p = &__get_cpu_var(bdp_ratelimits);
 	*p += nr_pages_dirtied;
 	if (unlikely(*p >= ratelimit)) {
+		ratelimit = sync_writeback_pages(*p);
 		*p = 0;
 		preempt_enable();
-		balance_dirty_pages(mapping);
+		balance_dirty_pages(mapping, ratelimit);
 		return;
 	}
 	preempt_enable();
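
balance_dirty_pages_ratelimited_nr() now converts the accumulated per-CPU count of newly dirtied pages into the write chunk and resets the counter before throttling. A simplified single-threaded sketch of that accumulate-then-throttle flow; an ordinary counter and the helper names below stand in for the per-CPU variable and the kernel functions.

#include <stdio.h>

static long ratelimit_pages = 32;
static unsigned long bdp_ratelimit;	/* stand-in for the per-CPU counter */

/* 1.5x the dirtied count, never below 1.5x ratelimit_pages, as above. */
static unsigned long compute_write_chunk(unsigned long dirtied)
{
	if (dirtied < (unsigned long)ratelimit_pages)
		dirtied = ratelimit_pages;
	return dirtied + dirtied / 2;
}

static void throttle(unsigned long write_chunk)
{
	printf("throttle: try to write back %lu pages\n", write_chunk);
}

/* Accumulate newly dirtied pages; once past the ratelimit, convert the
 * real count into a chunk and reset the counter before throttling. */
static void dirtied_pages(unsigned long nr)
{
	bdp_ratelimit += nr;
	if (bdp_ratelimit >= (unsigned long)ratelimit_pages) {
		unsigned long chunk = compute_write_chunk(bdp_ratelimit);

		bdp_ratelimit = 0;
		throttle(chunk);
	}
}

int main(void)
{
	for (int i = 0; i < 6; i++)
		dirtied_pages(13);
	return 0;
}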
diff --git a/mm/shmem.c b/mm/shmem.c
index 98631c26c200..ccf446a9faa1 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -1046,8 +1046,9 @@ static int shmem_writepage(struct page *page, struct writeback_control *wbc)
 	 * sync from ever calling shmem_writepage; but a stacking filesystem
 	 * may use the ->writepage of its underlying filesystem, in which case
 	 * tmpfs should write out to swap only in response to memory pressure,
-	 * and not for pdflush or sync. However, in those cases, we do still
-	 * want to check if there's a redundant swappage to be discarded.
+	 * and not for the writeback threads or sync. However, in those cases,
+	 * we do still want to check if there's a redundant swappage to be
+	 * discarded.
 	 */
 	if (wbc->for_reclaim)
 		swap = get_swap_page();
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 1219ceb8a9b2..64e438898832 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1709,10 +1709,10 @@ static void shrink_zones(int priority, struct zonelist *zonelist,
  *
  * If the caller is !__GFP_FS then the probability of a failure is reasonably
  * high - the zone may be full of dirty or under-writeback pages, which this
- * caller can't do much about. We kick pdflush and take explicit naps in the
- * hope that some of these pages can be written. But if the allocating task
- * holds filesystem locks which prevent writeout this might not work, and the
- * allocation attempt will fail.
+ * caller can't do much about. We kick the writeback threads and take explicit
+ * naps in the hope that some of these pages can be written. But if the
+ * allocating task holds filesystem locks which prevent writeout this might not
+ * work, and the allocation attempt will fail.
  *
  * returns:	0, if no pages reclaimed
  *		else, the number of pages reclaimed
diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c
index d6b1b054e294..fbcac76fdc0d 100644
--- a/net/ax25/af_ax25.c
+++ b/net/ax25/af_ax25.c
@@ -358,6 +358,7 @@ static int ax25_ctl_ioctl(const unsigned int cmd, void __user *arg)
 	ax25_dev *ax25_dev;
 	ax25_cb *ax25;
 	unsigned int k;
+	int ret = 0;
 
 	if (copy_from_user(&ax25_ctl, arg, sizeof(ax25_ctl)))
 		return -EFAULT;
@@ -388,57 +389,63 @@ static int ax25_ctl_ioctl(const unsigned int cmd, void __user *arg)
 	case AX25_WINDOW:
 		if (ax25->modulus == AX25_MODULUS) {
 			if (ax25_ctl.arg < 1 || ax25_ctl.arg > 7)
-				return -EINVAL;
+				goto einval_put;
 		} else {
 			if (ax25_ctl.arg < 1 || ax25_ctl.arg > 63)
-				return -EINVAL;
+				goto einval_put;
 		}
 		ax25->window = ax25_ctl.arg;
 		break;
 
 	case AX25_T1:
 		if (ax25_ctl.arg < 1)
-			return -EINVAL;
+			goto einval_put;
 		ax25->rtt = (ax25_ctl.arg * HZ) / 2;
 		ax25->t1 = ax25_ctl.arg * HZ;
 		break;
 
 	case AX25_T2:
 		if (ax25_ctl.arg < 1)
-			return -EINVAL;
+			goto einval_put;
 		ax25->t2 = ax25_ctl.arg * HZ;
 		break;
 
 	case AX25_N2:
 		if (ax25_ctl.arg < 1 || ax25_ctl.arg > 31)
-			return -EINVAL;
+			goto einval_put;
 		ax25->n2count = 0;
 		ax25->n2 = ax25_ctl.arg;
 		break;
 
 	case AX25_T3:
 		if (ax25_ctl.arg < 0)
-			return -EINVAL;
+			goto einval_put;
 		ax25->t3 = ax25_ctl.arg * HZ;
 		break;
 
 	case AX25_IDLE:
 		if (ax25_ctl.arg < 0)
-			return -EINVAL;
+			goto einval_put;
 		ax25->idle = ax25_ctl.arg * 60 * HZ;
 		break;
 
 	case AX25_PACLEN:
 		if (ax25_ctl.arg < 16 || ax25_ctl.arg > 65535)
-			return -EINVAL;
+			goto einval_put;
 		ax25->paclen = ax25_ctl.arg;
 		break;
 
 	default:
-		return -EINVAL;
+		goto einval_put;
 	}
 
-	return 0;
+out_put:
+	ax25_cb_put(ax25);
+	return ret;
+
+einval_put:
+	ret = -EINVAL;
+	goto out_put;
 }
 
 static void ax25_fillin_cb_from_dev(ax25_cb *ax25, ax25_dev *ax25_dev)
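
Routing every exit of ax25_ctl_ioctl() through out_put ensures ax25_cb_put() runs on all paths, including the -EINVAL cases that previously returned directly and skipped the put. A reduced sketch of that single-exit refcount pattern; the structure and function names below are simplified stand-ins, not the AX.25 types.

#include <stdio.h>

struct conn {
	int refcount;
	int window;
};

static void conn_put(struct conn *c)
{
	c->refcount--;		/* stand-in for ax25_cb_put() */
}

/* Every branch, including validation failures, reaches out_put exactly
 * once, so the reference taken earlier is never leaked. */
static int conn_set_window(struct conn *c, long arg)
{
	int ret = 0;

	c->refcount++;		/* stand-in for the lookup's reference */

	if (arg < 1 || arg > 7)
		goto einval_put;

	c->window = (int)arg;

out_put:
	conn_put(c);
	return ret;

einval_put:
	ret = -22;		/* -EINVAL */
	goto out_put;
}

int main(void)
{
	struct conn c = { .refcount = 0, .window = 0 };

	printf("%d (refcount %d)\n", conn_set_window(&c, 4), c.refcount);
	printf("%d (refcount %d)\n", conn_set_window(&c, 99), c.refcount);
	return 0;
}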
diff --git a/tools/perf/.gitignore b/tools/perf/.gitignore
index d69a759a1046..0854f110bf7f 100644
--- a/tools/perf/.gitignore
+++ b/tools/perf/.gitignore
@@ -10,6 +10,7 @@ perf-stat
 perf-top
 perf*.1
 perf*.xml
+perf*.html
 common-cmds.h
 tags
 TAGS
diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c
index 16af2d82e858..e5f6ece65a13 100644
--- a/tools/perf/builtin-stat.c
+++ b/tools/perf/builtin-stat.c
@@ -338,14 +338,24 @@ static void nsec_printout(int counter, double avg)
 
 static void abs_printout(int counter, double avg)
 {
+	double total, ratio = 0.0;
+
 	fprintf(stderr, " %14.0f %-24s", avg, event_name(counter));
 
 	if (MATCH_EVENT(HARDWARE, HW_INSTRUCTIONS, counter)) {
-		fprintf(stderr, " # %10.3f IPC ",
-			avg / avg_stats(&runtime_cycles_stats));
+		total = avg_stats(&runtime_cycles_stats);
+
+		if (total)
+			ratio = avg / total;
+
+		fprintf(stderr, " # %10.3f IPC ", ratio);
 	} else {
-		fprintf(stderr, " # %10.3f M/sec",
-			1000.0 * avg / avg_stats(&runtime_nsecs_stats));
+		total = avg_stats(&runtime_nsecs_stats);
+
+		if (total)
+			ratio = 1000.0 * avg / total;
+
+		fprintf(stderr, " # %10.3f M/sec", ratio);
 	}
 }
 
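
The guarded divisions above keep abs_printout() from printing inf or nan when the cycle or time denominator is zero; the ratio simply stays 0.0. A minimal sketch of the guard with plain doubles (no perf statistics types):

#include <stdio.h>

/* Return numerator/denominator, or 0.0 when the denominator is zero. */
static double safe_ratio(double num, double den)
{
	double ratio = 0.0;

	if (den)
		ratio = num / den;

	return ratio;
}

int main(void)
{
	printf(" # %10.3f IPC\n", safe_ratio(1500000.0, 2000000.0));
	printf(" # %10.3f IPC\n", safe_ratio(1500000.0, 0.0)); /* guarded */
	return 0;
}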
diff --git a/tools/perf/util/module.c b/tools/perf/util/module.c
index 3d567fe59c79..0d8c85defcd2 100644
--- a/tools/perf/util/module.c
+++ b/tools/perf/util/module.c
@@ -4,6 +4,7 @@
 #include "module.h"
 
 #include <libelf.h>
+#include <libgen.h>
 #include <gelf.h>
 #include <elf.h>
 #include <dirent.h>
@@ -409,35 +410,40 @@ out_failure:
 static int mod_dso__load_module_paths(struct mod_dso *self)
 {
 	struct utsname uts;
-	int count = 0, len;
+	int count = 0, len, err = -1;
 	char *line = NULL;
 	FILE *file;
-	char *path;
+	char *dpath, *dir;
 	size_t n;
 
 	if (uname(&uts) < 0)
-		goto out_failure;
+		return err;
 
 	len = strlen("/lib/modules/");
 	len += strlen(uts.release);
 	len += strlen("/modules.dep");
 
-	path = calloc(1, len);
-	if (path == NULL)
-		goto out_failure;
+	dpath = calloc(1, len + 1);
+	if (dpath == NULL)
+		return err;
 
-	strcat(path, "/lib/modules/");
-	strcat(path, uts.release);
-	strcat(path, "/modules.dep");
+	strcat(dpath, "/lib/modules/");
+	strcat(dpath, uts.release);
+	strcat(dpath, "/modules.dep");
 
-	file = fopen(path, "r");
-	free(path);
+	file = fopen(dpath, "r");
 	if (file == NULL)
 		goto out_failure;
 
+	dir = dirname(dpath);
+	if (!dir)
+		goto out_failure;
+	strcat(dir, "/");
+
 	while (!feof(file)) {
-		char *name, *tmp;
 		struct module *module;
+		char *name, *path, *tmp;
+		FILE *modfile;
 		int line_len;
 
 		line_len = getline(&line, &n, file);
@@ -445,17 +451,41 @@ static int mod_dso__load_module_paths(struct mod_dso *self)
 			break;
 
 		if (!line)
-			goto out_failure;
+			break;
 
 		line[--line_len] = '\0'; /* \n */
 
-		path = strtok(line, ":");
+		path = strchr(line, ':');
+		if (!path)
+			break;
+		*path = '\0';
+
+		path = strdup(line);
 		if (!path)
-			goto out_failure;
+			break;
+
+		if (!strstr(path, dir)) {
+			if (strncmp(path, "kernel/", 7))
+				break;
+
+			free(path);
+			path = calloc(1, strlen(dir) + strlen(line) + 1);
+			if (!path)
+				break;
+			strcat(path, dir);
+			strcat(path, line);
+		}
+
+		modfile = fopen(path, "r");
+		if (modfile == NULL)
+			break;
+		fclose(modfile);
 
 		name = strdup(path);
-		name = strtok(name, "/");
+		if (!name)
+			break;
 
+		name = strtok(name, "/");
 		tmp = name;
 
 		while (tmp) {
@@ -463,26 +493,25 @@ static int mod_dso__load_module_paths(struct mod_dso *self)
 			if (tmp)
 				name = tmp;
 		}
+
 		name = strsep(&name, ".");
+		if (!name)
+			break;
 
-		/* Quirk: replace '-' with '_' in sound modules */
+		/* Quirk: replace '-' with '_' in all modules */
 		for (len = strlen(name); len; len--) {
 			if (*(name+len) == '-')
 				*(name+len) = '_';
 		}
 
 		module = module__new(name, path);
-		if (!module) {
-			fprintf(stderr, "load_module_paths: allocation error\n");
-			goto out_failure;
-		}
+		if (!module)
+			break;
 		mod_dso__insert_module(self, module);
 
 		module->sections = sec_dso__new_dso("sections");
-		if (!module->sections) {
-			fprintf(stderr, "load_module_paths: allocation error\n");
-			goto out_failure;
-		}
+		if (!module->sections)
+			break;
 
 		module->active = mod_dso__load_sections(module);
 
@@ -490,13 +519,20 @@ static int mod_dso__load_module_paths(struct mod_dso *self)
 		count++;
 	}
 
-	free(line);
-	fclose(file);
-
-	return count;
+	if (feof(file))
+		err = count;
+	else
+		fprintf(stderr, "load_module_paths: modules.dep parsing failure!\n");
 
 out_failure:
-	return -1;
+	if (dpath)
+		free(dpath);
+	if (file)
+		fclose(file);
+	if (line)
+		free(line);
+
+	return err;
 }
 
 int mod_dso__load_modules(struct mod_dso *dso)
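
mod_dso__load_module_paths() now funnels every bail-out through a single out_failure label that releases only what was actually acquired and returns either the parsed module count or -1. A condensed user-space sketch of that shape; the line-counting helper and the /proc path are hypothetical, not the perf module parser.

#define _GNU_SOURCE
#include <stdio.h>
#include <stdlib.h>

/* Echoes the patched layout: every bail-out lands on one label that
 * frees only what exists, and err carries either a count or -1. */
static int count_deps(const char *dpath)
{
	int count = 0, err = -1;
	char *line = NULL;
	size_t n = 0;
	FILE *file = NULL;

	file = fopen(dpath, "r");
	if (file == NULL)
		goto out_failure;

	while (getline(&line, &n, file) != -1)
		count++;

	if (feof(file))
		err = count;	/* clean EOF: report how many lines we saw */

out_failure:
	if (file)
		fclose(file);
	if (line)
		free(line);

	return err;
}

int main(void)
{
	printf("%d\n", count_deps("/proc/modules"));
	return 0;
}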
diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c
index 13ab4b842d49..87c424de79ee 100644
--- a/tools/perf/util/parse-events.c
+++ b/tools/perf/util/parse-events.c
@@ -165,33 +165,31 @@ struct tracepoint_path *tracepoint_id_to_path(u64 config)
 	DIR *sys_dir, *evt_dir;
 	struct dirent *sys_next, *evt_next, sys_dirent, evt_dirent;
 	char id_buf[4];
-	int sys_dir_fd, fd;
+	int fd;
 	u64 id;
 	char evt_path[MAXPATHLEN];
+	char dir_path[MAXPATHLEN];
 
 	if (valid_debugfs_mount(debugfs_path))
 		return NULL;
 
 	sys_dir = opendir(debugfs_path);
 	if (!sys_dir)
-		goto cleanup;
-	sys_dir_fd = dirfd(sys_dir);
+		return NULL;
 
 	for_each_subsystem(sys_dir, sys_dirent, sys_next) {
-		int dfd = openat(sys_dir_fd, sys_dirent.d_name,
-				 O_RDONLY|O_DIRECTORY), evt_dir_fd;
-		if (dfd == -1)
-			continue;
-		evt_dir = fdopendir(dfd);
-		if (!evt_dir) {
-			close(dfd);
+
+		snprintf(dir_path, MAXPATHLEN, "%s/%s", debugfs_path,
+			 sys_dirent.d_name);
+		evt_dir = opendir(dir_path);
+		if (!evt_dir)
 			continue;
-		}
-		evt_dir_fd = dirfd(evt_dir);
+
 		for_each_event(sys_dirent, evt_dir, evt_dirent, evt_next) {
-			snprintf(evt_path, MAXPATHLEN, "%s/id",
+
+			snprintf(evt_path, MAXPATHLEN, "%s/%s/id", dir_path,
 				 evt_dirent.d_name);
-			fd = openat(evt_dir_fd, evt_path, O_RDONLY);
+			fd = open(evt_path, O_RDONLY);
 			if (fd < 0)
 				continue;
 			if (read(fd, id_buf, sizeof(id_buf)) < 0) {
@@ -225,7 +223,6 @@ struct tracepoint_path *tracepoint_id_to_path(u64 config)
 		closedir(evt_dir);
 	}
 
-cleanup:
 	closedir(sys_dir);
 	return NULL;
 }
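
Both tracepoint walkers now build the event directory path with snprintf() and reopen it with plain opendir()/open(), dropping the dirfd()/openat()/fdopendir() combination. A small sketch of that style of traversal; the generic two-level walk and the debugfs path in main() are only examples, not the perf code.

#include <stdio.h>
#include <dirent.h>

/* List second-level entries of `root` by constructing "root/name" with
 * snprintf() and reopening it, as the patched perf code does. */
static void list_subdirs(const char *root)
{
	char dir_path[4096];	/* fixed buffer instead of MAXPATHLEN */
	struct dirent *ent;
	DIR *top, *sub;

	top = opendir(root);
	if (!top)
		return;

	while ((ent = readdir(top)) != NULL) {
		if (ent->d_name[0] == '.')
			continue;

		snprintf(dir_path, sizeof(dir_path), "%s/%s", root,
			 ent->d_name);
		sub = opendir(dir_path);	/* no dirfd()/openat() needed */
		if (!sub)
			continue;

		printf("%s\n", dir_path);
		closedir(sub);
	}
	closedir(top);
}

int main(void)
{
	list_subdirs("/sys/kernel/debug/tracing/events");
	return 0;
}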
@@ -761,28 +758,24 @@ static void print_tracepoint_events(void)
 {
 	DIR *sys_dir, *evt_dir;
 	struct dirent *sys_next, *evt_next, sys_dirent, evt_dirent;
-	int sys_dir_fd;
 	char evt_path[MAXPATHLEN];
+	char dir_path[MAXPATHLEN];
 
 	if (valid_debugfs_mount(debugfs_path))
 		return;
 
 	sys_dir = opendir(debugfs_path);
 	if (!sys_dir)
-		goto cleanup;
-	sys_dir_fd = dirfd(sys_dir);
+		return;
 
 	for_each_subsystem(sys_dir, sys_dirent, sys_next) {
-		int dfd = openat(sys_dir_fd, sys_dirent.d_name,
-				 O_RDONLY|O_DIRECTORY), evt_dir_fd;
-		if (dfd == -1)
-			continue;
-		evt_dir = fdopendir(dfd);
-		if (!evt_dir) {
-			close(dfd);
+
+		snprintf(dir_path, MAXPATHLEN, "%s/%s", debugfs_path,
+			 sys_dirent.d_name);
+		evt_dir = opendir(dir_path);
+		if (!evt_dir)
 			continue;
-		}
-		evt_dir_fd = dirfd(evt_dir);
+
 		for_each_event(sys_dirent, evt_dir, evt_dirent, evt_next) {
 			snprintf(evt_path, MAXPATHLEN, "%s:%s",
 				 sys_dirent.d_name, evt_dirent.d_name);
@@ -791,8 +784,6 @@ static void print_tracepoint_events(void)
 		}
 		closedir(evt_dir);
 	}
-
-cleanup:
 	closedir(sys_dir);
 }
 
diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c
index fd3d9c8e90fc..559fb06210f5 100644
--- a/tools/perf/util/symbol.c
+++ b/tools/perf/util/symbol.c
@@ -833,7 +833,7 @@ int dso__load_modules(struct dso *self, symbol_filter_t filter, int v)
 	struct mod_dso *mods = mod_dso__new_dso("modules");
 	struct module *pos;
 	struct rb_node *next;
-	int err;
+	int err, count = 0;
 
 	err = mod_dso__load_modules(mods);
 
@@ -852,14 +852,16 @@ int dso__load_modules(struct dso *self, symbol_filter_t filter, int v)
 			break;
 
 		next = rb_next(&pos->rb_node);
+		count += err;
 	}
 
 	if (err < 0) {
 		mod_dso__delete_modules(mods);
 		mod_dso__delete_self(mods);
+		return err;
 	}
 
-	return err;
+	return count;
 }
 
 static inline void dso__fill_symbol_holes(struct dso *self)
@@ -913,8 +915,15 @@ int dso__load_kernel(struct dso *self, const char *vmlinux,
 
 	if (vmlinux) {
 		err = dso__load_vmlinux(self, vmlinux, filter, v);
-		if (err > 0 && use_modules)
-			err = dso__load_modules(self, filter, v);
+		if (err > 0 && use_modules) {
+			int syms = dso__load_modules(self, filter, v);
+
+			if (syms < 0) {
+				fprintf(stderr, "dso__load_modules failed!\n");
+				return syms;
+			}
+			err += syms;
+		}
 	}
 
 	if (err <= 0)
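
dso__load_kernel() now treats the module loader's return value as either an error to propagate (negative) or a symbol count to fold into the vmlinux total. A compact sketch of that convention; the two loader functions and their return values below are hypothetical.

#include <stdio.h>

/* Hypothetical loaders: >= 0 means symbols loaded, < 0 means error. */
static int load_vmlinux_syms(void) { return 1200; }
static int load_module_syms(void)  { return -1; /* e.g. modules.dep missing */ }

static int load_kernel_syms(void)
{
	int err = load_vmlinux_syms();

	if (err > 0) {
		int syms = load_module_syms();

		if (syms < 0) {
			fprintf(stderr, "module symbol loading failed\n");
			return syms;	/* propagate the error, don't mask it */
		}
		err += syms;		/* otherwise fold the count in */
	}
	return err;
}

int main(void)
{
	printf("%d\n", load_kernel_syms());
	return 0;
}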